import psycopg2
from psycopg2 import extras
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
import config
from contextlib import closing
from database import connect_db
# create our little application :)
app = Flask(__name__)
app.config.from_object(config)
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def init_db():
with closing(connect_db()) as db:
with app.open_resource('schema.sql', mode='r') as file:
db.cursor().execute(file.read())
db.commit()
def add_paprika():
with closing(connect_db()) as db:
db.cursor().execute('UPDATE entries SET title = %s', ['PAPRIKA'])  # psycopg2 needs a cursor and %s placeholders
db.commit()
def query_db(query, args=(), one=False):
dict_cur = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
dict_cur.execute(query, args)
rv = dict_cur.fetchall()
dict_cur.close()
return (rv[0] if rv else None) if one else rv
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.route('/')
def show_entries():
entries = query_db('select title, text from entries order by id desc')
return render_template('show_entries.html', entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
dict_cur = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
dict_cur.execute('insert into entries (title, text) values (%s,%s)', [request.form['title'], request.form['text']])
dict_cur.execute('COMMIT')
flash('New entry was successfully posted')
return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
if __name__ == '__main__':
app.run()
| {
"content_hash": "d4fe1825fa1091652d7ee12267157954",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 119,
"avg_line_length": 26.72826086956522,
"alnum_prop": 0.6380642537616917,
"repo_name": "casassg/schedule_generator",
"id": "0d309dd282a01ce8bb5599857373df2b9ae1f1d1",
"size": "2477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "918"
},
{
"name": "HTML",
"bytes": "1535"
},
{
"name": "Python",
"bytes": "10125"
}
],
"symlink_target": ""
} |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from setupDB import Base, Project, Category, Entry
engine = create_engine('sqlite:///projects.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
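# Minimal illustration (not part of the original script) of the rollback
# behaviour described above: objects added but rolled back are never persisted.
discarded = Project(name = "DISCARDED PROJECT")
session.add(discarded)
session.rollback()  # "discarded" is dropped; projects.db is left unchanged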
project1 = Project(name = "TEST PROJECT")
session.add(project1)
session.commit()
category1 = Category(name = "TEST CATEGORY", project_id = 1)
session.add(category1)
session.commit()
entry1 = Entry(description = "THIS IS A TEST DESCRIPTION.", category_id = 1, project_id = 1)
session.add(entry1)
session.commit()
| {
"content_hash": "c0eddfd7ee2f76995e0c35f513b2d457",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 92,
"avg_line_length": 33.23529411764706,
"alnum_prop": 0.7663716814159292,
"repo_name": "pash080/projects_web",
"id": "612554f2aeb2b42d234678e47620d6bae2f4af48",
"size": "1130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "addTestProjects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "62"
},
{
"name": "HTML",
"bytes": "5995"
},
{
"name": "Python",
"bytes": "8422"
}
],
"symlink_target": ""
} |
def build_from_name(city_name):
return {
'match':{
'name':{
'query':city_name,
'operator':'and',
'fuzziness':'AUTO'
}
}
}
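# For example (illustrative city name), build_from_name('Kyoto') produces the
# Elasticsearch match query:
#   {'match': {'name': {'query': 'Kyoto', 'operator': 'and', 'fuzziness': 'AUTO'}}}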
def build_from_guide(guide, regions=None, weathers=None, months=None, ignore_cids=None, ignore_name=None):
must = [{
'match':{'guide':{
'query':guide,
'cutoff_frequency':0.005
}}
}]
should = []
must_not = []
if regions:
if isinstance(regions, (unicode,str)):
regions = [regions]
must.append({
'match':{'regions':{
'query':regions,
'operator':'or',
'fuzziness':'AUTO'
}}
})
if weathers:
if isinstance(weathers, dict):
weathers = [weathers]
should += list(yield_weather_conditions(*weathers))
if months:
should.append({'terms':{'months_ideal':months}})
if ignore_cids:
must_not.append({'terms':{'cid':ignore_cids}})
if ignore_name:
must_not.append(build_from_name(ignore_name))
query = {'bool':{'must':must}}
if should:
query['bool']['should'] = should
if must_not:
query['bool']['must_not'] = must_not
return query
def yield_weather_conditions(*weathers):
temps = [w['temperature']['celsius'] for w in weathers if 'celsius' in w.get('temperature',{})]
if temps:
yield {'range':{'weather.temperature.celsius':{
'gte':min(temps) - 3,
'lte':max(temps) + 3
}}}
humidities = [w['humidity'] for w in weathers if 'humidity' in w]
if humidities:
yield {'range':{'weather.humidity':{
'gte':min(humidities) - 0.1,
'lte':max(humidities) + 0.1
}}}
patterns = [w['pattern'] for w in weathers if 'pattern' in w]
if patterns:
yield {'terms':{'weather.pattern':patterns}}
| {
"content_hash": "f1a64a5e654e0ae3aa3dfe5c6e81ea36",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 106,
"avg_line_length": 31.375,
"alnum_prop": 0.499003984063745,
"repo_name": "whosken/destinate",
"id": "d6c88877f6348395133e891479fc3793bc89156b",
"size": "2008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "destinate/query_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17644"
}
],
"symlink_target": ""
} |
from copy import deepcopy
from enum import Enum, unique
import json
from kii import exceptions as exc
class Clause:
def __str__(self):
return '<Clause Object:{0}> {1}'.format(
id(self),
json.dumps(self.query(), ensure_ascii=False))
@classmethod
def all(cls):
return AllClause()
@classmethod
def and_(cls, *clauses):
return AndClause(*clauses)
@classmethod
def eq(cls, field, value):
return EqualClause(field, value)
@classmethod
def in_(cls, field, values):
return InClause(field, values)
@classmethod
def not_(cls, clause):
return NotClause(clause)
@classmethod
def or_(cls, *clauses):
return OrClause(*clauses)
@classmethod
def prefix(cls, field, prefix):
return PrefixClause(field, prefix)
def clone(self):
return deepcopy(self)
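# Illustrative composition (example field names/values, not from the source):
# the classmethod builders return the clause types defined below, and query()
# serializes the whole tree, e.g.
#   c = Clause.and_(Clause.eq('age', 30), Clause.prefix('name', 'Jo'))
#   c.query() == {'type': 'and', 'clauses': [
#       {'type': 'eq', 'field': 'age', 'value': 30},
#       {'type': 'prefix', 'field': 'name', 'prefix': 'Jo'}]}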
class AllClause(Clause):
def query(self):
return {'type': 'all'}
class AndClause(Clause):
def __init__(self, *clauses):
for c in clauses:
if not isinstance(c, Clause):
raise exc.KiiInvalidClauseError
self.children = list(clauses)
def __len__(self):
return len(self.children)
def add(self, clause):
if not isinstance(clause, Clause):
raise exc.KiiInvalidClauseError
self.children.append(clause)
return self
def query(self):
return {
'type': 'and',
'clauses': [c.query() for c in self.children],
}
class EqualClause(Clause):
def __init__(self, field, value):
self.field = field
self.value = value
def query(self):
return {
'type': 'eq',
'field': self.field,
'value': self.value,
}
class GeoBoxClause(Clause):
def __init__(self, field, ne_lat, ne_lon, sw_lat, sw_lon):
self.field = field
self.ne_lat = ne_lat
self.ne_lon = ne_lon
self.sw_lat = sw_lat
self.sw_lon = sw_lon
def query(self):
return {
'type': 'geobox',
'field': self.field,
'box': {
'ne': {
'_type': 'point',
'lat': self.ne_lat,
'lon': self.ne_lon,
},
'sw': {
'_type': 'point',
'lat': self.sw_lat,
'lon': self.sw_lon,
}
}
}
class GeoDistanceClause(Clause):
def __init__(self, field, center_lat, center_lon, radius, put_distance_into=None):
self.field = field
self.center_lat = center_lat
self.center_lon = center_lon
self.radius = radius
self.put_distance_into = put_distance_into
def query(self):
params = {
'type': 'geodistance',
'field': self.field,
'center': {
'_type': 'point',
'lat': self.center_lat,
'lon': self.center_lon,
},
'radius': self.radius,
}
if self.put_distance_into is not None:
params['putDistanceInto'] = self.put_distance_into
return params
class HasFieldClause(Clause):
@unique
class Types(Enum):
string = 'STRING'
integer = 'INTEGER'
decimal = 'DECIMAL'
boolean = 'BOOLEAN'
def __init__(self, field, field_type):
self.field = field
if not isinstance(field_type, HasFieldClause.Types):
field_type = HasFieldClause.Types(field_type)
self.field_type = field_type
def query(self):
return {
'type': 'hasField',
'field': self.field,
'fieldType': self.field_type.value,
}
class InClause(Clause):
def __init__(self, field, values):
self.field = field
if not isinstance(values, (tuple, list)):
values = tuple(values)
self.values = values
def query(self):
return {
'type': 'in',
'field': self.field,
'values': self.values,
}
class NotClause(Clause):
def __init__(self, clause):
if not isinstance(clause, Clause):
raise exc.KiiInvalidClauseError
self.clause = clause
def query(self):
return {
'type': 'not',
'clause': self.clause.query(),
}
class OrClause(AndClause):
def query(self):
return {
'type': 'or',
'clauses': [c.query() for c in self.children],
}
class PrefixClause(Clause):
def __init__(self, field, prefix):
self.field = field
self.prefix = prefix
def query(self):
return {
'type': 'prefix',
'field': self.field,
'prefix': self.prefix,
}
class RangeClause(Clause):
def __init__(self, field):
self.field = field
self.lower_limit = None
self.lower_included = True
self.upper_limit = None
self.upper_included = True
def query(self):
query = {
'type': 'range',
'field': self.field,
}
if self.lower_limit is not None:
query['lowerLimit'] = self.lower_limit
if not self.lower_included:
query['lowerIncluded'] = self.lower_included
if self.upper_limit is not None:
query['upperLimit'] = self.upper_limit
if not self.upper_included:
query['upperIncluded'] = self.upper_included
return query
def ge(self, lower_limit):
self.lower_limit = lower_limit
self.lower_included = True
return self
def gt(self, lower_limit):
self.lower_limit = lower_limit
self.lower_included = False
return self
def le(self, upper_limit):
self.upper_limit = upper_limit
self.upper_included = True
return self
def lt(self, upper_limit):
self.upper_limit = upper_limit
self.upper_included = False
return self
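# Illustrative use of the chainable bounds (example field and limits):
#   RangeClause('age').ge(20).lt(30).query() ==
#       {'type': 'range', 'field': 'age',
#        'lowerLimit': 20, 'upperLimit': 30, 'upperIncluded': False}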
| {
"content_hash": "29f23e6dd98427e571454b97c8368888",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 86,
"avg_line_length": 24.09765625,
"alnum_prop": 0.5218025611930621,
"repo_name": "ta2xeo/python3-kii",
"id": "002d41abd27bf89f5b9a5cfe05429edddcc5c593",
"size": "6169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kii/data/clauses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "185387"
}
],
"symlink_target": ""
} |
"""Test Python APIs for working with formatters"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SBFormattersAPITestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
self.line = line_number('main.cpp', '// Set break point at this line.')
@add_test_categories(['pyapi'])
def test_formatters_api(self):
"""Test Python APIs for working with formatters"""
self.build()
self.setTearDownCleanup()
"""Test Python APIs for working with formatters"""
self.runCmd("file " + self.getBuildArtifact("a.out"),
CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1,
loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synthetic clear', check=False)
self.runCmd('type category delete foobar', check=False)
self.runCmd('type category delete JASSynth', check=False)
self.runCmd('type category delete newbar', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
format = lldb.SBTypeFormat(lldb.eFormatHex)
category = self.dbg.GetDefaultCategory()
category.AddTypeFormat(lldb.SBTypeNameSpecifier("int"), format)
self.expect("frame variable foo.A",
substrs=['0x00000001'])
self.expect("frame variable foo.E", matching=False,
substrs=['b8cca70a'])
category.AddTypeFormat(lldb.SBTypeNameSpecifier("long"), format)
self.expect("frame variable foo.A",
substrs=['0x00000001'])
self.expect("frame variable foo.E",
substrs=['b8cca70a'])
format.SetFormat(lldb.eFormatOctal)
category.AddTypeFormat(lldb.SBTypeNameSpecifier("int"), format)
self.expect("frame variable foo.A",
substrs=[' 01'])
self.expect("frame variable foo.E",
substrs=['b8cca70a'])
category.DeleteTypeFormat(lldb.SBTypeNameSpecifier("int"))
category.DeleteTypeFormat(lldb.SBTypeNameSpecifier("long"))
self.expect("frame variable foo.A", matching=False,
substrs=[' 01'])
self.expect("frame variable foo.E", matching=False,
substrs=['b8cca70a'])
summary = lldb.SBTypeSummary.CreateWithSummaryString(
"the hello world you'll never see")
summary.SetSummaryString('hello world')
new_category = self.dbg.GetCategory("foobar")
self.assertFalse(
new_category.IsValid(),
"getting a non-existing category worked")
new_category = self.dbg.CreateCategory("foobar")
new_category.SetEnabled(True)
new_category.AddTypeSummary(
lldb.SBTypeNameSpecifier(
"^.*t$",
True, # is_regexp
), summary)
self.expect("frame variable foo.A",
substrs=['hello world'])
self.expect("frame variable foo.E", matching=False,
substrs=['hello world'])
self.expect("frame variable foo.B",
substrs=['hello world'])
self.expect("frame variable foo.F",
substrs=['hello world'])
new_category.SetEnabled(False)
self.expect("frame variable foo.A", matching=False,
substrs=['hello world'])
self.expect("frame variable foo.E", matching=False,
substrs=['hello world'])
self.expect("frame variable foo.B", matching=False,
substrs=['hello world'])
self.expect("frame variable foo.F", matching=False,
substrs=['hello world'])
self.dbg.DeleteCategory(new_category.GetName())
self.expect("frame variable foo.A", matching=False,
substrs=['hello world'])
self.expect("frame variable foo.E", matching=False,
substrs=['hello world'])
self.expect("frame variable foo.B", matching=False,
substrs=['hello world'])
self.expect("frame variable foo.F", matching=False,
substrs=['hello world'])
filter = lldb.SBTypeFilter(0)
filter.AppendExpressionPath("A")
filter.AppendExpressionPath("D")
self.assertTrue(
filter.GetNumberOfExpressionPaths() == 2,
"filter with two items does not have two items")
category.AddTypeFilter(lldb.SBTypeNameSpecifier("JustAStruct"), filter)
self.expect("frame variable foo",
substrs=['A = 1', 'D = 6.28'])
self.expect("frame variable foo", matching=False,
substrs=['B = ', 'C = ', 'E = ', 'F = '])
category.DeleteTypeFilter(
lldb.SBTypeNameSpecifier(
"JustAStruct", True))
self.expect("frame variable foo",
substrs=['A = 1', 'D = 6.28'])
self.expect("frame variable foo", matching=False,
substrs=['B = ', 'C = ', 'E = ', 'F = '])
category.DeleteTypeFilter(
lldb.SBTypeNameSpecifier(
"JustAStruct", False))
self.expect("frame variable foo",
substrs=['A = 1', 'D = 6.28'])
self.expect("frame variable foo", matching=True,
substrs=['B = ', 'C = ', 'E = ', 'F = '])
self.runCmd("command script import --allow-reload ./synth.py")
self.expect("frame variable foo", matching=False,
substrs=['X = 1'])
self.dbg.GetCategory("JASSynth").SetEnabled(True)
self.expect("frame variable foo", matching=True,
substrs=['X = 1'])
self.dbg.GetCategory("CCCSynth").SetEnabled(True)
self.expect(
"frame variable ccc",
matching=True,
substrs=[
'CCC object with leading value (int) a = 111',
'a = 111',
'b = 222',
'c = 333'])
foo_var = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame().FindVariable('foo')
self.assertTrue(foo_var.IsValid(), 'could not find foo')
self.assertTrue(
foo_var.GetDeclaration().IsValid(),
'foo declaration is invalid')
self.assertTrue(
foo_var.GetNumChildren() == 2,
'synthetic value has wrong number of child items (synth)')
self.assertTrue(
foo_var.GetChildMemberWithName('X').GetValueAsUnsigned() == 1,
'foo_synth.X has wrong value (synth)')
self.assertFalse(
foo_var.GetChildMemberWithName('B').IsValid(),
'foo_synth.B is valid but should not (synth)')
self.dbg.GetCategory("JASSynth").SetEnabled(False)
foo_var = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame().FindVariable('foo')
self.assertTrue(foo_var.IsValid(), 'could not find foo')
self.assertFalse(
foo_var.GetNumChildren() == 2,
'still seeing synthetic value')
filter = lldb.SBTypeFilter(0)
filter.AppendExpressionPath("A")
filter.AppendExpressionPath("D")
category.AddTypeFilter(lldb.SBTypeNameSpecifier("JustAStruct"), filter)
self.expect("frame variable foo",
substrs=['A = 1', 'D = 6.28'])
foo_var = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame().FindVariable('foo')
self.assertTrue(foo_var.IsValid(), 'could not find foo')
self.assertTrue(
foo_var.GetNumChildren() == 2,
'synthetic value has wrong number of child items (filter)')
self.assertTrue(
foo_var.GetChildMemberWithName('X').GetValueAsUnsigned() == 0,
'foo_synth.X has wrong value (filter)')
self.assertTrue(
foo_var.GetChildMemberWithName('A').GetValueAsUnsigned() == 1,
'foo_synth.A has wrong value (filter)')
self.assertTrue(filter.ReplaceExpressionPathAtIndex(
0, "C"), "failed to replace an expression path in filter")
self.expect("frame variable foo",
substrs=['A = 1', 'D = 6.28'])
category.AddTypeFilter(lldb.SBTypeNameSpecifier("JustAStruct"), filter)
self.expect("frame variable foo",
substrs=["C = 'e'", 'D = 6.28'])
category.AddTypeFilter(lldb.SBTypeNameSpecifier("FooType"), filter)
filter.ReplaceExpressionPathAtIndex(1, "F")
self.expect("frame variable foo",
substrs=["C = 'e'", 'D = 6.28'])
category.AddTypeFilter(lldb.SBTypeNameSpecifier("JustAStruct"), filter)
self.expect("frame variable foo",
substrs=["C = 'e'", 'F = 0'])
self.expect("frame variable bar",
substrs=["C = 'e'", 'D = 6.28'])
foo_var = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame().FindVariable('foo')
self.assertTrue(foo_var.IsValid(), 'could not find foo')
self.assertTrue(
foo_var.GetChildMemberWithName('C').GetValueAsUnsigned() == ord('e'),
'foo_synth.C has wrong value (filter)')
chosen = self.dbg.GetFilterForType(
lldb.SBTypeNameSpecifier("JustAStruct"))
self.assertTrue(
chosen.count == 2,
"wrong filter found for JustAStruct")
self.assertTrue(
chosen.GetExpressionPathAtIndex(0) == 'C',
"wrong item at index 0 for JustAStruct")
self.assertTrue(
chosen.GetExpressionPathAtIndex(1) == 'F',
"wrong item at index 1 for JustAStruct")
self.assertFalse(
category.DeleteTypeFilter(
lldb.SBTypeNameSpecifier("NoSuchType")),
"deleting a non-existing filter worked")
self.assertFalse(
category.DeleteTypeSummary(
lldb.SBTypeNameSpecifier("NoSuchType")),
"deleting a non-existing summary worked")
self.assertFalse(
category.DeleteTypeFormat(
lldb.SBTypeNameSpecifier("NoSuchType")),
"deleting a non-existing format worked")
self.assertFalse(
category.DeleteTypeSynthetic(
lldb.SBTypeNameSpecifier("NoSuchType")),
"deleting a non-existing synthetic worked")
self.assertFalse(
category.DeleteTypeFilter(
lldb.SBTypeNameSpecifier("")),
"deleting a filter for '' worked")
self.assertFalse(
category.DeleteTypeSummary(
lldb.SBTypeNameSpecifier("")),
"deleting a summary for '' worked")
self.assertFalse(
category.DeleteTypeFormat(
lldb.SBTypeNameSpecifier("")),
"deleting a format for '' worked")
self.assertFalse(
category.DeleteTypeSynthetic(
lldb.SBTypeNameSpecifier("")),
"deleting a synthetic for '' worked")
try:
self.assertFalse(
category.AddTypeSummary(
lldb.SBTypeNameSpecifier("NoneSuchType"),
None),
"adding a summary valued None worked")
except:
pass
else:
self.assertFalse(True, "adding a summary valued None worked")
try:
self.assertFalse(
category.AddTypeFilter(
lldb.SBTypeNameSpecifier("NoneSuchType"),
None),
"adding a filter valued None worked")
except:
pass
else:
self.assertFalse(True, "adding a filter valued None worked")
try:
self.assertFalse(
category.AddTypeSynthetic(
lldb.SBTypeNameSpecifier("NoneSuchType"),
None),
"adding a synthetic valued None worked")
except:
pass
else:
self.assertFalse(True, "adding a synthetic valued None worked")
try:
self.assertFalse(
category.AddTypeFormat(
lldb.SBTypeNameSpecifier("NoneSuchType"),
None),
"adding a format valued None worked")
except:
pass
else:
self.assertFalse(True, "adding a format valued None worked")
self.assertFalse(
category.AddTypeSummary(
lldb.SBTypeNameSpecifier("EmptySuchType"),
lldb.SBTypeSummary()),
"adding a summary without value worked")
self.assertFalse(
category.AddTypeFilter(
lldb.SBTypeNameSpecifier("EmptySuchType"),
lldb.SBTypeFilter()),
"adding a filter without value worked")
self.assertFalse(
category.AddTypeSynthetic(
lldb.SBTypeNameSpecifier("EmptySuchType"),
lldb.SBTypeSynthetic()),
"adding a synthetic without value worked")
self.assertFalse(
category.AddTypeFormat(
lldb.SBTypeNameSpecifier("EmptySuchType"),
lldb.SBTypeFormat()),
"adding a format without value worked")
self.assertFalse(
category.AddTypeSummary(
lldb.SBTypeNameSpecifier(""),
lldb.SBTypeSummary.CreateWithSummaryString("")),
"adding a summary for an invalid type worked")
self.assertFalse(
category.AddTypeFilter(
lldb.SBTypeNameSpecifier(""),
lldb.SBTypeFilter(0)),
"adding a filter for an invalid type worked")
self.assertFalse(
category.AddTypeSynthetic(
lldb.SBTypeNameSpecifier(""),
lldb.SBTypeSynthetic.CreateWithClassName("")),
"adding a synthetic for an invalid type worked")
self.assertFalse(
category.AddTypeFormat(
lldb.SBTypeNameSpecifier(""),
lldb.SBTypeFormat(
lldb.eFormatHex)),
"adding a format for an invalid type worked")
new_category = self.dbg.CreateCategory("newbar")
new_category.AddTypeSummary(
lldb.SBTypeNameSpecifier("JustAStruct"),
lldb.SBTypeSummary.CreateWithScriptCode("return 'hello scripted world';"))
self.expect("frame variable foo", matching=False,
substrs=['hello scripted world'])
new_category.SetEnabled(True)
self.expect("frame variable foo", matching=True,
substrs=['hello scripted world'])
self.expect("frame variable foo_ptr", matching=True,
substrs=['hello scripted world'])
new_category.AddTypeSummary(
lldb.SBTypeNameSpecifier("JustAStruct"),
lldb.SBTypeSummary.CreateWithScriptCode(
"return 'hello scripted world';",
lldb.eTypeOptionSkipPointers))
self.expect("frame variable foo", matching=True,
substrs=['hello scripted world'])
frame = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame()
foo_ptr = frame.FindVariable("foo_ptr")
summary = foo_ptr.GetTypeSummary()
self.assertFalse(
summary.IsValid(),
"summary found for foo* when none was planned")
self.expect("frame variable foo_ptr", matching=False,
substrs=['hello scripted world'])
new_category.AddTypeSummary(
lldb.SBTypeNameSpecifier("JustAStruct"),
lldb.SBTypeSummary.CreateWithSummaryString(
"hello static world",
lldb.eTypeOptionNone))
summary = foo_ptr.GetTypeSummary()
self.assertTrue(
summary.IsValid(),
"no summary found for foo* when one was in place")
self.assertTrue(
summary.GetData() == "hello static world",
"wrong summary found for foo*")
self.expect("frame variable e1", substrs=["I am an empty Empty1 {}"])
self.expect("frame variable e2", substrs=["I am an empty Empty2"])
self.expect(
"frame variable e2",
substrs=["I am an empty Empty2 {}"],
matching=False)
self.assertTrue(
self.dbg.GetCategory(
lldb.eLanguageTypeObjC) is not None,
"ObjC category is None")
@add_test_categories(['pyapi'])
def test_force_synth_off(self):
"""Test that one can have the public API return non-synthetic SBValues if desired"""
self.build(dictionary={'EXE': 'no_synth'})
self.setTearDownCleanup()
self.runCmd("file " + self.getBuildArtifact("no_synth"),
CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synthetic clear', check=False)
self.runCmd('type category delete foobar', check=False)
self.runCmd('type category delete JASSynth', check=False)
self.runCmd('type category delete newbar', check=False)
self.runCmd('settings set target.enable-synthetic-value true')
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
frame = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame()
int_vector = frame.FindVariable("int_vector")
if self.TraceOn():
print(int_vector)
self.assertTrue(
int_vector.GetNumChildren() == 0,
'synthetic vector is empty')
self.runCmd('settings set target.enable-synthetic-value false')
frame = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame()
int_vector = frame.FindVariable("int_vector")
if self.TraceOn():
print(int_vector)
self.assertFalse(
int_vector.GetNumChildren() == 0,
'"physical" vector is not empty')
self.runCmd('settings set target.enable-synthetic-value true')
frame = self.dbg.GetSelectedTarget().GetProcess(
).GetSelectedThread().GetSelectedFrame()
int_vector = frame.FindVariable("int_vector")
if self.TraceOn():
print(int_vector)
self.assertTrue(
int_vector.GetNumChildren() == 0,
'synthetic vector is still empty')
| {
"content_hash": "f12f0902273bc5e2816a0a6d13441d50",
"timestamp": "",
"source": "github",
"line_count": 500,
"max_line_length": 92,
"avg_line_length": 40.456,
"alnum_prop": 0.5758849120031639,
"repo_name": "endlessm/chromium-browser",
"id": "f01d7c457c5f400b581df26c3b641be8e917f7cd",
"size": "20228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/python_api/formatters/TestFormattersSBAPI.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Add a new file to the ``fdep.yml`` file.
.. code:: bash
fdep add [--version=if_any] <local path> <remote path>
.. note:: This doesn't download the file. Use ``fdep install`` to download the files.
"""
import sys
from fdep.commands import ConfigRequiredMixin, SubcommandRunner
class AddCommandRunner(SubcommandRunner, ConfigRequiredMixin):
"""Handle add commands."""
COMMAND_NAME = 'add'
def run(self, *args, **kwargs):
if len(args) != 2:
sys.stderr.write(self.messages.ERROR_INVALID_ARGUMENT)
self.root_runner.commands['help'].run()
return False
entry = self.path_helper.resolve_path_to_entry(args[0])
source = args[1]
self.entries[entry] = {"source": source}
version = kwargs.get('version')
if version:
self.entries[entry]['version'] = version
self.config.save()
print(self.messages.ADDED.format(entry))
return True
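# Illustrative effect (command-line values are hypothetical): running
#   fdep add --version=3 data/model.bin s3://bucket/model.bin
# stores an entry equivalent to {"source": "s3://bucket/model.bin", "version": "3"}
# under the resolved "data/model.bin" key before config.save() rewrites fdep.yml.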
| {
"content_hash": "a48d28fff2bf6b532dfd1316dcff25dc",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 85,
"avg_line_length": 26.16216216216216,
"alnum_prop": 0.6229338842975206,
"repo_name": "checkr/fdep",
"id": "4af3dcda452b37a50d9626ab4e1bbba19faab546",
"size": "968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fdep/commands/add.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70522"
}
],
"symlink_target": ""
} |
import logging
import json
import os
import io
import unittest
import hashlib
import xml.etree.ElementTree as ET
import requests
from base64 import b64encode
from environs import Env
from dvvset import DVVSet
import boto3
from botocore.config import Config
from botocore.utils import fix_s3_host
env = Env()
env.read_env('.env')
#logger = logging.getLogger(__name__) # pylint: disable=invalid-name
for name in ['botocore', 's3transfer', 'boto3']:
logging.getLogger(name).setLevel(logging.CRITICAL)
BASE_URL = env.str("BASE_URL", "https://lightupon.cloud")
USERNAME_1 = env.str("USERNAME_1")
PASSWORD_1 = env.str("PASSWORD_1")
USERNAME_2 = env.str("USERNAME_2")
PASSWORD_2 = env.str("PASSWORD_2")
TEST_BUCKET_1 = env.str("TEST_BUCKET_1")
TEST_BUCKET_2 = env.str("TEST_BUCKET_2")
UPLOADS_BUCKET_NAME = env.str("UPLOADS_BUCKET_NAME")
ACCESS_KEY = env.str("ACCESS_KEY")
SECRET_KEY = env.str("SECRET_KEY")
HTTP_PROXY = env.str("HTTP_PROXY")
RIAK_ACTION_LOG_FILENAME = ".riak_action_log.xml"
FILE_UPLOAD_CHUNK_SIZE = 2000000
def configure_boto3():
session = boto3.Session(
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
)
resource = session.resource('s3', config=Config(proxies={'http': HTTP_PROXY}), use_ssl=False)
resource.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host)
boto3.set_stream_logger('botocore')
return resource
class TestClient(unittest.TestCase):
def setUp(self):
creds = {"login": USERNAME_1, "password": PASSWORD_1}
response = requests.post("{}/riak/login".format(BASE_URL), data=json.dumps(creds),
headers={'content-type': 'application/json'})
data = response.json()
self.token = data['token']
self.user_id = data['id']
self.resource = configure_boto3()
self.purge_test_buckets()
def get_json(self, url, status=200, **kwargs):
response = requests.get(url, headers={'content-type': 'application/json',
'authorization': 'Token {}'.format(self.token)}, **kwargs)
assert response.status_code == status
assert response.headers['content-type'] == 'application/json'
return response.json()
def post_json(self, url, data, status=200, **kwargs):
response = requests.post(url, data=json.dumps(data),
headers={'content-type': 'application/json',
'authorization': 'Token {}'.format(self.token)})
assert response.status_code == status
return response.json()
def delete_json(self, url, data, status=200, **kwargs):
response = requests.delete(url, data=json.dumps(data),
headers={'content-type': 'application/json',
'authorization': 'Token {}'.format(self.token)})
assert response.status_code == status
assert response.headers['content-type'] == 'application/json'
return response.json()
def upload_file(self, url, fn, prefix='', guid='',
last_seen_version=None, form_data=None, **kwargs):
"""
Uploads a file to the server by splitting it into chunks and checking
whether the server already has each chunk before actually uploading it.
``url`` -- The base upload API endpoint
``fn`` -- filename
``prefix`` -- an object's prefix on server
``guid`` -- unique identifier ( UUID4 ) for tracking history of changes
``last_seen_version`` -- casual history value, generated by DVVSet()
"""
data = {}
stat = os.stat(fn)
modified_utc = str(int(stat.st_mtime))
size = stat.st_size
dvvset = DVVSet()  # needed by both branches below
if not last_seen_version:
dot = dvvset.create(dvvset.new(modified_utc), self.user_id)
version = b64encode(json.dumps(dot).encode())
else:
# increment version
context = dvvset.join(last_seen_version)
new_dot = dvvset.update(dvvset.new_with_history(context, modified_utc),
dot, self.user_id)
version = dvvset.sync([last_seen_version, new_dot])
version = b64encode(json.dumps(version).encode())
result = None
with open(fn, 'rb') as fd:
_read_chunk = lambda: fd.read(FILE_UPLOAD_CHUNK_SIZE)
part_num = 1
md5_list = []
upload_id = None
offset = 0
for chunk in iter(_read_chunk, b''):  # b'' sentinel: the file is opened in binary mode
md5 = hashlib.md5(chunk)
md5_digest = md5.hexdigest()
md5_list.append(md5_digest)
multipart_form_data = {
'files[]': (fn, ''),
'md5': md5_digest,
'prefix': prefix,
'guid': guid,
'version': version
}
chunk_size = len(chunk)
if form_data:
multipart_form_data.update(form_data)
if size > FILE_UPLOAD_CHUNK_SIZE:
offset = (part_num-1) * FILE_UPLOAD_CHUNK_SIZE
limit = offset+chunk_size-1
if limit < 0:
limit = 0
ct_range = "bytes {}-{}/{}".format(offset, limit, size)
else:
ct_range = "bytes 0-{}/{}".format(size-1, size)
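# Worked example (values not from the source): with FILE_UPLOAD_CHUNK_SIZE = 2000000
# and a 5000000-byte file, part 2 sends content-range "bytes 2000000-3999999/5000000"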
headers = {
'accept': 'application/json',
'authorization': 'Token {}'.format(self.token),
'content-range': ct_range
}
if part_num == 1:
r_url = url
else:
r_url = "{}{}/{}/".format(url, upload_id, part_num)
if offset+chunk_size == size:
# last chunk
etags = ",".join(["{},{}".format(i+1, md5_list[i]) for i in range(len(md5_list))])
multipart_form_data.update({
'etags[]': etags
})
# send request without binary data first
response = requests.post(r_url, files=multipart_form_data,
headers=headers)
if response.status_code == 206:
# skip chunk upload, as the server already has it
response_json = response.json()
upload_id = response_json['upload_id']
guid = response_json['guid']
part_num += 1
if offset+chunk_size == size:
result = response_json
break
else:
continue
self.assertEqual(response.status_code, 200)
response_json = response.json()
upload_id = response_json['upload_id']
guid = response_json['guid'] # server could change GUID
server_md5 = response_json['md5']
self.assertEqual(md5_digest, server_md5)
# upload the actual data now
multipart_form_data.update({
'files[]': (fn, chunk),
'guid': guid # GUID could change
})
response = requests.post(r_url, files=multipart_form_data, headers=headers)
self.assertEqual(response.status_code, 200)
response_json = response.json()
if offset+chunk_size == size:
# the last chunk has been processed, expect complete_upload response
expected = set(['lock_user_tel', 'lock_user_name', 'guid', 'upload_id',
'lock_modified_utc', 'lock_user_id', 'is_locked',
'author_tel', 'is_deleted', 'upload_time', 'md5',
'version', 'height', 'author_id', 'author_name',
'object_key', 'bytes', 'width', 'orig_name', 'end_byte'])
self.assertEqual(expected, set(response_json.keys()))
result = response_json
break
else:
self.assertEqual(set(['end_byte', 'upload_id', 'guid', 'upload_id', 'md5']),
set(response_json.keys()))
#self.assertEqual(response_json['guid'], guid)
#self.assertEqual(response_json['upload_id'], upload_id)
server_md5 = response_json['md5']
self.assertEqual(md5_digest, server_md5)
upload_id = response_json['upload_id']
part_num += 1
return result
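# Illustrative call (endpoint URL and filename are assumptions, not from the source):
#   result = self.upload_file("{}/riak/upload/{}/".format(BASE_URL, TEST_BUCKET_1),
#                             "photo.jpg", prefix="")
# On success, result holds the final complete-upload JSON (object_key, md5, version, ...).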
def download_object(self, bucketId, objectKey):
"""
This method downloads any object from the object storage.
Unlike download_file, it queries Riak CS directly.
"""
bucket = self.resource.Bucket(bucketId)
content = io.BytesIO()
bucket.download_fileobj(Fileobj=content, Key=objectKey)
return content.getvalue()
def download_file(self, bucketId, objectKey):
"""
This method uses the /riak/download/ API endpoint to download a file
"""
url = '{}/riak/download/{}/{}'.format(BASE_URL, bucketId, objectKey)
response = requests.get(url, headers={"authorization": "Token {}".format(self.token)})
return response.content
def head(self, bucketId, objectKey):
obj = self.resource.Object(bucketId, objectKey)
obj.load()
return obj.metadata
def remove_object(self, bucketId, objectKey):
bucket = self.resource.Bucket(bucketId)
bucket.Object(objectKey).delete()
def purge_test_buckets(self):
"""
Deletes all objects from bucket
"""
bucket = self.resource.Bucket(TEST_BUCKET_1)
try:
objects = [i for i in bucket.objects.all()]
except self.resource.meta.client.exceptions.NoSuchBucket:
objects = []
for obj in objects:
obj.delete()
bucket = self.resource.Bucket(UPLOADS_BUCKET_NAME)
try:
objects = [i for i in bucket.objects.all()]
except self.resource.meta.client.exceptions.NoSuchBucket:
objects = []
for obj in objects:
obj.delete()
def create_bucket(self, name):
pass
def parse_action_log(self, xmlstring):
tree = ET.ElementTree(ET.fromstring(xmlstring))
root = tree.getroot()
record = root.find("record")
action = record.find("action").text
details = record.find("details").text
user_name = record.find("user_name").text
tenant_name = record.find("tenant_name").text
return {
'action': action,
'details': details,
'user_name': user_name,
'tenant_name': tenant_name
}
def create_pseudo_directory(self, name):
req_headers = {
'content-type': 'application/json',
'authorization': 'Token {}'.format(self.token),
}
data = {
'prefix': '',
'directory_name': name
}
url = "{}/riak/list/{}/".format(BASE_URL, TEST_BUCKET_1)
return requests.post(url, json=data, headers=req_headers)
| {
"content_hash": "1127f7c2129711c198d5087545dac387",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 102,
"avg_line_length": 39.958333333333336,
"alnum_prop": 0.5319777546054918,
"repo_name": "imgrey/riak-middleware",
"id": "652ee8b869fbfbcd5df3c5435a9dd50765d80fd4",
"size": "11533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/client_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11421"
},
{
"name": "Erlang",
"bytes": "313379"
},
{
"name": "Makefile",
"bytes": "894"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
} |
r"""Find the full path to commands.
which(command, path=None, verbose=0, exts=None)
Return the full path to the first match of the given command on the
path.
whichall(command, path=None, verbose=0, exts=None)
Return a list of full paths to all matches of the given command on
the path.
whichgen(command, path=None, verbose=0, exts=None)
Return a generator which will yield full paths to all matches of the
given command on the path.
By default the PATH environment variable is searched (as well as, on
Windows, the AppPaths key in the registry), but a specific 'path' list
to search may be specified as well. On Windows, the PATHEXT environment
variable is applied as appropriate.
If "verbose" is true then a tuple of the form
(<fullpath>, <matched-where-description>)
is returned for each match. The latter element is a textual description
of where the match was found. For example:
from PATH element 0
from HKLM\SOFTWARE\...\perl.exe
"""
_cmdlnUsage = """
Show the full path of commands.
Usage:
which [<options>...] [<command-name>...]
Options:
-h, --help Print this help and exit.
-V, --version Print the version info and exit.
-a, --all Print *all* matching paths.
-v, --verbose Print out how matches were located and
show near misses on stderr.
-q, --quiet Just print out matches. I.e., do not print out
near misses.
-p <altpath>, --path=<altpath>
An alternative path (list of directories) may
be specified for searching.
-e <exts>, --exts=<exts>
Specify a list of extensions to consider instead
of the usual list (';'-separate list, Windows
only).
Show the full path to the program that would be run for each given
command name, if any. Which, like GNU's which, returns the number of
failed arguments, or -1 when no <command-name> was given.
Near misses include duplicates, non-regular files and (on Un*x)
files without executable access.
"""
__revision__ = "$Id: which.py 1448 2007-02-28 19:13:06Z trentm $"
__version_info__ = (1, 1, 3)
__version__ = '.'.join(map(str, __version_info__))
__all__ = ["which", "whichall", "whichgen", "WhichError"]
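# Example usage (illustrative; Python 2 style, matching the code below):
#   import which
#   print which.which("perl")            # first match, e.g. /usr/bin/perl
#   print which.whichall("perl")         # list of every match on the path
#   for path in which.whichgen("perl"):  # lazy generator of matches
#       print path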
import os
import sys
import getopt
import stat
#---- exceptions
class WhichError(Exception):
pass
#---- internal support stuff
def _getRegisteredExecutable(exeName):
"""Windows allow application paths to be registered in the registry."""
registered = None
if sys.platform.startswith('win'):
if os.path.splitext(exeName)[1].lower() != '.exe':
exeName += '.exe'
import _winreg
try:
key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" +\
exeName
value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key)
registered = (value, "from HKLM\\"+key)
except _winreg.error:
pass
if registered and not os.path.exists(registered[0]):
registered = None
return registered
def _samefile(fname1, fname2):
if sys.platform.startswith('win'):
return ( os.path.normpath(os.path.normcase(fname1)) ==\
os.path.normpath(os.path.normcase(fname2)) )
else:
return os.path.samefile(fname1, fname2)
def _cull(potential, matches, verbose=0):
"""Cull inappropriate matches. Possible reasons:
- a duplicate of a previous match
- not a disk file
- not executable (non-Windows)
If 'potential' is approved it is returned and added to 'matches'.
Otherwise, None is returned.
"""
for match in matches: # don't yield duplicates
if _samefile(potential[0], match[0]):
if verbose:
sys.stderr.write("duplicate: %s (%s)\n" % potential)
return None
else:
if not stat.S_ISREG(os.stat(potential[0]).st_mode):
if verbose:
sys.stderr.write("not a regular file: %s (%s)\n" % potential)
elif sys.platform != "win32" \
and not os.access(potential[0], os.X_OK):
if verbose:
sys.stderr.write("no executable access: %s (%s)\n"\
% potential)
else:
matches.append(potential)
return potential
#---- module API
def whichgen(command, path=None, verbose=0, exts=None):
"""Return a generator of full paths to the given command.
"command" is the name of the executable to search for.
"path" is an optional alternate path list to search. The default is
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
This method returns a generator which yields either full paths to
the given command or, if verbose, tuples of the form (<path to
command>, <where path found>).
"""
matches = []
if path is None:
usingGivenPath = 0
path = os.environ.get("PATH", "").split(os.pathsep)
if sys.platform.startswith("win"):
path.insert(0, os.curdir) # implied by Windows shell
else:
usingGivenPath = 1
# Windows has the concept of a list of extensions (PATHEXT env var).
if sys.platform.startswith("win"):
if exts is None:
exts = os.environ.get("PATHEXT", "").split(os.pathsep)
# If '.exe' is not in exts then this is likely Win9x or a bogus
# PATHEXT; fall back to a reasonable default.
for ext in exts:
if ext.lower() == ".exe":
break
else:
exts = ['.COM', '.EXE', '.BAT']
elif not isinstance(exts, list):
raise TypeError("'exts' argument must be a list or None")
else:
if exts is not None:
raise WhichError("'exts' argument is not supported on "\
"platform '%s'" % sys.platform)
exts = []
# File name cannot have path separators because PATH lookup does not
# work that way.
if os.sep in command or os.altsep and os.altsep in command:
if os.path.exists(command):
match = _cull((command, "explicit path given"), matches, verbose)
if verbose:
yield match
else:
yield match[0]
else:
for i in range(len(path)):
dirName = path[i]
# On windows the dirName *could* be quoted, drop the quotes
if sys.platform.startswith("win") and len(dirName) >= 2\
and dirName[0] == '"' and dirName[-1] == '"':
dirName = dirName[1:-1]
for ext in ['']+exts:
absName = os.path.abspath(
os.path.normpath(os.path.join(dirName, command+ext)))
if os.path.isfile(absName):
if usingGivenPath:
fromWhere = "from given path element %d" % i
elif not sys.platform.startswith("win"):
fromWhere = "from PATH element %d" % i
elif i == 0:
fromWhere = "from current directory"
else:
fromWhere = "from PATH element %d" % (i-1)
match = _cull((absName, fromWhere), matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
match = _getRegisteredExecutable(command)
if match is not None:
match = _cull(match, matches, verbose)
if match:
if verbose:
yield match
else:
yield match[0]
def which(command, path=None, verbose=0, exts=None):
"""Return the full path to the first match of the given command on
the path.
"command" is the name of the executable to search for.
"path" is an optional alternate path list to search. The default is
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned. The second
element is a textual description of where the match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
If no match is found for the command, a WhichError is raised.
"""
try:
match = whichgen(command, path, verbose, exts).next()
except StopIteration:
raise WhichError("Could not find '%s' on the path." % command)
return match
def whichall(command, path=None, verbose=0, exts=None):
"""Return a list of full paths to all matches of the given command
on the path.
"command" is the name of the executable to search for.
"path" is an optional alternate path list to search. The default is
to use the PATH environment variable.
"verbose", if true, will cause a 2-tuple to be returned for each
match. The second element is a textual description of where the
match was found.
"exts" optionally allows one to specify a list of extensions to use
instead of the standard list for this system. This can
effectively be used as an optimization to, for example, avoid
stat's of "foo.vbs" when searching for "foo" and you know it is
not a VisualBasic script but ".vbs" is on PATHEXT. This option
is only supported on Windows.
"""
return list( whichgen(command, path, verbose, exts) )
#---- mainline
def main(argv):
all = 0
verbose = 0
altpath = None
exts = None
try:
optlist, args = getopt.getopt(argv[1:], 'haVvqp:e:',
['help', 'all', 'version', 'verbose', 'quiet', 'path=', 'exts='])
except getopt.GetoptError, msg:
sys.stderr.write("which: error: %s. Your invocation was: %s\n"\
% (msg, argv))
sys.stderr.write("Try 'which --help'.\n")
return 1
for opt, optarg in optlist:
if opt in ('-h', '--help'):
print _cmdlnUsage
return 0
elif opt in ('-V', '--version'):
print "which %s" % __version__
return 0
elif opt in ('-a', '--all'):
all = 1
elif opt in ('-v', '--verbose'):
verbose = 1
elif opt in ('-q', '--quiet'):
verbose = 0
elif opt in ('-p', '--path'):
if optarg:
altpath = optarg.split(os.pathsep)
else:
altpath = []
elif opt in ('-e', '--exts'):
if optarg:
exts = optarg.split(os.pathsep)
else:
exts = []
if len(args) == 0:
return -1
failures = 0
for arg in args:
#print "debug: search for %r" % arg
nmatches = 0
for match in whichgen(arg, path=altpath, verbose=verbose, exts=exts):
if verbose:
print "%s (%s)" % match
else:
print match
nmatches += 1
if not all:
break
if not nmatches:
failures += 1
return failures
if __name__ == "__main__":
sys.exit( main(sys.argv) )
| {
"content_hash": "61f7f29f78df23166496e9f88a65ce8a",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 80,
"avg_line_length": 36.616766467065865,
"alnum_prop": 0.5726901062959935,
"repo_name": "trentm/which",
"id": "99f5e3fc6d318fcd4aea60cad1f431a6751637ca",
"size": "12440",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "which.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "11649"
},
{
"name": "Python",
"bytes": "23451"
}
],
"symlink_target": ""
} |
import copy
import functools
import os
import re
import stat
import json
import shlex
import shutil
import textwrap
import platform
from enum import Enum
from pathlib import PurePath
from .. import mlog
from .. import mesonlib
from ..compilers import clib_langs
from ..mesonlib import MesonException, OrderedSet
from ..mesonlib import Popen_safe, version_compare_many, version_compare, listify
# These must be defined in this file to avoid cyclical references.
packages = {}
_packages_accept_language = set()
class DependencyException(MesonException):
'''Exceptions raised while trying to find dependencies'''
class DependencyMethods(Enum):
# Auto means to use whatever dependency checking mechanisms in whatever order meson thinks is best.
AUTO = 'auto'
PKGCONFIG = 'pkg-config'
QMAKE = 'qmake'
# Just specify the standard link arguments, assuming the operating system provides the library.
SYSTEM = 'system'
# This is only supported on OSX - search the frameworks directory by name.
EXTRAFRAMEWORK = 'extraframework'
# Detect using the sysconfig module.
SYSCONFIG = 'sysconfig'
# Specify using a "program"-config style tool
CONFIG_TOOL = 'config-tool'
# For backwards compatibility
SDLCONFIG = 'sdlconfig'
CUPSCONFIG = 'cups-config'
PCAPCONFIG = 'pcap-config'
LIBWMFCONFIG = 'libwmf-config'
# Misc
DUB = 'dub'
class Dependency:
@classmethod
def _process_method_kw(cls, kwargs):
method = kwargs.get('method', 'auto')
if method not in [e.value for e in DependencyMethods]:
raise DependencyException('method {!r} is invalid'.format(method))
method = DependencyMethods(method)
# This sets per-tool config methods which are deprecated to the new
# generic CONFIG_TOOL value.
if method in [DependencyMethods.SDLCONFIG, DependencyMethods.CUPSCONFIG,
DependencyMethods.PCAPCONFIG, DependencyMethods.LIBWMFCONFIG]:
mlog.warning(textwrap.dedent("""\
Configuration method {} has been deprecated in favor of
'config-tool'. This will be removed in a future version of
meson.""".format(method)))
method = DependencyMethods.CONFIG_TOOL
# Set the detection method. If the method is set to auto, use any available method.
# If method is set to a specific string, allow only that detection method.
if method == DependencyMethods.AUTO:
methods = cls.get_methods()
elif method in cls.get_methods():
methods = [method]
else:
raise DependencyException(
'Unsupported detection method: {}, allowed methods are {}'.format(
method.value,
mlog.format_list([x.value for x in [DependencyMethods.AUTO] + cls.get_methods()])))
return methods
def __init__(self, type_name, kwargs):
self.name = "null"
self.version = None
self.language = None # None means C-like
self.is_found = False
self.type_name = type_name
self.compile_args = []
self.link_args = []
# Raw -L and -l arguments without manual library searching
# If None, self.link_args will be used
self.raw_link_args = None
self.sources = []
self.methods = self._process_method_kw(kwargs)
def __repr__(self):
s = '<{0} {1}: {2}>'
return s.format(self.__class__.__name__, self.name, self.is_found)
def get_compile_args(self):
return self.compile_args
def get_link_args(self, raw=False):
if raw and self.raw_link_args is not None:
return self.raw_link_args
return self.link_args
def found(self):
return self.is_found
def get_sources(self):
"""Source files that need to be added to the target.
As an example, gtest-all.cc when using GTest."""
return self.sources
@staticmethod
def get_methods():
return [DependencyMethods.AUTO]
def get_name(self):
return self.name
def get_version(self):
if self.version:
return self.version
else:
return 'unknown'
def get_exe_args(self, compiler):
return []
def need_openmp(self):
return False
def need_threads(self):
return False
def get_pkgconfig_variable(self, variable_name, kwargs):
raise DependencyException('{!r} is not a pkgconfig dependency'.format(self.name))
def get_configtool_variable(self, variable_name):
raise DependencyException('{!r} is not a config-tool dependency'.format(self.name))
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
"""Create a new dependency that contains part of the parent dependency.
The following options can be inherited:
links -- all link_with arguments
includes -- all include_directory and -I/-isystem calls
sources -- any source, header, or generated sources
compile_args -- any compile args
link_args -- any link args
Additionally the new dependency will have the version parameter of its
parent (if any) and the requested values of any dependencies will be
added as well.
"""
raise RuntimeError('Unreachable code in partial_dependency called')
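# e.g. (illustrative) dep.get_partial_dependency(compile_args=True) returns a
# copy of ``dep`` carrying only its compile arguments; see the subclasses below.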
class InternalDependency(Dependency):
def __init__(self, version, incdirs, compile_args, link_args, libraries, whole_libraries, sources, ext_deps):
super().__init__('internal', {})
self.version = version
self.is_found = True
self.include_directories = incdirs
self.compile_args = compile_args
self.link_args = link_args
self.libraries = libraries
self.whole_libraries = whole_libraries
self.sources = sources
self.ext_deps = ext_deps
def get_pkgconfig_variable(self, variable_name, kwargs):
raise DependencyException('Method "get_pkgconfig_variable()" is '
'invalid for an internal dependency')
def get_configtool_variable(self, variable_name):
raise DependencyException('Method "get_configtool_variable()" is '
'invalid for an internal dependency')
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
compile_args = self.compile_args.copy() if compile_args else []
link_args = self.link_args.copy() if link_args else []
libraries = self.libraries.copy() if links else []
whole_libraries = self.whole_libraries.copy() if links else []
sources = self.sources.copy() if sources else []
includes = self.include_directories.copy() if includes else []
deps = [d.get_partial_dependency(
compile_args=compile_args, link_args=link_args, links=links,
includes=includes, sources=sources) for d in self.ext_deps]
return InternalDependency(
self.version, includes, compile_args, link_args, libraries,
whole_libraries, sources, deps)
class ExternalDependency(Dependency):
def __init__(self, type_name, environment, language, kwargs):
super().__init__(type_name, kwargs)
self.env = environment
self.name = type_name # default
self.is_found = False
self.language = language
self.version_reqs = kwargs.get('version', None)
if isinstance(self.version_reqs, str):
self.version_reqs = [self.version_reqs]
self.required = kwargs.get('required', True)
self.silent = kwargs.get('silent', False)
self.static = kwargs.get('static', False)
if not isinstance(self.static, bool):
raise DependencyException('Static keyword must be boolean')
# Is this dependency for cross-compilation?
if 'native' in kwargs and self.env.is_cross_build():
self.want_cross = not kwargs['native']
else:
self.want_cross = self.env.is_cross_build()
self.clib_compiler = None
# Set the compiler that will be used by this dependency
# This is only used for configuration checks
if self.want_cross:
compilers = self.env.coredata.cross_compilers
else:
compilers = self.env.coredata.compilers
# Set the compiler for this dependency if a language is specified,
# else try to pick something that looks usable.
if self.language:
if self.language not in compilers:
m = self.name.capitalize() + ' requires a {0} compiler, but ' \
'{0} is not in the list of project languages'
raise DependencyException(m.format(self.language.capitalize()))
self.clib_compiler = compilers[self.language]
else:
# Try to find a compiler that can find C libraries for
# running compiler.find_library()
for lang in clib_langs:
self.clib_compiler = compilers.get(lang, None)
if self.clib_compiler:
break
def get_compiler(self):
return self.clib_compiler
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
new = copy.copy(self)
if not compile_args:
new.compile_args = []
if not link_args:
new.link_args = []
if not sources:
new.sources = []
return new
def log_details(self):
return ''
def log_info(self):
return ''
def log_tried(self):
return ''
# Check if dependency version meets the requirements
def _check_version(self):
if not self.is_found:
return
if self.version_reqs:
# an unknown version can never satisfy any requirement
if not self.version:
found_msg = ['Dependency', mlog.bold(self.name), 'found:']
found_msg += [mlog.red('NO'), 'unknown version, but need:',
self.version_reqs]
mlog.log(*found_msg)
if self.required:
m = 'Unknown version of dependency {!r}, but need {!r}.'
raise DependencyException(m.format(self.name, self.version_reqs))
else:
(self.is_found, not_found, found) = \
version_compare_many(self.version, self.version_reqs)
if not self.is_found:
found_msg = ['Dependency', mlog.bold(self.name), 'found:']
found_msg += [mlog.red('NO'),
'found {!r} but need:'.format(self.version),
', '.join(["'{}'".format(e) for e in not_found])]
if found:
found_msg += ['; matched:',
', '.join(["'{}'".format(e) for e in found])]
mlog.log(*found_msg)
if self.required:
                        m = 'Invalid version of dependency {!r}: need {!r}, found {!r}.'
raise DependencyException(m.format(self.name, not_found, self.version))
return
class NotFoundDependency(Dependency):
def __init__(self, environment):
super().__init__('not-found', {})
self.env = environment
self.name = 'not-found'
self.is_found = False
class ConfigToolDependency(ExternalDependency):
"""Class representing dependencies found using a config tool."""
tools = None
tool_name = None
__strip_version = re.compile(r'^[0-9.]*')
def __init__(self, name, environment, language, kwargs):
super().__init__('config-tool', environment, language, kwargs)
self.name = name
self.native = kwargs.get('native', False)
self.tools = listify(kwargs.get('tools', self.tools))
req_version = kwargs.get('version', None)
tool, version = self.find_config(req_version)
self.config = tool
self.is_found = self.report_config(version, req_version)
if not self.is_found:
self.config = None
return
self.version = version
if getattr(self, 'finish_init', None):
self.finish_init(self)
def _sanitize_version(self, version):
"""Remove any non-numeric, non-point version suffixes."""
m = self.__strip_version.match(version)
if m:
# Ensure that there isn't a trailing '.', such as an input like
# `1.2.3.git-1234`
return m.group(0).rstrip('.')
return version
@classmethod
def factory(cls, name, environment, language, kwargs, tools, tool_name, finish_init=None):
"""Constructor for use in dependencies that can be found multiple ways.
In addition to the standard constructor values, this constructor sets
the tool_name and tools values of the instance.
"""
# This deserves some explanation, because metaprogramming is hard.
# This uses type() to create a dynamic subclass of ConfigToolDependency
# with the tools and tool_name class attributes set, this class is then
# instantiated and returned. The reduce function (method) is also
# attached, since python's pickle module won't be able to do anything
# with this dynamically generated class otherwise.
def reduce(self):
return (cls._unpickle, (), self.__dict__)
sub = type('{}Dependency'.format(name.capitalize()), (cls, ),
{'tools': tools, 'tool_name': tool_name, '__reduce__': reduce, 'finish_init': staticmethod(finish_init)})
return sub(name, environment, language, kwargs)
@classmethod
def _unpickle(cls):
return cls.__new__(cls)
def find_config(self, versions=None):
"""Helper method that searchs for config tool binaries in PATH and
returns the one that best matches the given version requirements.
"""
if not isinstance(versions, list) and versions is not None:
versions = listify(versions)
if self.env.is_cross_build() and not self.native:
cross_file = self.env.cross_info.config['binaries']
try:
tools = [cross_file[self.tool_name]]
except KeyError:
mlog.warning('No entry for {0} specified in your cross file. '
'Falling back to searching PATH. This may find a '
'native version of {0}!'.format(self.tool_name))
tools = self.tools
else:
tools = self.tools
best_match = (None, None)
for tool in tools:
try:
p, out = Popen_safe([tool, '--version'])[:2]
except (FileNotFoundError, PermissionError):
continue
if p.returncode != 0:
continue
out = self._sanitize_version(out.strip())
# Some tools, like pcap-config don't supply a version, but also
# don't fail with --version, in that case just assume that there is
# only one version and return it.
if not out:
return (tool, None)
if versions:
is_found = version_compare_many(out, versions)[0]
# This allows returning a found version without a config tool,
# which is useful to inform the user that you found version x,
# but y was required.
if not is_found:
tool = None
if best_match[1]:
if version_compare(out, '> {}'.format(best_match[1])):
best_match = (tool, out)
else:
best_match = (tool, out)
return best_match
def report_config(self, version, req_version):
"""Helper method to print messages about the tool."""
if self.config is None:
if version is not None:
mlog.log('Found', mlog.bold(self.tool_name), repr(version),
mlog.red('NO'), '(needed', req_version, ')')
else:
mlog.log('Found', mlog.bold(self.tool_name), repr(req_version),
mlog.red('NO'))
return False
mlog.log('Found {}:'.format(self.tool_name), mlog.bold(shutil.which(self.config)),
'({})'.format(version))
return True
def get_config_value(self, args, stage):
p, out, err = Popen_safe([self.config] + args)
# This is required to keep shlex from stripping path separators on
# Windows. Also, don't put escape sequences in config values, okay?
out = out.replace('\\', '\\\\')
if p.returncode != 0:
if self.required:
raise DependencyException(
'Could not generate {} for {}.\n{}'.format(
stage, self.name, err))
return []
return shlex.split(out)
@staticmethod
def get_methods():
return [DependencyMethods.AUTO, DependencyMethods.CONFIG_TOOL]
def get_configtool_variable(self, variable_name):
p, out, _ = Popen_safe([self.config, '--{}'.format(variable_name)])
if p.returncode != 0:
if self.required:
raise DependencyException(
'Could not get variable "{}" for dependency {}'.format(
variable_name, self.name))
variable = out.strip()
mlog.debug('Got config-tool variable {} : {}'.format(variable_name, variable))
return variable
def log_tried(self):
return self.type_name
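# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module: the factory()
# classmethod above creates a per-tool subclass with type() and attaches a
# __reduce__ so that instances of the dynamically generated class can still
# be pickled (by round-tripping through the importable parent class).  The
# self-contained toy below mimics what pickle does with that reduce tuple;
# every name in it (DemoBase, _demo_dynamic_subclass_pattern, ...) is
# hypothetical and exists only for this sketch.
def _demo_dynamic_subclass_pattern():
    class DemoBase:
        @classmethod
        def _unpickle(cls):
            return cls.__new__(cls)

    def reduce(self):
        # Rebuild via the importable parent and restore the instance dict,
        # the same shape of tuple used by the factory() above.
        return (DemoBase._unpickle, (), self.__dict__)

    DemoSub = type('DemoToolDependency', (DemoBase,),
                   {'tool_name': 'demo-config', '__reduce__': reduce})
    obj = DemoSub()
    obj.version = '1.0'
    fn, args, state = obj.__reduce__()
    clone = fn(*args)             # a plain DemoBase instance
    clone.__dict__.update(state)
    return clone.version          # -> '1.0'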
class PkgConfigDependency(ExternalDependency):
# The class's copy of the pkg-config path. Avoids having to search for it
# multiple times in the same Meson invocation.
class_pkgbin = None
# We cache all pkg-config subprocess invocations to avoid redundant calls
pkgbin_cache = {}
def __init__(self, name, environment, kwargs, language=None):
super().__init__('pkgconfig', environment, language, kwargs)
self.name = name
self.is_libtool = False
# Store a copy of the pkg-config path on the object itself so it is
# stored in the pickled coredata and recovered.
self.pkgbin = None
# When finding dependencies for cross-compiling, we don't care about
# the 'native' pkg-config
if self.want_cross:
if 'pkgconfig' not in environment.cross_info.config['binaries']:
if self.required:
raise DependencyException('Pkg-config binary missing from cross file')
else:
potential_pkgbin = ExternalProgram.from_cross_info(environment.cross_info, 'pkgconfig')
if potential_pkgbin.found():
self.pkgbin = potential_pkgbin
else:
mlog.debug('Cross pkg-config %s not found.' % potential_pkgbin.name)
# Only search for the native pkg-config the first time and
# store the result in the class definition
elif PkgConfigDependency.class_pkgbin is None:
self.pkgbin = self.check_pkgconfig()
PkgConfigDependency.class_pkgbin = self.pkgbin
else:
self.pkgbin = PkgConfigDependency.class_pkgbin
if not self.pkgbin:
if self.required:
raise DependencyException('Pkg-config not found.')
return
mlog.debug('Determining dependency {!r} with pkg-config executable '
'{!r}'.format(name, self.pkgbin.get_path()))
ret, self.version = self._call_pkgbin(['--modversion', name])
if ret != 0:
return
try:
# Fetch cargs to be used while using this dependency
self._set_cargs()
# Fetch the libraries and library paths needed for using this
self._set_libs()
except DependencyException as e:
if self.required:
raise
else:
self.compile_args = []
self.link_args = []
self.is_found = False
self.reason = e
self.is_found = True
def __repr__(self):
s = '<{0} {1}: {2} {3}>'
return s.format(self.__class__.__name__, self.name, self.is_found,
self.version_reqs)
def _call_pkgbin_real(self, args, env):
cmd = self.pkgbin.get_command() + args
p, out = Popen_safe(cmd, env=env)[0:2]
rc, out = p.returncode, out.strip()
call = ' '.join(cmd)
mlog.debug("Called `{}` -> {}\n{}".format(call, rc, out))
return rc, out
def _call_pkgbin(self, args, env=None):
if env is None:
fenv = env
env = os.environ
else:
fenv = frozenset(env.items())
targs = tuple(args)
cache = PkgConfigDependency.pkgbin_cache
if (self.pkgbin, targs, fenv) not in cache:
cache[(self.pkgbin, targs, fenv)] = self._call_pkgbin_real(args, env)
return cache[(self.pkgbin, targs, fenv)]
def _convert_mingw_paths(self, args):
'''
Both MSVC and native Python on Windows cannot handle MinGW-esque /c/foo
paths so convert them to C:/foo. We cannot resolve other paths starting
with / like /home/foo so leave them as-is so that the user gets an
error/warning from the compiler/linker.
'''
if not mesonlib.is_windows():
return args
converted = []
for arg in args:
pargs = []
# Library search path
if arg.startswith('-L/'):
pargs = PurePath(arg[2:]).parts
tmpl = '-L{}:/{}'
elif arg.startswith('-I/'):
pargs = PurePath(arg[2:]).parts
tmpl = '-I{}:/{}'
# Full path to library or .la file
elif arg.startswith('/'):
pargs = PurePath(arg).parts
tmpl = '{}:/{}'
if len(pargs) > 1 and len(pargs[1]) == 1:
arg = tmpl.format(pargs[1], '/'.join(pargs[2:]))
converted.append(arg)
return converted
def _set_cargs(self):
env = None
if self.language == 'fortran':
# gfortran doesn't appear to look in system paths for INCLUDE files,
# so don't allow pkg-config to suppress -I flags for system paths
env = os.environ.copy()
env['PKG_CONFIG_ALLOW_SYSTEM_CFLAGS'] = '1'
ret, out = self._call_pkgbin(['--cflags', self.name], env=env)
if ret != 0:
raise DependencyException('Could not generate cargs for %s:\n\n%s' %
(self.name, out))
self.compile_args = self._convert_mingw_paths(shlex.split(out))
def _search_libs(self, out, out_raw):
'''
@out: PKG_CONFIG_ALLOW_SYSTEM_LIBS=1 pkg-config --libs
@out_raw: pkg-config --libs
We always look for the file ourselves instead of depending on the
compiler to find it with -lfoo or foo.lib (if possible) because:
1. We want to be able to select static or shared
2. We need the full path of the library to calculate RPATH values
3. De-dup of libraries is easier when we have absolute paths
Libraries that are provided by the toolchain or are not found by
find_library() will be added with -L -l pairs.
'''
# Library paths should be safe to de-dup
#
# First, figure out what library paths to use. Originally, we were
# doing this as part of the loop, but due to differences in the order
# of -L values between pkg-config and pkgconf, we need to do that as
# a separate step. See:
# https://github.com/mesonbuild/meson/issues/3951
# https://github.com/mesonbuild/meson/issues/4023
#
# Separate system and prefix paths, and ensure that prefix paths are
# always searched first.
prefix_libpaths = OrderedSet()
# We also store this raw_link_args on the object later
raw_link_args = self._convert_mingw_paths(shlex.split(out_raw))
for arg in raw_link_args:
if arg.startswith('-L') and not arg.startswith(('-L-l', '-L-L')):
prefix_libpaths.add(arg[2:])
system_libpaths = OrderedSet()
full_args = self._convert_mingw_paths(shlex.split(out))
for arg in full_args:
if arg.startswith(('-L-l', '-L-L')):
# These are D language arguments, not library paths
continue
if arg.startswith('-L') and arg[2:] not in prefix_libpaths:
system_libpaths.add(arg[2:])
# Use this re-ordered path list for library resolution
libpaths = list(prefix_libpaths) + list(system_libpaths)
# Track -lfoo libraries to avoid duplicate work
libs_found = OrderedSet()
# Track not-found libraries to know whether to add library paths
libs_notfound = []
libtype = 'static' if self.static else 'default'
# Generate link arguments for this library
link_args = []
for lib in full_args:
if lib.startswith(('-L-l', '-L-L')):
# These are D language arguments, add them as-is
pass
elif lib.startswith('-L'):
# We already handled library paths above
continue
elif lib.startswith('-l'):
# Don't resolve the same -lfoo argument again
if lib in libs_found:
continue
if self.clib_compiler:
args = self.clib_compiler.find_library(lib[2:], self.env,
libpaths, libtype)
# If the project only uses a non-clib language such as D, Rust,
# C#, Python, etc, all we can do is limp along by adding the
# arguments as-is and then adding the libpaths at the end.
else:
args = None
if args is not None:
libs_found.add(lib)
# Replace -l arg with full path to library if available
# else, library is either to be ignored, or is provided by
# the compiler, can't be resolved, and should be used as-is
if args:
if not args[0].startswith('-l'):
lib = args[0]
else:
continue
else:
# Library wasn't found, maybe we're looking in the wrong
# places or the library will be provided with LDFLAGS or
# LIBRARY_PATH from the environment (on macOS), and many
# other edge cases that we can't account for.
#
# Add all -L paths and use it as -lfoo
if lib in libs_notfound:
continue
if self.static:
mlog.warning('Static library {!r} not found for dependency {!r}, may '
'not be statically linked'.format(lib[2:], self.name))
libs_notfound.append(lib)
elif lib.endswith(".la"):
shared_libname = self.extract_libtool_shlib(lib)
shared_lib = os.path.join(os.path.dirname(lib), shared_libname)
if not os.path.exists(shared_lib):
shared_lib = os.path.join(os.path.dirname(lib), ".libs", shared_libname)
if not os.path.exists(shared_lib):
                    raise DependencyException('Got a libtool specific "%s" dependency, '
                                              'but we could not compute the actual shared '
                                              'library path' % lib)
self.is_libtool = True
lib = shared_lib
if lib in link_args:
continue
link_args.append(lib)
# Add all -Lbar args if we have -lfoo args in link_args
if libs_notfound:
# Order of -L flags doesn't matter with ld, but it might with other
# linkers such as MSVC, so prepend them.
link_args = ['-L' + lp for lp in prefix_libpaths] + link_args
return link_args, raw_link_args
def _set_libs(self):
env = None
libcmd = [self.name, '--libs']
if self.static:
libcmd.append('--static')
# Force pkg-config to output -L fields even if they are system
# paths so we can do manual searching with cc.find_library() later.
env = os.environ.copy()
env['PKG_CONFIG_ALLOW_SYSTEM_LIBS'] = '1'
ret, out = self._call_pkgbin(libcmd, env=env)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n\n%s' %
(self.name, out))
# Also get the 'raw' output without -Lfoo system paths for adding -L
# args with -lfoo when a library can't be found, and also in
# gnome.generate_gir + gnome.gtkdoc which need -L -l arguments.
ret, out_raw = self._call_pkgbin(libcmd)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n\n%s' %
(self.name, out_raw))
self.link_args, self.raw_link_args = self._search_libs(out, out_raw)
def get_pkgconfig_variable(self, variable_name, kwargs):
options = ['--variable=' + variable_name, self.name]
if 'define_variable' in kwargs:
definition = kwargs.get('define_variable', [])
if not isinstance(definition, list):
raise MesonException('define_variable takes a list')
if len(definition) != 2 or not all(isinstance(i, str) for i in definition):
raise MesonException('define_variable must be made up of 2 strings for VARIABLENAME and VARIABLEVALUE')
options = ['--define-variable=' + '='.join(definition)] + options
ret, out = self._call_pkgbin(options)
variable = ''
if ret != 0:
if self.required:
raise DependencyException('dependency %s not found.' %
(self.name))
else:
variable = out.strip()
# pkg-config doesn't distinguish between empty and non-existent variables
# use the variable list to check for variable existence
if not variable:
ret, out = self._call_pkgbin(['--print-variables', self.name])
if not re.search(r'^' + variable_name + r'$', out, re.MULTILINE):
if 'default' in kwargs:
variable = kwargs['default']
else:
mlog.warning("pkgconfig variable '%s' not defined for dependency %s." % (variable_name, self.name))
mlog.debug('Got pkgconfig variable %s : %s' % (variable_name, variable))
return variable
@staticmethod
def get_methods():
return [DependencyMethods.PKGCONFIG]
def check_pkgconfig(self):
evar = 'PKG_CONFIG'
if evar in os.environ:
pkgbin = os.environ[evar].strip()
else:
pkgbin = 'pkg-config'
pkgbin = ExternalProgram(pkgbin, silent=True)
if pkgbin.found():
try:
p, out = Popen_safe(pkgbin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found pkg-config {!r} but couldn\'t run it'
''.format(' '.join(pkgbin.get_command())))
# Set to False instead of None to signify that we've already
# searched for it and not found it
pkgbin = False
except (FileNotFoundError, PermissionError):
pkgbin = False
else:
pkgbin = False
if not self.silent:
if pkgbin:
mlog.log('Found pkg-config:', mlog.bold(pkgbin.get_path()),
'(%s)' % out.strip())
else:
mlog.log('Found Pkg-config:', mlog.red('NO'))
return pkgbin
def extract_field(self, la_file, fieldname):
with open(la_file) as f:
for line in f:
arr = line.strip().split('=')
if arr[0] == fieldname:
return arr[1][1:-1]
return None
def extract_dlname_field(self, la_file):
return self.extract_field(la_file, 'dlname')
def extract_libdir_field(self, la_file):
return self.extract_field(la_file, 'libdir')
def extract_libtool_shlib(self, la_file):
'''
Returns the path to the shared library
corresponding to this .la file
'''
dlname = self.extract_dlname_field(la_file)
if dlname is None:
return None
# Darwin uses absolute paths where possible; since the libtool files never
# contain absolute paths, use the libdir field
if mesonlib.is_osx():
dlbasename = os.path.basename(dlname)
libdir = self.extract_libdir_field(la_file)
if libdir is None:
return dlbasename
return os.path.join(libdir, dlbasename)
# From the comments in extract_libtool(), older libtools had
# a path rather than the raw dlname
return os.path.basename(dlname)
def log_tried(self):
return self.type_name
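# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module: _convert_mingw_paths()
# above rewrites MinGW-style paths such as -L/c/foo into -Lc:/foo so that
# MSVC and native Windows Python can understand them.  The stripped-down
# helper below shows only the '-L/x/...' case and the PurePath trick; the
# name _demo_convert_mingw_libpath is hypothetical.
def _demo_convert_mingw_libpath(arg):
    from pathlib import PurePath
    if arg.startswith('-L/'):
        parts = PurePath(arg[2:]).parts          # e.g. ('/', 'c', 'foo', 'lib')
        if len(parts) > 1 and len(parts[1]) == 1:
            return '-L{}:/{}'.format(parts[1], '/'.join(parts[2:]))
    return arg

# _demo_convert_mingw_libpath('-L/c/msys64/mingw64/lib') -> '-Lc:/msys64/mingw64/lib'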
class DubDependency(ExternalDependency):
class_dubbin = None
def __init__(self, name, environment, kwargs):
super().__init__('dub', environment, 'd', kwargs)
self.name = name
self.compiler = super().get_compiler()
self.module_path = None
if 'required' in kwargs:
self.required = kwargs.get('required')
if DubDependency.class_dubbin is None:
self.dubbin = self._check_dub()
DubDependency.class_dubbin = self.dubbin
else:
self.dubbin = DubDependency.class_dubbin
if not self.dubbin:
if self.required:
raise DependencyException('DUB not found.')
self.is_found = False
return
mlog.debug('Determining dependency {!r} with DUB executable '
'{!r}'.format(name, self.dubbin.get_path()))
# we need to know the target architecture
arch = self.compiler.arch
# Ask dub for the package
ret, res = self._call_dubbin(['describe', name, '--arch=' + arch])
if ret != 0:
self.is_found = False
return
comp = self.compiler.get_id().replace('llvm', 'ldc').replace('gcc', 'gdc')
packages = []
description = json.loads(res)
for package in description['packages']:
packages.append(package['name'])
if package['name'] == name:
self.is_found = True
not_lib = True
if 'targetType' in package:
if package['targetType'] == 'library':
not_lib = False
if not_lib:
mlog.error(mlog.bold(name), "found but it isn't a library")
self.is_found = False
return
self.module_path = self._find_right_lib_path(package['path'], comp, description, True, package['targetFileName'])
if not os.path.exists(self.module_path):
# check if the dependency was built for other archs
archs = [['x86_64'], ['x86'], ['x86', 'x86_mscoff']]
for a in archs:
description_a = copy.deepcopy(description)
description_a['architecture'] = a
arch_module_path = self._find_right_lib_path(package['path'], comp, description_a, True, package['targetFileName'])
if arch_module_path:
mlog.error(mlog.bold(name), "found but it wasn't compiled for", mlog.bold(arch))
self.is_found = False
return
mlog.error(mlog.bold(name), "found but it wasn't compiled with", mlog.bold(comp))
self.is_found = False
return
self.version = package['version']
self.pkg = package
if self.pkg['targetFileName'].endswith('.a'):
self.static = True
self.compile_args = []
for flag in self.pkg['dflags']:
self.link_args.append(flag)
for path in self.pkg['importPaths']:
self.compile_args.append('-I' + os.path.join(self.pkg['path'], path))
self.link_args = self.raw_link_args = []
for flag in self.pkg['lflags']:
self.link_args.append(flag)
self.link_args.append(os.path.join(self.module_path, self.pkg['targetFileName']))
# Handle dependencies
libs = []
def add_lib_args(field_name, target):
if field_name in target['buildSettings']:
for lib in target['buildSettings'][field_name]:
if lib not in libs:
libs.append(lib)
                        if os.name != 'nt':
pkgdep = PkgConfigDependency(lib, environment, {'required': 'true', 'silent': 'true'})
for arg in pkgdep.get_compile_args():
self.compile_args.append(arg)
for arg in pkgdep.get_link_args():
self.link_args.append(arg)
for arg in pkgdep.get_link_args(raw=True):
self.raw_link_args.append(arg)
for target in description['targets']:
if target['rootPackage'] in packages:
add_lib_args('libs', target)
add_lib_args('libs-{}'.format(platform.machine()), target)
for file in target['buildSettings']['linkerFiles']:
lib_path = self._find_right_lib_path(file, comp, description)
if lib_path:
self.link_args.append(lib_path)
else:
self.is_found = False
def get_compiler(self):
return self.compiler
def _find_right_lib_path(self, default_path, comp, description, folder_only=False, file_name=''):
module_path = lib_file_name = ''
if folder_only:
module_path = default_path
lib_file_name = file_name
else:
module_path = os.path.dirname(default_path)
lib_file_name = os.path.basename(default_path)
module_build_path = os.path.join(module_path, '.dub', 'build')
# Get D version implemented in the compiler
# gdc doesn't support this
ret, res = self._call_dubbin(['--version'])
if ret != 0:
            mlog.error('Failed to run', mlog.bold(comp))
return
d_ver = re.search('v[0-9].[0-9][0-9][0-9].[0-9]', res) # Ex.: v2.081.2
if d_ver is not None:
d_ver = d_ver.group().rsplit('.', 1)[0].replace('v', '').replace('.', '') # Fix structure. Ex.: 2081
else:
d_ver = '' # gdc
if not os.path.isdir(module_build_path):
return ''
# Ex.: library-debug-linux.posix-x86_64-ldc_2081-EF934983A3319F8F8FF2F0E107A363BA
build_name = 'library-{}-{}-{}-{}_{}'.format(description['buildType'], '.'.join(description['platform']), '.'.join(description['architecture']), comp, d_ver)
for entry in os.listdir(module_build_path):
if entry.startswith(build_name):
for file in os.listdir(os.path.join(module_build_path, entry)):
if file == lib_file_name:
if folder_only:
return os.path.join(module_build_path, entry)
else:
return os.path.join(module_build_path, entry, lib_file_name)
return ''
def _call_dubbin(self, args, env=None):
p, out = Popen_safe(self.dubbin.get_command() + args, env=env)[0:2]
return p.returncode, out.strip()
def _call_copmbin(self, args, env=None):
p, out = Popen_safe(self.compiler.get_exelist() + args, env=env)[0:2]
return p.returncode, out.strip()
def _check_dub(self):
dubbin = ExternalProgram('dub', silent=True)
if dubbin.found():
try:
p, out = Popen_safe(dubbin.get_command() + ['--version'])[0:2]
if p.returncode != 0:
mlog.warning('Found dub {!r} but couldn\'t run it'
''.format(' '.join(dubbin.get_command())))
# Set to False instead of None to signify that we've already
# searched for it and not found it
dubbin = False
except (FileNotFoundError, PermissionError):
dubbin = False
else:
dubbin = False
if dubbin:
mlog.log('Found DUB:', mlog.bold(dubbin.get_path()),
'(%s)' % out.strip())
else:
mlog.log('Found DUB:', mlog.red('NO'))
return dubbin
@staticmethod
def get_methods():
return [DependencyMethods.DUB]
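# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module: _find_right_lib_path()
# above matches DUB build directories whose names encode build type, platform,
# architecture, compiler and D frontend version, e.g.
# 'library-debug-linux.posix-x86_64-ldc_2081-<hash>'.  The helper below simply
# recomposes such a prefix from hypothetical `dub describe` output to make the
# naming scheme explicit; _demo_dub_build_prefix is not a real Meson function.
def _demo_dub_build_prefix(description, comp, d_ver):
    return 'library-{}-{}-{}-{}_{}'.format(
        description['buildType'],
        '.'.join(description['platform']),
        '.'.join(description['architecture']),
        comp, d_ver)

# _demo_dub_build_prefix({'buildType': 'debug',
#                         'platform': ['linux', 'posix'],
#                         'architecture': ['x86_64']}, 'ldc', '2081')
#   -> 'library-debug-linux.posix-x86_64-ldc_2081'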
class ExternalProgram:
windows_exts = ('exe', 'msc', 'com', 'bat', 'cmd')
def __init__(self, name, command=None, silent=False, search_dir=None):
self.name = name
if command is not None:
self.command = listify(command)
else:
self.command = self._search(name, search_dir)
# Set path to be the last item that is actually a file (in order to
# skip options in something like ['python', '-u', 'file.py']. If we
# can't find any components, default to the last component of the path.
self.path = self.command[-1]
for i in range(len(self.command) - 1, -1, -1):
arg = self.command[i]
if arg is not None and os.path.isfile(arg):
self.path = arg
break
if not silent:
if self.found():
mlog.log('Program', mlog.bold(name), 'found:', mlog.green('YES'),
'(%s)' % ' '.join(self.command))
else:
mlog.log('Program', mlog.bold(name), 'found:', mlog.red('NO'))
def __repr__(self):
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def description(self):
'''Human friendly description of the command'''
return ' '.join(self.command)
@staticmethod
def from_cross_info(cross_info, name):
if name not in cross_info.config['binaries']:
return NonExistingExternalProgram()
command = cross_info.config['binaries'][name]
if not isinstance(command, (list, str)):
raise MesonException('Invalid type {!r} for binary {!r} in cross file'
''.format(command, name))
if isinstance(command, list):
if len(command) == 1:
command = command[0]
# We cannot do any searching if the command is a list, and we don't
# need to search if the path is an absolute path.
if isinstance(command, list) or os.path.isabs(command):
return ExternalProgram(name, command=command, silent=True)
# Search for the command using the specified string!
return ExternalProgram(command, silent=True)
@staticmethod
def _shebang_to_cmd(script):
"""
Check if the file has a shebang and manually parse it to figure out
the interpreter to use. This is useful if the script is not executable
or if we're on Windows (which does not understand shebangs).
"""
try:
with open(script) as f:
first_line = f.readline().strip()
if first_line.startswith('#!'):
# In a shebang, everything before the first space is assumed to
# be the command to run and everything after the first space is
# the single argument to pass to that command. So we must split
# exactly once.
commands = first_line[2:].split('#')[0].strip().split(maxsplit=1)
if mesonlib.is_windows():
# Windows does not have UNIX paths so remove them,
# but don't remove Windows paths
if commands[0].startswith('/'):
commands[0] = commands[0].split('/')[-1]
if len(commands) > 0 and commands[0] == 'env':
commands = commands[1:]
# Windows does not ship python3.exe, but we know the path to it
if len(commands) > 0 and commands[0] == 'python3':
commands = mesonlib.python_command + commands[1:]
elif mesonlib.is_haiku():
# Haiku does not have /usr, but a lot of scripts assume that
# /usr/bin/env always exists. Detect that case and run the
# script with the interpreter after it.
if commands[0] == '/usr/bin/env':
commands = commands[1:]
# We know what python3 is, we're running on it
if len(commands) > 0 and commands[0] == 'python3':
commands = mesonlib.python_command + commands[1:]
return commands + [script]
except Exception as e:
mlog.debug(e)
pass
mlog.debug('Unusable script {!r}'.format(script))
return False
def _is_executable(self, path):
suffix = os.path.splitext(path)[-1].lower()[1:]
if mesonlib.is_windows():
if suffix in self.windows_exts:
return True
elif os.access(path, os.X_OK):
return not os.path.isdir(path)
return False
def _search_dir(self, name, search_dir):
if search_dir is None:
return False
trial = os.path.join(search_dir, name)
if os.path.exists(trial):
if self._is_executable(trial):
return [trial]
# Now getting desperate. Maybe it is a script file that is
# a) not chmodded executable, or
# b) we are on windows so they can't be directly executed.
return self._shebang_to_cmd(trial)
else:
if mesonlib.is_windows():
for ext in self.windows_exts:
trial_ext = '{}.{}'.format(trial, ext)
if os.path.exists(trial_ext):
return [trial_ext]
return False
def _search_windows_special_cases(self, name, command):
'''
Lots of weird Windows quirks:
1. PATH search for @name returns files with extensions from PATHEXT,
but only self.windows_exts are executable without an interpreter.
2. @name might be an absolute path to an executable, but without the
extension. This works inside MinGW so people use it a lot.
3. The script is specified without an extension, in which case we have
to manually search in PATH.
4. More special-casing for the shebang inside the script.
'''
if command:
# On Windows, even if the PATH search returned a full path, we can't be
# sure that it can be run directly if it's not a native executable.
# For instance, interpreted scripts sometimes need to be run explicitly
# with an interpreter if the file association is not done properly.
name_ext = os.path.splitext(command)[1]
if name_ext[1:].lower() in self.windows_exts:
# Good, it can be directly executed
return [command]
# Try to extract the interpreter from the shebang
commands = self._shebang_to_cmd(command)
if commands:
return commands
return [None]
# Maybe the name is an absolute path to a native Windows
# executable, but without the extension. This is technically wrong,
# but many people do it because it works in the MinGW shell.
if os.path.isabs(name):
for ext in self.windows_exts:
command = '{}.{}'.format(name, ext)
if os.path.exists(command):
return [command]
# On Windows, interpreted scripts must have an extension otherwise they
# cannot be found by a standard PATH search. So we do a custom search
# where we manually search for a script with a shebang in PATH.
search_dirs = os.environ.get('PATH', '').split(';')
for search_dir in search_dirs:
commands = self._search_dir(name, search_dir)
if commands:
return commands
return [None]
def _search(self, name, search_dir):
'''
Search in the specified dir for the specified executable by name
and if not found search in PATH
'''
commands = self._search_dir(name, search_dir)
if commands:
return commands
# Do a standard search in PATH
command = shutil.which(name)
if mesonlib.is_windows():
return self._search_windows_special_cases(name, command)
# On UNIX-like platforms, shutil.which() is enough to find
# all executables whether in PATH or with an absolute path
return [command]
def found(self):
return self.command[0] is not None
def get_command(self):
return self.command[:]
def get_path(self):
return self.path
def get_name(self):
return self.name
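# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module: _shebang_to_cmd()
# above reads a script's first line and turns '#!/usr/bin/env python3'-style
# shebangs into an argv prefix.  The toy parser below shows the core rule
# (strip a trailing comment, split at most once, drop a leading env) without
# the Windows/Haiku special cases; parse_shebang_line is a hypothetical name.
def parse_shebang_line(first_line, script):
    if not first_line.startswith('#!'):
        return None
    parts = first_line[2:].split('#')[0].strip().split(maxsplit=1)
    if parts and parts[0].endswith('/env'):
        parts = parts[1:]        # '/usr/bin/env python3' -> ['python3']
    return parts + [script] if parts else None

# parse_shebang_line('#!/usr/bin/env python3', 'build.py') -> ['python3', 'build.py']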
class NonExistingExternalProgram(ExternalProgram):
"A program that will never exist"
def __init__(self):
self.name = 'nonexistingprogram'
self.command = [None]
self.path = None
def __repr__(self):
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def found(self):
return False
class EmptyExternalProgram(ExternalProgram):
'''
A program object that returns an empty list of commands. Used for cases
such as a cross file exe_wrapper to represent that it's not required.
'''
def __init__(self):
self.name = None
self.command = []
self.path = None
def __repr__(self):
r = '<{} {!r} -> {!r}>'
return r.format(self.__class__.__name__, self.name, self.command)
def found(self):
return True
class ExternalLibrary(ExternalDependency):
def __init__(self, name, link_args, environment, language, silent=False):
super().__init__('library', environment, language, {})
self.name = name
self.language = language
self.is_found = False
if link_args:
self.is_found = True
self.link_args = link_args
if not silent:
if self.is_found:
mlog.log('Library', mlog.bold(name), 'found:', mlog.green('YES'))
else:
mlog.log('Library', mlog.bold(name), 'found:', mlog.red('NO'))
def get_link_args(self, language=None, **kwargs):
'''
External libraries detected using a compiler must only be used with
compatible code. For instance, Vala libraries (.vapi files) cannot be
used with C code, and not all Rust library types can be linked with
C-like code. Note that C++ libraries *can* be linked with C code with
a C++ linker (and vice-versa).
'''
# Using a vala library in a non-vala target, or a non-vala library in a vala target
# XXX: This should be extended to other non-C linkers such as Rust
if (self.language == 'vala' and language != 'vala') or \
(language == 'vala' and self.language != 'vala'):
return []
return super().get_link_args(**kwargs)
def get_partial_dependency(self, *, compile_args=False, link_args=False,
links=False, includes=False, sources=False):
# External library only has link_args, so ignore the rest of the
# interface.
new = copy.copy(self)
if not link_args:
new.link_args = []
return new
class ExtraFrameworkDependency(ExternalDependency):
def __init__(self, name, required, path, env, lang, kwargs):
super().__init__('extraframeworks', env, lang, kwargs)
self.name = name
self.required = required
self.detect(name, path)
if self.found():
self.compile_args = ['-I' + os.path.join(self.path, self.name, 'Headers')]
self.link_args = ['-F' + self.path, '-framework', self.name.split('.')[0]]
def detect(self, name, path):
# should use the compiler to look for frameworks, rather than peering at
# the filesystem, so we can also find them when cross-compiling
if self.want_cross:
return
lname = name.lower()
if path is None:
paths = ['/System/Library/Frameworks', '/Library/Frameworks']
else:
paths = [path]
for p in paths:
for d in os.listdir(p):
fullpath = os.path.join(p, d)
if lname != d.rsplit('.', 1)[0].lower():
continue
if not stat.S_ISDIR(os.stat(fullpath).st_mode):
continue
self.path = p
self.name = d
self.is_found = True
return
def log_info(self):
return os.path.join(self.path, self.name)
def log_tried(self):
return 'framework'
def get_dep_identifier(name, kwargs, want_cross):
# Need immutable objects since the identifier will be used as a dict key
version_reqs = listify(kwargs.get('version', []))
if isinstance(version_reqs, list):
version_reqs = frozenset(version_reqs)
identifier = (name, version_reqs, want_cross)
for key, value in kwargs.items():
# 'version' is embedded above as the second element for easy access
# 'native' is handled above with `want_cross`
# 'required' is irrelevant for caching; the caller handles it separately
# 'fallback' subprojects cannot be cached -- they must be initialized
if key in ('version', 'native', 'required', 'fallback',):
continue
# All keyword arguments are strings, ints, or lists (or lists of lists)
if isinstance(value, list):
value = frozenset(listify(value))
identifier += (key, value)
return identifier
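# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module: get_dep_identifier()
# above flattens a dependency() call into a hashable tuple so repeated lookups
# can be served from a cache dict.  The minimal version below shows the same
# idea -- lists become frozensets, per-call keywords such as 'required' are
# skipped -- using only hypothetical names (it also sorts the keywords for
# determinism, which the real implementation does not need to do).
def _demo_dep_cache_key(name, kwargs, want_cross=False):
    versions = kwargs.get('version', [])
    if isinstance(versions, str):
        versions = [versions]
    key = (name, frozenset(versions), want_cross)
    for k, v in sorted(kwargs.items()):
        if k in ('version', 'native', 'required', 'fallback'):
            continue
        if isinstance(v, list):
            v = frozenset(v)
        key += (k, v)
    return key

# cache = {}
# cache[_demo_dep_cache_key('zlib', {'version': '>=1.2.8', 'static': True})] = '...'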
display_name_map = {
'boost': 'Boost',
'dub': 'DUB',
'gmock': 'GMock',
'gtest': 'GTest',
'llvm': 'LLVM',
'mpi': 'MPI',
'openmp': 'OpenMP',
'wxwidgets': 'WxWidgets',
}
def find_external_dependency(name, env, kwargs):
assert(name)
required = kwargs.get('required', True)
if not isinstance(required, bool):
raise DependencyException('Keyword "required" must be a boolean.')
if not isinstance(kwargs.get('method', ''), str):
raise DependencyException('Keyword "method" must be a string.')
lname = name.lower()
if lname not in _packages_accept_language and 'language' in kwargs:
raise DependencyException('%s dependency does not accept "language" keyword argument' % (name, ))
if not isinstance(kwargs.get('version', ''), (str, list)):
raise DependencyException('Keyword "Version" must be string or list.')
# display the dependency name with correct casing
display_name = display_name_map.get(lname, lname)
# if this isn't a cross-build, it's uninteresting if native: is used or not
if not env.is_cross_build():
type_text = 'Dependency'
else:
type_text = 'Native' if kwargs.get('native', False) else 'Cross'
type_text += ' dependency'
# build a list of dependency methods to try
candidates = _build_external_dependency_list(name, env, kwargs)
pkg_exc = None
pkgdep = []
details = ''
for c in candidates:
# try this dependency method
try:
d = c()
d._check_version()
pkgdep.append(d)
except Exception as e:
mlog.debug(str(e))
# store the first exception we see
if not pkg_exc:
pkg_exc = e
else:
details = d.log_details()
if details:
details = '(' + details + ') '
if 'language' in kwargs:
details += 'for ' + d.language + ' '
# if the dependency was found
if d.found():
info = []
if d.version:
info.append(d.version)
log_info = d.log_info()
if log_info:
info.append('(' + log_info + ')')
info = ' '.join(info)
mlog.log(type_text, mlog.bold(display_name), details + 'found:', mlog.green('YES'), info)
return d
# otherwise, the dependency could not be found
tried_methods = [d.log_tried() for d in pkgdep if d.log_tried()]
if tried_methods:
tried = '{}'.format(mlog.format_list(tried_methods))
else:
tried = ''
mlog.log(type_text, mlog.bold(display_name), details + 'found:', mlog.red('NO'),
'(tried {})'.format(tried) if tried else '')
if required:
# if exception(s) occurred, re-raise the first one (on the grounds that
# it came from a preferred dependency detection method)
if pkg_exc:
raise pkg_exc
# we have a list of failed ExternalDependency objects, so we can report
# the methods we tried to find the dependency
raise DependencyException('Dependency "%s" not found' % (name) +
(', tried %s' % (tried) if tried else ''))
# return the last failed dependency object
if pkgdep:
return pkgdep[-1]
# this should never happen
raise DependencyException('Dependency "%s" not found, but no dependency object to return' % (name))
def _build_external_dependency_list(name, env, kwargs):
# Is there a specific dependency detector for this dependency?
lname = name.lower()
if lname in packages:
# Create the list of dependency object constructors using a factory
# class method, if one exists, otherwise the list just consists of the
# constructor
if getattr(packages[lname], '_factory', None):
dep = packages[lname]._factory(env, kwargs)
else:
dep = [functools.partial(packages[lname], env, kwargs)]
return dep
candidates = []
# If it's explicitly requested, use the dub detection method (only)
if 'dub' == kwargs.get('method', ''):
candidates.append(functools.partial(DubDependency, name, env, kwargs))
return candidates
# TBD: other values of method should control what method(s) are used
# Otherwise, just use the pkgconfig dependency detector
candidates.append(functools.partial(PkgConfigDependency, name, env, kwargs))
# On OSX, also try framework dependency detector
if mesonlib.is_osx():
candidates.append(functools.partial(ExtraFrameworkDependency, name,
False, None, env, None, kwargs))
return candidates
def strip_system_libdirs(environment, link_args):
"""Remove -L<system path> arguments.
    Leaving these in will break builds where a user has one version of a
    library in a system path and a different version elsewhere, and wants to
    link against the version outside the system path.
"""
exclude = {'-L{}'.format(p) for p in environment.get_compiler_system_dirs()}
return [l for l in link_args if l not in exclude]
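# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream module: strip_system_libdirs()
# above drops '-L<system dir>' entries so a copy of a library living in a
# system directory cannot shadow the one the user actually asked to link
# against.  The standalone helper below shows the intended effect with
# hypothetical inputs instead of an Environment object.
def _demo_strip_system_libdirs(link_args, system_dirs):
    exclude = {'-L' + d for d in system_dirs}
    return [a for a in link_args if a not in exclude]

# _demo_strip_system_libdirs(['-L/opt/foo/lib', '-L/usr/lib', '-lfoo'], ['/usr/lib'])
#   -> ['-L/opt/foo/lib', '-lfoo']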
| {
"content_hash": "43bd5933a3fd8d0ce9eee7012e80a2db",
"timestamp": "",
"source": "github",
"line_count": 1500,
"max_line_length": 165,
"avg_line_length": 40.70066666666666,
"alnum_prop": 0.562185713583725,
"repo_name": "MathieuDuponchelle/meson",
"id": "e67f4c08613cf77bd44601f715d55d77da0ce08c",
"size": "61778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mesonbuild/dependencies/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4190"
},
{
"name": "Batchfile",
"bytes": "868"
},
{
"name": "C",
"bytes": "146965"
},
{
"name": "C#",
"bytes": "949"
},
{
"name": "C++",
"bytes": "27342"
},
{
"name": "CMake",
"bytes": "1780"
},
{
"name": "D",
"bytes": "5077"
},
{
"name": "Dockerfile",
"bytes": "957"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "4590"
},
{
"name": "Genie",
"bytes": "341"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "2570"
},
{
"name": "JavaScript",
"bytes": "136"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "135"
},
{
"name": "Meson",
"bytes": "341983"
},
{
"name": "Objective-C",
"bytes": "1092"
},
{
"name": "Objective-C++",
"bytes": "332"
},
{
"name": "PowerShell",
"bytes": "2249"
},
{
"name": "Python",
"bytes": "1964481"
},
{
"name": "Roff",
"bytes": "301"
},
{
"name": "Rust",
"bytes": "1079"
},
{
"name": "Shell",
"bytes": "2083"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim script",
"bytes": "9480"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
} |
from peewee import *
from playhouse.sqlite_ext import *
import datetime
from pushbullet import PushBullet
import ConfigParser
import re
config = ConfigParser.RawConfigParser()
config.read('app.cfg')
db = SqliteDatabase("eventbullet.db")
api_key = config.get("pushbullet", "api_key")
title_base = config.get("pushbullet", "title")
message_base = config.get("pushbullet", "message")
pb = PushBullet(api_key)
class Event(Model):
title = CharField()
url = CharField(unique=True)
event_from = DateTimeField(null=True)
event_to = DateTimeField(null=True)
description = TextField(null=True)
# tags = TextField(null=True)
notified = BooleanField(default=False) # Is notified this event
updated_at = DateTimeField(default=datetime.datetime.now)
created_at = DateTimeField(default=datetime.datetime.now)
class Meta:
database = db
def notify(self, force=False):
        if force or not self.notified:
success, push = pb.push_note(self.event_title(), self.event_message())
self.notified = True
self.save()
def event_title(self):
return self.replace_message_tags(title_base)
def event_message(self):
return self.replace_message_tags(message_base)
@staticmethod
def add_event(title, url, tags, description=None, event_from=None, event_to=None):
ev = Event()
ev.title = title
ev.url = url
ev.description = description
        ev.event_from = event_from
        ev.event_to = event_to
        ev.save()
for tag in tags:
Tag.add_tag(ev, tag)
def update_date(self, event_from=None, event_to=None):
if event_from is not None:
self.event_from = event_from
if event_to is not None:
self.event_to = event_to
self.notified = False
self.save()
def replace_message_tags(self, base):
r = base
r = re.sub("#title#", self.title, r)
r = re.sub("#description#", self.description, r)
r = re.sub("#from#", (self.event_from.strftime("%Y/%m/%d %H:%M") if self.event_from is not None else "") , r)
r = re.sub("#from_time#", (self.event_from.strftime("%H:%M") if self.event_from is not None else "") , r)
r = re.sub("#to#", (self.event_to.strftime("%Y/%m/%d %H:%M") if self.event_to is not None else ""), r)
r = re.sub("#to_time#", (self.event_to.strftime("%H:%M") if self.event_to is not None else ""), r)
r = re.sub("#url#", self.url, r)
return r
@staticmethod
def get_not_end_events():
events = Event.select().where(Event.event_to > datetime.datetime.now())
return events
class Tag(Model):
event = ForeignKeyField(Event, related_name="event")
name = CharField()
created_at = DateTimeField(default=datetime.datetime.now)
class Meta:
database = db
@staticmethod
def add_tag(event, name):
tag = Tag()
tag.event = event
tag.name = name
tag.save()
db.create_tables([Event, Tag], True)
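# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: replace_message_tags()
# above fills '#title#', '#from#', '#url#'-style placeholders from app.cfg into
# the Pushbullet title/message templates.  The standalone helper below shows
# the same substitution idea with a plain dict; _demo_fill_template and its
# sample values are hypothetical.
def _demo_fill_template(template, values):
    result = template
    for key, value in values.items():
        result = re.sub("#%s#" % key, value if value is not None else "", result)
    return result

# _demo_fill_template("[event] #title# #from#",
#                     {"title": "PyCon", "from": "2015/05/01 10:00"})
#   -> "[event] PyCon 2015/05/01 10:00"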
| {
"content_hash": "637fa0cb6e15912f622641526f249944",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 117,
"avg_line_length": 28.22429906542056,
"alnum_prop": 0.6158940397350994,
"repo_name": "takudo/eventbullet",
"id": "cc10adb57ac67271cceb54dad62e13ceefd9036a",
"size": "3046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventbullet/db/events.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13155"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import errno
import os
import sys
import signal
from mock import Mock, patch, call
from celery import _find_option_with_arg
from celery import platforms
from celery.five import open_fqdn
from celery.platforms import (
get_fdmax,
ignore_errno,
set_process_title,
signals,
maybe_drop_privileges,
setuid,
setgid,
initgroups,
parse_uid,
parse_gid,
detached,
DaemonContext,
create_pidlock,
Pidfile,
LockFailed,
setgroups,
_setgroups_hack,
close_open_fds,
)
try:
import resource
except ImportError: # pragma: no cover
resource = None # noqa
from celery.tests.case import (
Case, WhateverIO, override_stdouts, mock_open, SkipTest,
)
class test_find_option_with_arg(Case):
def test_long_opt(self):
self.assertEqual(
_find_option_with_arg(['--foo=bar'], long_opts=['--foo']),
'bar'
)
def test_short_opt(self):
self.assertEqual(
_find_option_with_arg(['-f', 'bar'], short_opts=['-f']),
'bar'
)
class test_close_open_fds(Case):
def test_closes(self):
with patch('os.close') as _close:
with patch('celery.platforms.get_fdmax') as fdmax:
fdmax.return_value = 3
close_open_fds()
_close.assert_has_calls([call(2), call(1), call(0)])
_close.side_effect = OSError()
_close.side_effect.errno = errno.EBADF
close_open_fds()
class test_ignore_errno(Case):
def test_raises_EBADF(self):
with ignore_errno('EBADF'):
exc = OSError()
exc.errno = errno.EBADF
raise exc
def test_otherwise(self):
with self.assertRaises(OSError):
with ignore_errno('EBADF'):
exc = OSError()
exc.errno = errno.ENOENT
raise exc
class test_set_process_title(Case):
def when_no_setps(self):
        prev, platforms._setproctitle = platforms._setproctitle, None
try:
set_process_title('foo')
finally:
platforms._setproctitle = prev
class test_Signals(Case):
@patch('signal.getsignal')
def test_getitem(self, getsignal):
signals['SIGINT']
getsignal.assert_called_with(signal.SIGINT)
def test_supported(self):
self.assertTrue(signals.supported('INT'))
self.assertFalse(signals.supported('SIGIMAGINARY'))
def test_reset_alarm(self):
if sys.platform == 'win32':
raise SkipTest('signal.alarm not available on Windows')
with patch('signal.alarm') as _alarm:
signals.reset_alarm()
_alarm.assert_called_with(0)
def test_arm_alarm(self):
if hasattr(signal, 'setitimer'):
with patch('signal.setitimer', create=True) as seti:
signals.arm_alarm(30)
self.assertTrue(seti.called)
def test_signum(self):
self.assertEqual(signals.signum(13), 13)
self.assertEqual(signals.signum('INT'), signal.SIGINT)
self.assertEqual(signals.signum('SIGINT'), signal.SIGINT)
with self.assertRaises(TypeError):
signals.signum('int')
signals.signum(object())
@patch('signal.signal')
def test_ignore(self, set):
signals.ignore('SIGINT')
set.assert_called_with(signals.signum('INT'), signals.ignored)
signals.ignore('SIGTERM')
set.assert_called_with(signals.signum('TERM'), signals.ignored)
@patch('signal.signal')
def test_setitem(self, set):
handle = lambda *a: a
signals['INT'] = handle
set.assert_called_with(signal.SIGINT, handle)
@patch('signal.signal')
def test_setitem_raises(self, set):
set.side_effect = ValueError()
signals['INT'] = lambda *a: a
if not platforms.IS_WINDOWS:
class test_get_fdmax(Case):
@patch('resource.getrlimit')
def test_when_infinity(self, getrlimit):
getrlimit.return_value = [None, resource.RLIM_INFINITY]
default = object()
self.assertIs(get_fdmax(default), default)
@patch('resource.getrlimit')
def test_when_actual(self, getrlimit):
getrlimit.return_value = [None, 13]
self.assertEqual(get_fdmax(None), 13)
class test_maybe_drop_privileges(Case):
@patch('celery.platforms.parse_uid')
@patch('pwd.getpwuid')
@patch('celery.platforms.setgid')
@patch('celery.platforms.setuid')
@patch('celery.platforms.initgroups')
def test_with_uid(self, initgroups, setuid, setgid,
getpwuid, parse_uid):
class pw_struct(object):
pw_gid = 50001
def raise_on_second_call(*args, **kwargs):
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EPERM
setuid.side_effect = raise_on_second_call
getpwuid.return_value = pw_struct()
parse_uid.return_value = 5001
maybe_drop_privileges(uid='user')
parse_uid.assert_called_with('user')
getpwuid.assert_called_with(5001)
setgid.assert_called_with(50001)
initgroups.assert_called_with(5001, 50001)
setuid.assert_has_calls([call(5001), call(0)])
@patch('celery.platforms.parse_uid')
@patch('celery.platforms.parse_gid')
@patch('celery.platforms.setgid')
@patch('celery.platforms.setuid')
@patch('celery.platforms.initgroups')
def test_with_guid(self, initgroups, setuid, setgid,
parse_gid, parse_uid):
def raise_on_second_call(*args, **kwargs):
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EPERM
setuid.side_effect = raise_on_second_call
parse_uid.return_value = 5001
parse_gid.return_value = 50001
maybe_drop_privileges(uid='user', gid='group')
parse_uid.assert_called_with('user')
parse_gid.assert_called_with('group')
setgid.assert_called_with(50001)
initgroups.assert_called_with(5001, 50001)
setuid.assert_has_calls([call(5001), call(0)])
setuid.side_effect = None
with self.assertRaises(RuntimeError):
maybe_drop_privileges(uid='user', gid='group')
setuid.side_effect = OSError()
setuid.side_effect.errno = errno.EINVAL
with self.assertRaises(OSError):
maybe_drop_privileges(uid='user', gid='group')
@patch('celery.platforms.setuid')
@patch('celery.platforms.setgid')
@patch('celery.platforms.parse_gid')
def test_only_gid(self, parse_gid, setgid, setuid):
parse_gid.return_value = 50001
maybe_drop_privileges(gid='group')
parse_gid.assert_called_with('group')
setgid.assert_called_with(50001)
self.assertFalse(setuid.called)
class test_setget_uid_gid(Case):
@patch('celery.platforms.parse_uid')
@patch('os.setuid')
def test_setuid(self, _setuid, parse_uid):
parse_uid.return_value = 5001
setuid('user')
parse_uid.assert_called_with('user')
_setuid.assert_called_with(5001)
@patch('celery.platforms.parse_gid')
@patch('os.setgid')
def test_setgid(self, _setgid, parse_gid):
parse_gid.return_value = 50001
setgid('group')
parse_gid.assert_called_with('group')
_setgid.assert_called_with(50001)
def test_parse_uid_when_int(self):
self.assertEqual(parse_uid(5001), 5001)
@patch('pwd.getpwnam')
def test_parse_uid_when_existing_name(self, getpwnam):
class pwent(object):
pw_uid = 5001
getpwnam.return_value = pwent()
self.assertEqual(parse_uid('user'), 5001)
@patch('pwd.getpwnam')
def test_parse_uid_when_nonexisting_name(self, getpwnam):
getpwnam.side_effect = KeyError('user')
with self.assertRaises(KeyError):
parse_uid('user')
def test_parse_gid_when_int(self):
self.assertEqual(parse_gid(50001), 50001)
@patch('grp.getgrnam')
def test_parse_gid_when_existing_name(self, getgrnam):
class grent(object):
gr_gid = 50001
getgrnam.return_value = grent()
self.assertEqual(parse_gid('group'), 50001)
@patch('grp.getgrnam')
def test_parse_gid_when_nonexisting_name(self, getgrnam):
getgrnam.side_effect = KeyError('group')
with self.assertRaises(KeyError):
parse_gid('group')
class test_initgroups(Case):
@patch('pwd.getpwuid')
@patch('os.initgroups', create=True)
def test_with_initgroups(self, initgroups_, getpwuid):
getpwuid.return_value = ['user']
initgroups(5001, 50001)
initgroups_.assert_called_with('user', 50001)
@patch('celery.platforms.setgroups')
@patch('grp.getgrall')
@patch('pwd.getpwuid')
def test_without_initgroups(self, getpwuid, getgrall, setgroups):
prev = getattr(os, 'initgroups', None)
try:
delattr(os, 'initgroups')
except AttributeError:
pass
try:
getpwuid.return_value = ['user']
class grent(object):
gr_mem = ['user']
def __init__(self, gid):
self.gr_gid = gid
getgrall.return_value = [grent(1), grent(2), grent(3)]
initgroups(5001, 50001)
setgroups.assert_called_with([1, 2, 3])
finally:
if prev:
os.initgroups = prev
class test_detached(Case):
def test_without_resource(self):
prev, platforms.resource = platforms.resource, None
try:
with self.assertRaises(RuntimeError):
detached()
finally:
platforms.resource = prev
@patch('celery.platforms._create_pidlock')
@patch('celery.platforms.signals')
@patch('celery.platforms.maybe_drop_privileges')
@patch('os.geteuid')
@patch(open_fqdn)
def test_default(self, open, geteuid, maybe_drop,
signals, pidlock):
geteuid.return_value = 0
context = detached(uid='user', gid='group')
self.assertIsInstance(context, DaemonContext)
signals.reset.assert_called_with('SIGCLD')
maybe_drop.assert_called_with(uid='user', gid='group')
open.return_value = Mock()
geteuid.return_value = 5001
context = detached(uid='user', gid='group', logfile='/foo/bar')
self.assertIsInstance(context, DaemonContext)
self.assertTrue(context.after_chdir)
context.after_chdir()
open.assert_called_with('/foo/bar', 'a')
open.return_value.close.assert_called_with()
context = detached(pidfile='/foo/bar/pid')
self.assertIsInstance(context, DaemonContext)
self.assertTrue(context.after_chdir)
context.after_chdir()
pidlock.assert_called_with('/foo/bar/pid')
class test_DaemonContext(Case):
@patch('os.fork')
@patch('os.setsid')
@patch('os._exit')
@patch('os.chdir')
@patch('os.umask')
@patch('os.close')
@patch('os.closerange')
@patch('os.open')
@patch('os.dup2')
def test_open(self, dup2, open, close, closer, umask, chdir,
_exit, setsid, fork):
x = DaemonContext(workdir='/opt/workdir')
fork.return_value = 0
with x:
self.assertTrue(x._is_open)
with x:
pass
self.assertEqual(fork.call_count, 2)
setsid.assert_called_with()
self.assertFalse(_exit.called)
chdir.assert_called_with(x.workdir)
umask.assert_called_with(x.umask)
self.assertTrue(dup2.called)
fork.reset_mock()
fork.return_value = 1
x = DaemonContext(workdir='/opt/workdir')
with x:
pass
self.assertEqual(fork.call_count, 1)
_exit.assert_called_with(0)
x = DaemonContext(workdir='/opt/workdir', fake=True)
x._detach = Mock()
with x:
pass
self.assertFalse(x._detach.called)
x.after_chdir = Mock()
with x:
pass
x.after_chdir.assert_called_with()
class test_Pidfile(Case):
@patch('celery.platforms.Pidfile')
def test_create_pidlock(self, Pidfile):
p = Pidfile.return_value = Mock()
p.is_locked.return_value = True
p.remove_if_stale.return_value = False
with self.assertRaises(SystemExit):
create_pidlock('/var/pid')
p.remove_if_stale.return_value = True
ret = create_pidlock('/var/pid')
self.assertIs(ret, p)
def test_context(self):
p = Pidfile('/var/pid')
p.write_pid = Mock()
p.remove = Mock()
with p as _p:
self.assertIs(_p, p)
p.write_pid.assert_called_with()
p.remove.assert_called_with()
def test_acquire_raises_LockFailed(self):
p = Pidfile('/var/pid')
p.write_pid = Mock()
p.write_pid.side_effect = OSError()
with self.assertRaises(LockFailed):
with p:
pass
@patch('os.path.exists')
def test_is_locked(self, exists):
p = Pidfile('/var/pid')
exists.return_value = True
self.assertTrue(p.is_locked())
exists.return_value = False
self.assertFalse(p.is_locked())
def test_read_pid(self):
with mock_open() as s:
s.write('1816\n')
s.seek(0)
p = Pidfile('/var/pid')
self.assertEqual(p.read_pid(), 1816)
def test_read_pid_partially_written(self):
with mock_open() as s:
s.write('1816')
s.seek(0)
p = Pidfile('/var/pid')
with self.assertRaises(ValueError):
p.read_pid()
def test_read_pid_raises_ENOENT(self):
exc = IOError()
exc.errno = errno.ENOENT
with mock_open(side_effect=exc):
p = Pidfile('/var/pid')
self.assertIsNone(p.read_pid())
def test_read_pid_raises_IOError(self):
exc = IOError()
exc.errno = errno.EAGAIN
with mock_open(side_effect=exc):
p = Pidfile('/var/pid')
with self.assertRaises(IOError):
p.read_pid()
def test_read_pid_bogus_pidfile(self):
with mock_open() as s:
s.write('eighteensixteen\n')
s.seek(0)
p = Pidfile('/var/pid')
with self.assertRaises(ValueError):
p.read_pid()
@patch('os.unlink')
def test_remove(self, unlink):
unlink.return_value = True
p = Pidfile('/var/pid')
p.remove()
unlink.assert_called_with(p.path)
@patch('os.unlink')
def test_remove_ENOENT(self, unlink):
exc = OSError()
exc.errno = errno.ENOENT
unlink.side_effect = exc
p = Pidfile('/var/pid')
p.remove()
unlink.assert_called_with(p.path)
@patch('os.unlink')
def test_remove_EACCES(self, unlink):
exc = OSError()
exc.errno = errno.EACCES
unlink.side_effect = exc
p = Pidfile('/var/pid')
p.remove()
unlink.assert_called_with(p.path)
@patch('os.unlink')
def test_remove_OSError(self, unlink):
exc = OSError()
exc.errno = errno.EAGAIN
unlink.side_effect = exc
p = Pidfile('/var/pid')
with self.assertRaises(OSError):
p.remove()
unlink.assert_called_with(p.path)
@patch('os.kill')
def test_remove_if_stale_process_alive(self, kill):
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = 1816
kill.return_value = 0
self.assertFalse(p.remove_if_stale())
kill.assert_called_with(1816, 0)
p.read_pid.assert_called_with()
kill.side_effect = OSError()
kill.side_effect.errno = errno.ENOENT
self.assertFalse(p.remove_if_stale())
@patch('os.kill')
def test_remove_if_stale_process_dead(self, kill):
with override_stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = 1816
p.remove = Mock()
exc = OSError()
exc.errno = errno.ESRCH
kill.side_effect = exc
self.assertTrue(p.remove_if_stale())
kill.assert_called_with(1816, 0)
p.remove.assert_called_with()
def test_remove_if_stale_broken_pid(self):
with override_stdouts():
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.side_effect = ValueError()
p.remove = Mock()
self.assertTrue(p.remove_if_stale())
p.remove.assert_called_with()
def test_remove_if_stale_no_pidfile(self):
p = Pidfile('/var/pid')
p.read_pid = Mock()
p.read_pid.return_value = None
p.remove = Mock()
self.assertTrue(p.remove_if_stale())
p.remove.assert_called_with()
@patch('os.fsync')
@patch('os.getpid')
@patch('os.open')
@patch('os.fdopen')
@patch(open_fqdn)
def test_write_pid(self, open_, fdopen, osopen, getpid, fsync):
getpid.return_value = 1816
osopen.return_value = 13
w = fdopen.return_value = WhateverIO()
w.close = Mock()
r = open_.return_value = WhateverIO()
r.write('1816\n')
r.seek(0)
p = Pidfile('/var/pid')
p.write_pid()
w.seek(0)
self.assertEqual(w.readline(), '1816\n')
self.assertTrue(w.close.called)
getpid.assert_called_with()
osopen.assert_called_with(p.path, platforms.PIDFILE_FLAGS,
platforms.PIDFILE_MODE)
fdopen.assert_called_with(13, 'w')
fsync.assert_called_with(13)
open_.assert_called_with(p.path)
@patch('os.fsync')
@patch('os.getpid')
@patch('os.open')
@patch('os.fdopen')
@patch(open_fqdn)
def test_write_reread_fails(self, open_, fdopen,
osopen, getpid, fsync):
getpid.return_value = 1816
osopen.return_value = 13
w = fdopen.return_value = WhateverIO()
w.close = Mock()
r = open_.return_value = WhateverIO()
r.write('11816\n')
r.seek(0)
p = Pidfile('/var/pid')
with self.assertRaises(LockFailed):
p.write_pid()
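# Illustrative sketch of the Pidfile flow these tests exercise (assembled from
# the calls above; treat it as an assumed usage pattern, not a documented API):
#
#     p = Pidfile('/var/run/worker.pid')
#     if p.remove_if_stale():   # drops the file when the recorded process is gone
#         p.write_pid()         # records the current PID and re-reads it to verify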
class test_setgroups(Case):
@patch('os.setgroups', create=True)
def test_setgroups_hack_ValueError(self, setgroups):
def on_setgroups(groups):
if len(groups) <= 200:
setgroups.return_value = True
return
raise ValueError()
setgroups.side_effect = on_setgroups
_setgroups_hack(list(range(400)))
setgroups.side_effect = ValueError()
with self.assertRaises(ValueError):
_setgroups_hack(list(range(400)))
@patch('os.setgroups', create=True)
def test_setgroups_hack_OSError(self, setgroups):
exc = OSError()
exc.errno = errno.EINVAL
def on_setgroups(groups):
if len(groups) <= 200:
setgroups.return_value = True
return
raise exc
setgroups.side_effect = on_setgroups
_setgroups_hack(list(range(400)))
setgroups.side_effect = exc
with self.assertRaises(OSError):
_setgroups_hack(list(range(400)))
exc2 = OSError()
        exc2.errno = errno.ESRCH
setgroups.side_effect = exc2
with self.assertRaises(OSError):
_setgroups_hack(list(range(400)))
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups(self, hack, sysconf):
sysconf.return_value = 100
setgroups(list(range(400)))
hack.assert_called_with(list(range(100)))
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups_sysconf_raises(self, hack, sysconf):
sysconf.side_effect = ValueError()
setgroups(list(range(400)))
hack.assert_called_with(list(range(400)))
@patch('os.getgroups')
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups_raises_ESRCH(self, hack, sysconf, getgroups):
sysconf.side_effect = ValueError()
esrch = OSError()
esrch.errno = errno.ESRCH
hack.side_effect = esrch
with self.assertRaises(OSError):
setgroups(list(range(400)))
@patch('os.getgroups')
@patch('os.sysconf')
@patch('celery.platforms._setgroups_hack')
def test_setgroups_raises_EPERM(self, hack, sysconf, getgroups):
sysconf.side_effect = ValueError()
eperm = OSError()
eperm.errno = errno.EPERM
hack.side_effect = eperm
getgroups.return_value = list(range(400))
setgroups(list(range(400)))
getgroups.assert_called_with()
getgroups.return_value = [1000]
with self.assertRaises(OSError):
setgroups(list(range(400)))
getgroups.assert_called_with()
| {
"content_hash": "34438040357f0e98d5a74ead4e82a6e0",
"timestamp": "",
"source": "github",
"line_count": 691,
"max_line_length": 75,
"avg_line_length": 33.32706222865412,
"alnum_prop": 0.5388857527465369,
"repo_name": "sivaprakashniet/push_pull",
"id": "b01ad12c124ad05f4f4f152aae22c15b41b88ba2",
"size": "23029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p2p/lib/python2.7/site-packages/celery/tests/utils/test_platforms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "33347"
},
{
"name": "CSS",
"bytes": "111284"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "933220"
},
{
"name": "JavaScript",
"bytes": "260224"
},
{
"name": "Nginx",
"bytes": "4758"
},
{
"name": "Python",
"bytes": "9725308"
},
{
"name": "Roff",
"bytes": "17679"
},
{
"name": "Shell",
"bytes": "6008"
}
],
"symlink_target": ""
} |
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_gradient03.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [61363328, 61364864]
chart.axis_ids = [61363712, 61365248]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'gradient': {
'colors': ['#DDEBCF', '#9CB86E', '#156B13'],
'positions': [0, 40, 100],
}
})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| {
"content_hash": "727c520ef896678896b0a1f9334d6c03",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 68,
"avg_line_length": 27.10169491525424,
"alnum_prop": 0.5428392745465916,
"repo_name": "jkyeung/XlsxWriter",
"id": "8b0e50a85edba90ba00b82f34114d36e195b2009",
"size": "1772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/comparison/test_chart_gradient03.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
} |
from chunsabot.database import Database
def make_initial_config():
Database.mkdir()
if Database.config_exists():
print("There exists account info. \r\nReally overwrite config file? (Y / N)")
if input().lower() == "y":
result = True
else:
result = False
else:
print("There isn't default config file (data/config.yaml). Do you want to generate default config file? (Y / N)")
if input().lower() == "y":
result = True
else:
result = False
if result:
Database.save_config('leave', u"안녕히 계세요!")
Database.save_config('debug_users', [])
Database.save_config('curse_map', [])
Database.save_config('sensitive_map', [])
Database.save_config('debug_allowed_room', [])
Database.save_config('debug_users', [])
Database.save_config('debug_mode', False)
Database.save_config('google_api_key', '')
Database.save_config('weather_url_all', 'http://www.kma.go.kr/weather/forecast/mid-term-rss3.jsp')
print("Config sucessfully saved.")
else:
print("Configmaker work cancelled.")
| {
"content_hash": "4954b3ad265c8d2846f5eae8e02fc829",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 121,
"avg_line_length": 36.4375,
"alnum_prop": 0.5909090909090909,
"repo_name": "susemeee/Chunsabot-framework",
"id": "68c7beabc533fe3e09c1d37e358265c1c7713e98",
"size": "1203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chunsabot/configmaker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80286"
}
],
"symlink_target": ""
} |
"""
Can be run to install all the subpieces.
"""
from __future__ import print_function
try:
from setuptools import setup
from setuptools.command.install import install
from setuptools.command.sdist import sdist
except ImportError:
from distutils.core import setup
from distutils.command.install import install
from distutils.command.sdist import sdist
from distutils.cmd import Command
import os
import sys
from contextlib import contextmanager
VERSION = "1.16.0"
@contextmanager
def chdir(dir):
prev = os.getcwd()
try:
os.chdir(dir)
yield
finally:
os.chdir(prev)
def run_cmd(command_string):
import subprocess
try:
print(f"+ {command_string}")
rc = subprocess.call(command_string, shell=True)
if rc < 0:
print(f"Command terminated by signal {-rc}", file=sys.stderr)
except OSError as e:
print(f"Command failed: {e}", file=sys.stderr)
PACKAGES = [
"bdc",
"course",
"db_edu_util",
"gendbc",
"master_parse",
]
class TestCommand(Command):
description = "run all tests"
user_options = []
def __init__(self, dist):
Command.__init__(self, dist)
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
run_cmd("./run-tests.sh")
setup(
name="db-build-tooling",
packages=PACKAGES,
cmdclass={"test": TestCommand},
install_requires=[
"databricks-cli >= 0.8.7",
"docopt >= 0.6.2",
"GitPython >= 3.1.2",
"grizzled-python >= 2.2.0",
"markdown2 >= 2.3.7",
"parsimonious >= 0.8.1",
"pystache >= 0.5.4",
"PyYAML >= 5.1",
"nbformat >= 4.4.0",
"requests >= 2.22.0",
"termcolor >= 1.1.0",
"WeasyPrint >= 45",
],
entry_points={
"console_scripts": [
"bdc=bdc:main",
"course=course:main",
"gendbc=gendbc:main",
"master_parse=master_parse:main",
]
},
version=VERSION,
description="Wrapper package for Databricks Training build tools",
author="Databricks Education Team",
author_email="training-logins@databricks.com",
license="Creative Commons Attribution-NonCommercial 4.0 International",
)
| {
"content_hash": "18ef2813b3a564a42179ce9ac740a95a",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 75,
"avg_line_length": 22.259615384615383,
"alnum_prop": 0.5952483801295896,
"repo_name": "databricks-edu/build-tooling",
"id": "34577b57b55dfeb1125710f9504291b44a5005f0",
"size": "2315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1707"
},
{
"name": "Python",
"bytes": "394219"
},
{
"name": "Shell",
"bytes": "22711"
}
],
"symlink_target": ""
} |
"""Test for kubernetes_e2e.py"""
import os
import shutil
import string
import tempfile
import urllib2
import unittest
import time
import kubernetes_e2e
FAKE_WORKSPACE_STATUS = 'STABLE_BUILD_GIT_COMMIT 599539dc0b99976fda0f326f4ce47e93ec07217c\n' \
'STABLE_BUILD_SCM_STATUS clean\n' \
'STABLE_BUILD_SCM_REVISION v1.7.0-alpha.0.1320+599539dc0b9997\n' \
'STABLE_BUILD_MAJOR_VERSION 1\n' \
'STABLE_BUILD_MINOR_VERSION 7+\n' \
'STABLE_gitCommit 599539dc0b99976fda0f326f4ce47e93ec07217c\n' \
'STABLE_gitTreeState clean\n' \
'STABLE_gitVersion v1.7.0-alpha.0.1320+599539dc0b9997\n' \
'STABLE_gitMajor 1\n' \
'STABLE_gitMinor 7+\n'
FAKE_WORKSPACE_STATUS_V1_6 = 'STABLE_BUILD_GIT_COMMIT 84febd4537dd190518657405b7bdb921dfbe0387\n' \
'STABLE_BUILD_SCM_STATUS clean\n' \
'STABLE_BUILD_SCM_REVISION v1.6.4-beta.0.18+84febd4537dd19\n' \
'STABLE_BUILD_MAJOR_VERSION 1\n' \
'STABLE_BUILD_MINOR_VERSION 6+\n' \
'STABLE_gitCommit 84febd4537dd190518657405b7bdb921dfbe0387\n' \
'STABLE_gitTreeState clean\n' \
'STABLE_gitVersion v1.6.4-beta.0.18+84febd4537dd19\n' \
'STABLE_gitMajor 1\n' \
'STABLE_gitMinor 6+\n'
FAKE_DESCRIBE_FROM_FAMILY_RESPONSE = """
archiveSizeBytes: '1581831882'
creationTimestamp: '2017-06-16T10:37:57.681-07:00'
description: 'Google, Container-Optimized OS, 59-9460.64.0 stable, Kernel: ChromiumOS-4.4.52
Kubernetes: 1.6.4 Docker: 1.11.2'
diskSizeGb: '10'
family: cos-stable
id: '2388425242502080922'
kind: compute#image
labelFingerprint: 42WmSpB8rSM=
licenses:
- https://www.googleapis.com/compute/v1/projects/cos-cloud/global/licenses/cos
name: cos-stable-59-9460-64-0
rawDisk:
containerType: TAR
source: ''
selfLink: https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-59-9460-64-0
sourceType: RAW
status: READY
"""
def fake_pass(*_unused, **_unused2):
"""Do nothing."""
pass
def fake_bomb(*a, **kw):
"""Always raise."""
raise AssertionError('Should not happen', a, kw)
def raise_urllib2_error(*_unused, **_unused2):
"""Always raise a urllib2.URLError"""
raise urllib2.URLError("test failure")
def always_kubernetes(*_unused, **_unused2):
"""Always return 'kubernetes'"""
return 'kubernetes'
class Stub(object):
"""Replace thing.param with replacement until exiting with."""
def __init__(self, thing, param, replacement):
self.thing = thing
self.param = param
self.replacement = replacement
self.old = getattr(thing, param)
setattr(thing, param, self.replacement)
def __enter__(self, *a, **kw):
return self.replacement
def __exit__(self, *a, **kw):
setattr(self.thing, self.param, self.old)
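# Minimal usage sketch of the Stub helper above (illustrative only; the patched
# attribute and replacement value are arbitrary examples, not part of these tests):
#
#     with Stub(os.path, 'exists', lambda _path: True):
#         ...  # os.path.exists always returns True inside this block
#     # the original os.path.exists is restored on exit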
class ClusterNameTest(unittest.TestCase):
def test_name_filled(self):
"""Return the cluster name if set."""
name = 'foo'
build = '1984'
os.environ['BUILD_ID'] = build
actual = kubernetes_e2e.cluster_name(name)
self.assertTrue(actual)
self.assertIn(name, actual)
self.assertNotIn(build, actual)
def test_name_empty_short_build(self):
"""Return the build number if name is empty."""
name = ''
build = '1984'
os.environ['BUILD_ID'] = build
actual = kubernetes_e2e.cluster_name(name)
self.assertTrue(actual)
self.assertIn(build, actual)
def test_name_empty_long_build(self):
"""Return a short hash of a long build number if name is empty."""
name = ''
build = '0' * 63
os.environ['BUILD_ID'] = build
actual = kubernetes_e2e.cluster_name(name)
self.assertTrue(actual)
self.assertNotIn(build, actual)
if len(actual) > 32: # Some firewall names consume half the quota
self.fail('Name should be short: %s' % actual)
class ScenarioTest(unittest.TestCase): # pylint: disable=too-many-public-methods
"""Test for e2e scenario."""
callstack = []
envs = {}
def setUp(self):
self.boiler = [
Stub(kubernetes_e2e, 'check', self.fake_check),
Stub(shutil, 'copy', fake_pass),
]
def tearDown(self):
for stub in self.boiler:
with stub: # Leaving with restores things
pass
self.callstack[:] = []
self.envs.clear()
def fake_check(self, *cmd):
"""Log the command."""
self.callstack.append(string.join(cmd))
def fake_check_env(self, env, *cmd):
"""Log the command with a specific env."""
self.envs.update(env)
self.callstack.append(string.join(cmd))
def fake_output_work_status(self, *cmd):
"""fake a workstatus blob."""
self.callstack.append(string.join(cmd))
return FAKE_WORKSPACE_STATUS
def fake_output_work_status_v1_6(self, *cmd):
"""fake a workstatus blob for v1.6."""
self.callstack.append(string.join(cmd))
return FAKE_WORKSPACE_STATUS_V1_6
def fake_output_get_latest_image(self, *cmd):
"""fake a `gcloud compute images describe-from-family` response."""
self.callstack.append(string.join(cmd))
return FAKE_DESCRIBE_FROM_FAMILY_RESPONSE
def test_local(self):
"""Make sure local mode is fine overall."""
args = kubernetes_e2e.parse_args()
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
self.assertNotEqual(self.envs, {})
for call in self.callstack:
self.assertFalse(call.startswith('docker'))
def test_check_leaks(self):
"""Ensure --check-leaked-resources=true sends flag to kubetest."""
args = kubernetes_e2e.parse_args(['--check-leaked-resources=true'])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
self.assertIn('--check-leaked-resources=true', self.callstack[-1])
def test_check_leaks_false(self):
"""Ensure --check-leaked-resources=true sends flag to kubetest."""
args = kubernetes_e2e.parse_args(['--check-leaked-resources=false'])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
self.assertIn('--check-leaked-resources=false', self.callstack[-1])
def test_check_leaks_default(self):
"""Ensure --check-leaked-resources=true sends flag to kubetest."""
args = kubernetes_e2e.parse_args(['--check-leaked-resources'])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
self.assertIn('--check-leaked-resources', self.callstack[-1])
def test_check_leaks_unset(self):
"""Ensure --check-leaked-resources=true sends flag to kubetest."""
args = kubernetes_e2e.parse_args(['--mode=local'])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
self.assertNotIn('--check-leaked-resources', self.callstack[-1])
def test_migrated_kubetest_args(self):
migrated = [
'--stage-suffix=panda',
'--random-flag', 'random-value',
'--multiple-federations',
'arg1', 'arg2',
'--federation',
'--kubemark',
'--extract=this',
'--extract=that',
'--perf-tests',
'--save=somewhere',
'--skew',
'--publish=location',
'--timeout=42m',
'--upgrade_args=ginkgo',
'--check-leaked-resources=true',
'--charts',
]
explicit_passthrough_args = [
'--deployment=yay',
'--provider=gce',
]
args = kubernetes_e2e.parse_args(migrated
+ explicit_passthrough_args
+ ['--test=false'])
self.assertEquals(migrated, args.kubetest_args)
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
lastcall = self.callstack[-1]
for arg in migrated:
self.assertIn(arg, lastcall)
for arg in explicit_passthrough_args:
self.assertIn(arg, lastcall)
def test_updown_default(self):
args = kubernetes_e2e.parse_args([])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
lastcall = self.callstack[-1]
self.assertIn('--up', lastcall)
self.assertIn('--down', lastcall)
def test_updown_set(self):
args = kubernetes_e2e.parse_args(['--up=false', '--down=true'])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
lastcall = self.callstack[-1]
self.assertNotIn('--up', lastcall)
self.assertIn('--down', lastcall)
def test_kubeadm_ci(self):
"""Make sure kubeadm ci mode is fine overall."""
args = kubernetes_e2e.parse_args(['--kubeadm=ci'])
self.assertEqual(args.kubeadm, 'ci')
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
with Stub(kubernetes_e2e, 'check_output', self.fake_output_work_status):
kubernetes_e2e.main(args)
self.assertNotIn('E2E_OPT', self.envs)
version = 'gs://kubernetes-release-dev/ci/v1.7.0-alpha.0.1320+599539dc0b9997-bazel/bin/linux/amd64/' # pylint: disable=line-too-long
self.assertIn('--kubernetes-anywhere-kubeadm-version=%s' % version, self.callstack[-1])
called = False
for call in self.callstack:
self.assertFalse(call.startswith('docker'))
if call == 'hack/print-workspace-status.sh':
called = True
self.assertTrue(called)
def test_local_env(self):
"""
Ensure that host variables (such as GOPATH) are included,
and added envs/env files overwrite os environment.
"""
mode = kubernetes_e2e.LocalMode('/orig-workspace', '/random-artifacts')
mode.add_environment(*(
'FOO=BAR', 'GOPATH=/go/path', 'WORKSPACE=/new/workspace'))
mode.add_os_environment(*('USER=jenkins', 'FOO=BAZ', 'GOOS=linux'))
with tempfile.NamedTemporaryFile() as temp:
temp.write('USER=prow')
temp.flush()
mode.add_file(temp.name)
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
mode.start([])
self.assertIn(('FOO', 'BAR'), self.envs.viewitems())
self.assertIn(('WORKSPACE', '/new/workspace'), self.envs.viewitems())
self.assertIn(('GOPATH', '/go/path'), self.envs.viewitems())
self.assertIn(('USER', 'prow'), self.envs.viewitems())
self.assertIn(('GOOS', 'linux'), self.envs.viewitems())
self.assertNotIn(('USER', 'jenkins'), self.envs.viewitems())
self.assertNotIn(('FOO', 'BAZ'), self.envs.viewitems())
def test_kubeadm_periodic(self):
"""Make sure kubeadm periodic mode is fine overall."""
args = kubernetes_e2e.parse_args(['--kubeadm=periodic'])
self.assertEqual(args.kubeadm, 'periodic')
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
with Stub(kubernetes_e2e, 'check_output', self.fake_output_work_status):
kubernetes_e2e.main(args)
self.assertNotIn('E2E_OPT', self.envs)
version = 'gs://kubernetes-release-dev/ci/v1.7.0-alpha.0.1320+599539dc0b9997-bazel/bin/linux/amd64/' # pylint: disable=line-too-long
self.assertIn('--kubernetes-anywhere-kubeadm-version=%s' % version, self.callstack[-1])
called = False
for call in self.callstack:
self.assertFalse(call.startswith('docker'))
if call == 'hack/print-workspace-status.sh':
called = True
self.assertTrue(called)
def test_kubeadm_pull(self):
"""Make sure kubeadm pull mode is fine overall."""
args = kubernetes_e2e.parse_args([
'--kubeadm=pull',
'--use-shared-build=bazel'
])
self.assertEqual(args.kubeadm, 'pull')
self.assertEqual(args.use_shared_build, 'bazel')
gcs_bucket = "gs://kubernetes-release-dev/bazel/v1.8.0-beta.1.132+599539dc0b9997"
def fake_gcs_path(path):
bazel_default = os.path.join(
'gs://kubernetes-jenkins/shared-results', 'bazel-build-location.txt')
self.assertEqual(path, bazel_default)
return gcs_bucket
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
with Stub(kubernetes_e2e, 'read_gcs_path', fake_gcs_path):
kubernetes_e2e.main(args)
self.assertNotIn('E2E_OPT', self.envs)
version = '%s/bin/linux/amd64/' % gcs_bucket
self.assertIn('--kubernetes-anywhere-kubeadm-version=%s' % version, self.callstack[-1])
def test_kubeadm_invalid(self):
"""Make sure kubeadm invalid mode exits unsuccessfully."""
with self.assertRaises(SystemExit) as sysexit:
kubernetes_e2e.parse_args(['--mode=local', '--kubeadm=deploy'])
self.assertEqual(sysexit.exception.code, 2)
def test_parse_args_order_agnostic(self):
args = kubernetes_e2e.parse_args([
'--some-kubetest-arg=foo',
'--cluster=test'])
self.assertEqual(args.kubetest_args, ['--some-kubetest-arg=foo'])
self.assertEqual(args.cluster, 'test')
def test_gcp_network(self):
args = kubernetes_e2e.parse_args(['--mode=local', '--cluster=test'])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
lastcall = self.callstack[-1]
self.assertIn('--gcp-network=test', lastcall)
def test_env_local(self):
env = 'FOO'
value = 'BLAT'
args = kubernetes_e2e.parse_args([
'--mode=local',
'--env={env}={value}'.format(env=env, value=value),
])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
self.assertIn(env, self.envs)
self.assertEqual(self.envs[env], value)
def test_aws(self):
temp = tempfile.NamedTemporaryFile()
args = kubernetes_e2e.parse_args([
'--aws',
'--cluster=foo',
'--aws-cluster-domain=test-aws.k8s.io',
'--aws-ssh=%s' % temp.name,
'--aws-pub=%s' % temp.name,
'--aws-cred=%s' % temp.name,
])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
lastcall = self.callstack[-1]
self.assertIn('kops-e2e-runner.sh', lastcall)
self.assertIn('--kops-cluster=foo.test-aws.k8s.io', lastcall)
self.assertIn('--kops-zones', lastcall)
self.assertIn('--kops-state=s3://k8s-kops-prow/', lastcall)
self.assertIn('--kops-nodes=4', lastcall)
self.assertIn('--kops-ssh-key', lastcall)
self.assertNotIn('kubetest', lastcall)
self.assertIn('kops-e2e-runner.sh', lastcall)
self.assertEqual(
self.envs['JENKINS_AWS_SSH_PRIVATE_KEY_FILE'], temp.name)
self.assertEqual(
self.envs['JENKINS_AWS_SSH_PUBLIC_KEY_FILE'], temp.name)
self.assertEqual(
self.envs['JENKINS_AWS_CREDENTIALS_FILE'], temp.name)
def test_kops_aws(self):
temp = tempfile.NamedTemporaryFile()
args = kubernetes_e2e.parse_args([
'--provider=aws',
'--deployment=kops',
'--cluster=foo.example.com',
'--aws-ssh=%s' % temp.name,
'--aws-pub=%s' % temp.name,
'--aws-cred=%s' % temp.name,
])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
lastcall = self.callstack[-1]
self.assertIn('kubetest', lastcall)
self.assertIn('--provider=aws', lastcall)
self.assertIn('--deployment=kops', lastcall)
self.assertIn('--kops-cluster=foo.example.com', lastcall)
self.assertIn('--kops-zones', lastcall)
self.assertIn('--kops-state=s3://k8s-kops-prow/', lastcall)
self.assertIn('--kops-nodes=4', lastcall)
self.assertIn('--kops-ssh-key', lastcall)
self.assertIn('kubetest', lastcall)
self.assertNotIn('kops-e2e-runner.sh', lastcall)
def test_kops_gce(self):
temp = tempfile.NamedTemporaryFile()
args = kubernetes_e2e.parse_args([
'--provider=gce',
'--deployment=kops',
'--cluster=foo.example.com',
'--gce-ssh=%s' % temp.name,
'--gce-pub=%s' % temp.name,
])
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
kubernetes_e2e.main(args)
lastcall = self.callstack[-1]
self.assertIn('kubetest', lastcall)
self.assertIn('--provider=gce', lastcall)
self.assertIn('--deployment=kops', lastcall)
self.assertIn('--kops-cluster=foo.example.com', lastcall)
self.assertIn('--kops-zones', lastcall)
self.assertIn('--kops-state=gs://k8s-kops-gce/', lastcall)
self.assertIn('--kops-nodes=4', lastcall)
self.assertIn('--kops-ssh-key', lastcall)
def test_use_shared_build(self):
# normal path
args = kubernetes_e2e.parse_args([
'--use-shared-build=bazel'
])
def expect_bazel_gcs(path):
bazel_default = os.path.join(
'gs://kubernetes-jenkins/shared-results', 'bazel-build-location.txt')
self.assertEqual(path, bazel_default)
return always_kubernetes()
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
with Stub(kubernetes_e2e, 'read_gcs_path', expect_bazel_gcs):
with Stub(time, 'sleep', fake_pass):
kubernetes_e2e.main(args)
lastcall = self.callstack[-1]
self.assertIn('--extract=kubernetes', lastcall)
# normal path, not bazel
args = kubernetes_e2e.parse_args([
'--use-shared-build'
])
def expect_normal_gcs(path):
bazel_default = os.path.join(
'gs://kubernetes-jenkins/shared-results', 'build-location.txt')
self.assertEqual(path, bazel_default)
return always_kubernetes()
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
with Stub(kubernetes_e2e, 'read_gcs_path', expect_normal_gcs):
kubernetes_e2e.main(args)
lastcall = self.callstack[-1]
self.assertIn('--extract=kubernetes', lastcall)
# test failure to read shared path from GCS
with Stub(kubernetes_e2e, 'check_env', self.fake_check_env):
with Stub(kubernetes_e2e, 'read_gcs_path', raise_urllib2_error):
with Stub(os, 'getcwd', always_kubernetes):
with Stub(time, 'sleep', fake_pass):
try:
kubernetes_e2e.main(args)
except RuntimeError as err:
if not err.message.startswith('Failed to get shared build location'):
raise err
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "3d9066b73548b230f2512d72eb39e5d7",
"timestamp": "",
"source": "github",
"line_count": 488,
"max_line_length": 141,
"avg_line_length": 39.66393442622951,
"alnum_prop": 0.6006406282289729,
"repo_name": "foxish/test-infra",
"id": "41f584dbd5488853a4a4c91e49230358269f87a5",
"size": "20060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scenarios/kubernetes_e2e_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "26680"
},
{
"name": "Go",
"bytes": "3516095"
},
{
"name": "HTML",
"bytes": "73212"
},
{
"name": "JavaScript",
"bytes": "207614"
},
{
"name": "Makefile",
"bytes": "61977"
},
{
"name": "Python",
"bytes": "958919"
},
{
"name": "Roff",
"bytes": "5462"
},
{
"name": "Shell",
"bytes": "96590"
},
{
"name": "Smarty",
"bytes": "516"
}
],
"symlink_target": ""
} |
import datetime
import unittest
from unittest.mock import Mock, patch
import pendulum
from airflow import settings
from airflow.models.dag import DAG
from airflow.models.skipmixin import SkipMixin
from airflow.models.taskinstance import TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.types import DagRunType
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class TestSkipMixin(unittest.TestCase):
@patch('airflow.utils.timezone.utcnow')
def test_skip(self, mock_now):
session = settings.Session()
now = datetime.datetime.utcnow().replace(tzinfo=pendulum.timezone('UTC'))
mock_now.return_value = now
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
)
with dag:
tasks = [DummyOperator(task_id='task')]
dag_run = dag.create_dagrun(
run_type=DagRunType.MANUAL,
execution_date=now,
state=State.FAILED,
)
SkipMixin().skip(dag_run=dag_run, execution_date=now, tasks=tasks, session=session)
session.query(TI).filter(
TI.dag_id == 'dag',
TI.task_id == 'task',
TI.state == State.SKIPPED,
TI.start_date == now,
TI.end_date == now,
).one()
@patch('airflow.utils.timezone.utcnow')
def test_skip_none_dagrun(self, mock_now):
session = settings.Session()
now = datetime.datetime.utcnow().replace(tzinfo=pendulum.timezone('UTC'))
mock_now.return_value = now
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
)
with dag:
tasks = [DummyOperator(task_id='task')]
SkipMixin().skip(dag_run=None, execution_date=now, tasks=tasks, session=session)
session.query(TI).filter(
TI.dag_id == 'dag',
TI.task_id == 'task',
TI.state == State.SKIPPED,
TI.start_date == now,
TI.end_date == now,
).one()
def test_skip_none_tasks(self):
session = Mock()
SkipMixin().skip(dag_run=None, execution_date=None, tasks=[], session=session)
self.assertFalse(session.query.called)
self.assertFalse(session.commit.called)
def test_skip_all_except(self):
dag = DAG(
'dag_test_skip_all_except',
start_date=DEFAULT_DATE,
)
with dag:
task1 = DummyOperator(task_id='task1')
task2 = DummyOperator(task_id='task2')
task3 = DummyOperator(task_id='task3')
task1 >> [task2, task3]
ti1 = TI(task1, execution_date=DEFAULT_DATE)
ti2 = TI(task2, execution_date=DEFAULT_DATE)
ti3 = TI(task3, execution_date=DEFAULT_DATE)
SkipMixin().skip_all_except(ti=ti1, branch_task_ids=['task2'])
def get_state(ti):
ti.refresh_from_db()
return ti.state
assert get_state(ti2) == State.NONE
assert get_state(ti3) == State.SKIPPED
| {
"content_hash": "0ce1a24b7fd821be302a2a2272f5e023",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 91,
"avg_line_length": 32.25,
"alnum_prop": 0.5968992248062015,
"repo_name": "mrkm4ntr/incubator-airflow",
"id": "a9145701c6458734bdf5b83b6565d3678dd81026",
"size": "3884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models/test_skipmixin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22581"
},
{
"name": "Dockerfile",
"bytes": "31475"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "221101"
},
{
"name": "JavaScript",
"bytes": "32643"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "14407542"
},
{
"name": "Shell",
"bytes": "541811"
}
],
"symlink_target": ""
} |
import datetime
from pyoperant import hwio, utils, ComponentError
class BaseComponent(object):
"""Base class for physcal component"""
def __init__(self, name=None, *args, **kwargs):
self.name = name
pass
## Hopper ##
class HopperActiveError(ComponentError):
"""raised when the hopper is up when it shouldn't be"""
pass
class HopperInactiveError(ComponentError):
"""raised when the hopper is down when it shouldn't be"""
pass
class HopperAlreadyUpError(HopperActiveError):
"""raised when the hopper is already up before it goes up"""
pass
class HopperWontComeUpError(HopperInactiveError):
"""raised when the hopper won't come up"""
pass
class HopperWontDropError(HopperActiveError):
"""raised when the hopper won't drop"""
pass
class Hopper(BaseComponent):
""" Class which holds information about a hopper
Parameters
----------
solenoid : `hwio.BooleanOutput`
output channel to activate the solenoid & raise the hopper
IR : :class:`hwio.BooleanInput`
input channel for the IR beam to check if the hopper is up
max_lag : float, optional
time in seconds to wait before checking to make sure the hopper is up (default=0.3)
Attributes
----------
solenoid : hwio.BooleanOutput
output channel to activate the solenoid & raise the hopper
IR : hwio.BooleanInput
input channel for the IR beam to check if the hopper is up
max_lag : float
time in seconds to wait before checking to make sure the hopper is up
"""
def __init__(self,IR,solenoid,max_lag=0.3,*args,**kwargs):
super(Hopper, self).__init__(*args,**kwargs)
self.max_lag = max_lag
if isinstance(IR,hwio.BooleanInput):
self.IR = IR
else:
raise ValueError('%s is not an input channel' % IR)
if isinstance(solenoid,hwio.BooleanOutput):
self.solenoid = solenoid
else:
raise ValueError('%s is not an output channel' % solenoid)
def check(self):
"""reads the status of solenoid & IR beam, then throws an error if they don't match
Returns
-------
bool
True if the hopper is up.
Raises
------
HopperActiveError
The Hopper is up and it shouldn't be. (The IR beam is tripped, but the solenoid is not active.)
HopperInactiveError
The Hopper is down and it shouldn't be. (The IR beam is not tripped, but the solenoid is active.)
"""
IR_status = self.IR.read()
solenoid_status = self.solenoid.read()
if IR_status != solenoid_status:
if IR_status:
raise HopperActiveError
elif solenoid_status:
raise HopperInactiveError
else:
raise ComponentError("the IR & solenoid don't match: IR:%s,solenoid:%s" % (IR_status,solenoid_status))
else:
return IR_status
def up(self):
"""Raises the hopper up.
Returns
-------
bool
True if the hopper comes up.
Raises
------
HopperWontComeUpError
The Hopper did not raise.
"""
self.solenoid.write(True)
time_up = self.IR.poll(timeout=self.max_lag)
if time_up is None: # poll timed out
self.solenoid.write(False)
raise HopperWontComeUpError
else:
return time_up
def down(self):
"""Lowers the hopper.
Returns
-------
bool
True if the hopper drops.
Raises
------
HopperWontDropError
The Hopper did not drop.
"""
self.solenoid.write(False)
time_down = datetime.datetime.now()
utils.wait(self.max_lag)
try:
self.check()
except HopperActiveError as e:
raise HopperWontDropError(e)
return time_down
def feed(self,dur=2.0,error_check=True):
"""Performs a feed
Parameters
---------
dur : float, optional
duration of feed in seconds
Returns
-------
(datetime, float)
Timestamp of the feed and the feed duration
Raises
------
HopperAlreadyUpError
The Hopper was already up at the beginning of the feed.
HopperWontComeUpError
The Hopper did not raise for the feed.
HopperWontDropError
            The Hopper did not drop after the feed.
"""
assert self.max_lag < dur, "max_lag (%ss) must be shorter than duration (%ss)" % (self.max_lag,dur)
try:
self.check()
except HopperActiveError as e:
self.solenoid.write(False)
raise HopperAlreadyUpError(e)
feed_time = self.up()
utils.wait(dur)
feed_over = self.down()
feed_duration = feed_over - feed_time
return (feed_time,feed_duration)
def reward(self,value=2.0):
"""wrapper for `feed`, passes *value* into *dur* """
return self.feed(dur=value)
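# Illustrative feed cycle for the Hopper above (a sketch only: `ir_channel` and
# `solenoid_channel` are placeholder names for pre-configured hwio.BooleanInput /
# hwio.BooleanOutput channels, not objects defined in this module):
#
#     hopper = Hopper(ir_channel, solenoid_channel, max_lag=0.3)
#     try:
#         feed_time, feed_duration = hopper.feed(dur=2.0)
#     except (HopperWontComeUpError, HopperWontDropError):
#         pass  # e.g. log the hardware fault and abort the trial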
## Peck Port ##
class PeckPort(BaseComponent):
""" Class which holds information about peck ports
Parameters
----------
LED : hwio.BooleanOutput
output channel to activate the LED in the peck port
IR : hwio.BooleanInput
input channel for the IR beam to check for a peck
Attributes
----------
LED : hwio.BooleanOutput
output channel to activate the LED in the peck port
IR : hwio.BooleanInput
input channel for the IR beam to check for a peck
"""
def __init__(self,IR,LED,*args,**kwargs):
super(PeckPort, self).__init__(*args,**kwargs)
if isinstance(IR,hwio.BooleanInput):
self.IR = IR
else:
raise ValueError('%s is not an input channel' % IR)
if isinstance(LED,hwio.BooleanOutput):
self.LED = LED
else:
raise ValueError('%s is not an output channel' % LED)
def status(self):
"""reads the status of the IR beam
Returns
-------
bool
True if beam is broken
"""
return self.IR.read()
def off(self):
""" Turns the LED off
Returns
-------
bool
True if successful
"""
self.LED.write(False)
return True
def on(self):
"""Turns the LED on
Returns
-------
bool
True if successful
"""
self.LED.write(True)
return True
def flash(self,dur=1.0,isi=0.1):
"""Flashes the LED on and off with *isi* seconds high and low for *dur* seconds, then revert LED to prior state.
Parameters
----------
dur : float, optional
Duration of the light flash in seconds.
isi : float,optional
Time interval between toggles. (0.5 * period)
Returns
-------
(datetime, float)
Timestamp of the flash and the flash duration
"""
LED_state = self.LED.read()
flash_time = datetime.datetime.now()
flash_duration = datetime.datetime.now() - flash_time
while flash_duration < datetime.timedelta(seconds=dur):
self.LED.toggle()
utils.wait(isi)
flash_duration = datetime.datetime.now() - flash_time
self.LED.write(LED_state)
return (flash_time,flash_duration)
def poll(self,timeout=None):
""" Polls the peck port until there is a peck
Returns
-------
datetime
Timestamp of the IR beam being broken.
"""
return self.IR.poll(timeout)
## House Light ##
class HouseLight(BaseComponent):
""" Class which holds information about the house light
Keywords
--------
light : hwio.BooleanOutput
output channel to turn the light on and off
Methods:
    on() -- turns the house light on
    off() -- turns the house light off
timeout(dur) -- turns off the house light for 'dur' seconds (default=10.0)
punish() -- calls timeout() for 'value' as 'dur'
"""
def __init__(self,light,*args,**kwargs):
super(HouseLight, self).__init__(*args,**kwargs)
if isinstance(light,hwio.BooleanOutput):
self.light = light
else:
raise ValueError('%s is not an output channel' % light)
def off(self):
"""Turns the house light off.
Returns
-------
bool
True if successful.
"""
self.light.write(False)
return True
def on(self):
"""Turns the house light on.
Returns
-------
bool
True if successful.
"""
self.light.write(True)
return True
def timeout(self,dur=10.0):
"""Turn off the light for *dur* seconds
Keywords
-------
dur : float, optional
The amount of time (in seconds) to turn off the light.
Returns
-------
(datetime, float)
Timestamp of the timeout and the timeout duration
"""
timeout_time = datetime.datetime.now()
self.light.write(False)
utils.wait(dur)
timeout_duration = datetime.datetime.now() - timeout_time
self.light.write(True)
return (timeout_time,timeout_duration)
def punish(self,value=10.0):
"""Calls `timeout(dur)` with *value* as *dur* """
return self.timeout(dur=value)
## Cue Light ##
class RGBLight(BaseComponent):
""" Class which holds information about an RGB cue light
Keywords
--------
red : hwio.BooleanOutput
output channel for the red LED
green : hwio.BooleanOutput
output channel for the green LED
blue : hwio.BooleanOutput
output channel for the blue LED
"""
def __init__(self,red,green,blue,*args,**kwargs):
super(RGBLight, self).__init__(*args,**kwargs)
if isinstance(red,hwio.BooleanOutput):
self._red = red
else:
raise ValueError('%s is not an output channel' % red)
if isinstance(green,hwio.BooleanOutput):
self._green = green
else:
raise ValueError('%s is not an output channel' % green)
if isinstance(blue,hwio.BooleanOutput):
self._blue = blue
else:
raise ValueError('%s is not an output channel' % blue)
def red(self):
"""Turns the cue light to red
Returns
-------
bool
`True` if successful.
"""
self._green.write(False)
self._blue.write(False)
return self._red.write(True)
def green(self):
"""Turns the cue light to green
Returns
-------
bool
`True` if successful.
"""
self._red.write(False)
self._blue.write(False)
return self._green.write(True)
def blue(self):
"""Turns the cue light to blue
Returns
-------
bool
`True` if successful.
"""
self._red.write(False)
self._green.write(False)
return self._blue.write(True)
def off(self):
"""Turns the cue light off
Returns
-------
bool
`True` if successful.
"""
self._red.write(False)
self._green.write(False)
self._blue.write(False)
return True
# ## Perch ##
# class Perch(BaseComponent):
# """Class which holds information about a perch
# Has parts:
# - IR Beam (input)
# - speaker
# """
# def __init__(self,*args,**kwargs):
# super(Perch, self).__init__(*args,**kwargs)
| {
"content_hash": "9edf739e1d4460866f519369384c5651",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 120,
"avg_line_length": 27.006849315068493,
"alnum_prop": 0.5571899568856201,
"repo_name": "MarvinT/pyoperant",
"id": "cef3f698a6570bee4351c58937ad02de374f103a",
"size": "11829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyoperant/components.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "1870"
},
{
"name": "C",
"bytes": "2143"
},
{
"name": "Jupyter Notebook",
"bytes": "43178"
},
{
"name": "Makefile",
"bytes": "150"
},
{
"name": "Perl",
"bytes": "8609"
},
{
"name": "Python",
"bytes": "155694"
}
],
"symlink_target": ""
} |
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from orcid_api_v3.models.created_date_v30_rc2 import CreatedDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.external_i_ds_v30_rc2 import ExternalIDsV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.fuzzy_date_v30_rc2 import FuzzyDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.last_modified_date_v30_rc2 import LastModifiedDateV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.organization_v30_rc2 import OrganizationV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.source_v30_rc2 import SourceV30Rc2 # noqa: F401,E501
from orcid_api_v3.models.url_v30_rc2 import UrlV30Rc2 # noqa: F401,E501
class DistinctionV30Rc2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_date': 'CreatedDateV30Rc2',
'last_modified_date': 'LastModifiedDateV30Rc2',
'source': 'SourceV30Rc2',
'put_code': 'int',
'path': 'str',
'department_name': 'str',
'role_title': 'str',
'start_date': 'FuzzyDateV30Rc2',
'end_date': 'FuzzyDateV30Rc2',
'organization': 'OrganizationV30Rc2',
'url': 'UrlV30Rc2',
'external_ids': 'ExternalIDsV30Rc2',
'display_index': 'str',
'visibility': 'str'
}
attribute_map = {
'created_date': 'created-date',
'last_modified_date': 'last-modified-date',
'source': 'source',
'put_code': 'put-code',
'path': 'path',
'department_name': 'department-name',
'role_title': 'role-title',
'start_date': 'start-date',
'end_date': 'end-date',
'organization': 'organization',
'url': 'url',
'external_ids': 'external-ids',
'display_index': 'display-index',
'visibility': 'visibility'
}
def __init__(self, created_date=None, last_modified_date=None, source=None, put_code=None, path=None, department_name=None, role_title=None, start_date=None, end_date=None, organization=None, url=None, external_ids=None, display_index=None, visibility=None): # noqa: E501
"""DistinctionV30Rc2 - a model defined in Swagger""" # noqa: E501
self._created_date = None
self._last_modified_date = None
self._source = None
self._put_code = None
self._path = None
self._department_name = None
self._role_title = None
self._start_date = None
self._end_date = None
self._organization = None
self._url = None
self._external_ids = None
self._display_index = None
self._visibility = None
self.discriminator = None
if created_date is not None:
self.created_date = created_date
if last_modified_date is not None:
self.last_modified_date = last_modified_date
if source is not None:
self.source = source
if put_code is not None:
self.put_code = put_code
if path is not None:
self.path = path
if department_name is not None:
self.department_name = department_name
if role_title is not None:
self.role_title = role_title
self.start_date = start_date
if end_date is not None:
self.end_date = end_date
self.organization = organization
if url is not None:
self.url = url
if external_ids is not None:
self.external_ids = external_ids
if display_index is not None:
self.display_index = display_index
if visibility is not None:
self.visibility = visibility
@property
def created_date(self):
"""Gets the created_date of this DistinctionV30Rc2. # noqa: E501
:return: The created_date of this DistinctionV30Rc2. # noqa: E501
:rtype: CreatedDateV30Rc2
"""
return self._created_date
@created_date.setter
def created_date(self, created_date):
"""Sets the created_date of this DistinctionV30Rc2.
:param created_date: The created_date of this DistinctionV30Rc2. # noqa: E501
:type: CreatedDateV30Rc2
"""
self._created_date = created_date
@property
def last_modified_date(self):
"""Gets the last_modified_date of this DistinctionV30Rc2. # noqa: E501
:return: The last_modified_date of this DistinctionV30Rc2. # noqa: E501
:rtype: LastModifiedDateV30Rc2
"""
return self._last_modified_date
@last_modified_date.setter
def last_modified_date(self, last_modified_date):
"""Sets the last_modified_date of this DistinctionV30Rc2.
:param last_modified_date: The last_modified_date of this DistinctionV30Rc2. # noqa: E501
:type: LastModifiedDateV30Rc2
"""
self._last_modified_date = last_modified_date
@property
def source(self):
"""Gets the source of this DistinctionV30Rc2. # noqa: E501
:return: The source of this DistinctionV30Rc2. # noqa: E501
:rtype: SourceV30Rc2
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this DistinctionV30Rc2.
:param source: The source of this DistinctionV30Rc2. # noqa: E501
:type: SourceV30Rc2
"""
self._source = source
@property
def put_code(self):
"""Gets the put_code of this DistinctionV30Rc2. # noqa: E501
:return: The put_code of this DistinctionV30Rc2. # noqa: E501
:rtype: int
"""
return self._put_code
@put_code.setter
def put_code(self, put_code):
"""Sets the put_code of this DistinctionV30Rc2.
:param put_code: The put_code of this DistinctionV30Rc2. # noqa: E501
:type: int
"""
self._put_code = put_code
@property
def path(self):
"""Gets the path of this DistinctionV30Rc2. # noqa: E501
:return: The path of this DistinctionV30Rc2. # noqa: E501
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""Sets the path of this DistinctionV30Rc2.
:param path: The path of this DistinctionV30Rc2. # noqa: E501
:type: str
"""
self._path = path
@property
def department_name(self):
"""Gets the department_name of this DistinctionV30Rc2. # noqa: E501
:return: The department_name of this DistinctionV30Rc2. # noqa: E501
:rtype: str
"""
return self._department_name
@department_name.setter
def department_name(self, department_name):
"""Sets the department_name of this DistinctionV30Rc2.
:param department_name: The department_name of this DistinctionV30Rc2. # noqa: E501
:type: str
"""
self._department_name = department_name
@property
def role_title(self):
"""Gets the role_title of this DistinctionV30Rc2. # noqa: E501
:return: The role_title of this DistinctionV30Rc2. # noqa: E501
:rtype: str
"""
return self._role_title
@role_title.setter
def role_title(self, role_title):
"""Sets the role_title of this DistinctionV30Rc2.
:param role_title: The role_title of this DistinctionV30Rc2. # noqa: E501
:type: str
"""
self._role_title = role_title
@property
def start_date(self):
"""Gets the start_date of this DistinctionV30Rc2. # noqa: E501
:return: The start_date of this DistinctionV30Rc2. # noqa: E501
:rtype: FuzzyDateV30Rc2
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this DistinctionV30Rc2.
:param start_date: The start_date of this DistinctionV30Rc2. # noqa: E501
:type: FuzzyDateV30Rc2
"""
if start_date is None:
raise ValueError("Invalid value for `start_date`, must not be `None`") # noqa: E501
self._start_date = start_date
@property
def end_date(self):
"""Gets the end_date of this DistinctionV30Rc2. # noqa: E501
:return: The end_date of this DistinctionV30Rc2. # noqa: E501
:rtype: FuzzyDateV30Rc2
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this DistinctionV30Rc2.
:param end_date: The end_date of this DistinctionV30Rc2. # noqa: E501
:type: FuzzyDateV30Rc2
"""
self._end_date = end_date
@property
def organization(self):
"""Gets the organization of this DistinctionV30Rc2. # noqa: E501
:return: The organization of this DistinctionV30Rc2. # noqa: E501
:rtype: OrganizationV30Rc2
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this DistinctionV30Rc2.
:param organization: The organization of this DistinctionV30Rc2. # noqa: E501
:type: OrganizationV30Rc2
"""
if organization is None:
raise ValueError("Invalid value for `organization`, must not be `None`") # noqa: E501
self._organization = organization
@property
def url(self):
"""Gets the url of this DistinctionV30Rc2. # noqa: E501
:return: The url of this DistinctionV30Rc2. # noqa: E501
:rtype: UrlV30Rc2
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this DistinctionV30Rc2.
:param url: The url of this DistinctionV30Rc2. # noqa: E501
:type: UrlV30Rc2
"""
self._url = url
@property
def external_ids(self):
"""Gets the external_ids of this DistinctionV30Rc2. # noqa: E501
:return: The external_ids of this DistinctionV30Rc2. # noqa: E501
:rtype: ExternalIDsV30Rc2
"""
return self._external_ids
@external_ids.setter
def external_ids(self, external_ids):
"""Sets the external_ids of this DistinctionV30Rc2.
:param external_ids: The external_ids of this DistinctionV30Rc2. # noqa: E501
:type: ExternalIDsV30Rc2
"""
self._external_ids = external_ids
@property
def display_index(self):
"""Gets the display_index of this DistinctionV30Rc2. # noqa: E501
:return: The display_index of this DistinctionV30Rc2. # noqa: E501
:rtype: str
"""
return self._display_index
@display_index.setter
def display_index(self, display_index):
"""Sets the display_index of this DistinctionV30Rc2.
:param display_index: The display_index of this DistinctionV30Rc2. # noqa: E501
:type: str
"""
self._display_index = display_index
@property
def visibility(self):
"""Gets the visibility of this DistinctionV30Rc2. # noqa: E501
:return: The visibility of this DistinctionV30Rc2. # noqa: E501
:rtype: str
"""
return self._visibility
@visibility.setter
def visibility(self, visibility):
"""Sets the visibility of this DistinctionV30Rc2.
:param visibility: The visibility of this DistinctionV30Rc2. # noqa: E501
:type: str
"""
allowed_values = ["LIMITED", "REGISTERED_ONLY", "PUBLIC", "PRIVATE"] # noqa: E501
if visibility not in allowed_values:
raise ValueError(
"Invalid value for `visibility` ({0}), must be one of {1}" # noqa: E501
.format(visibility, allowed_values)
)
self._visibility = visibility
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DistinctionV30Rc2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DistinctionV30Rc2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
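# Minimal construction sketch for the model above (illustrative only; the field
# values are invented and the nested objects are shown as placeholders):
#
#     distinction = DistinctionV30Rc2(
#         role_title='Research Fellow',
#         start_date=FuzzyDateV30Rc2(...),       # required: the setter rejects None
#         organization=OrganizationV30Rc2(...),  # required: the setter rejects None
#         visibility='PUBLIC',                   # must be one of the allowed values
#     )
#     payload = distinction.to_dict()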
| {
"content_hash": "b75a38da1a2307a8a2c12738ff21c011",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 276,
"avg_line_length": 30.125541125541126,
"alnum_prop": 0.5963500502945825,
"repo_name": "Royal-Society-of-New-Zealand/NZ-ORCID-Hub",
"id": "d6136c099959326c5aa9819cdc23e3da227ad6f7",
"size": "13935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orcid_api_v3/models/distinction_v30_rc2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20266"
},
{
"name": "Dockerfile",
"bytes": "3303"
},
{
"name": "HTML",
"bytes": "239338"
},
{
"name": "JavaScript",
"bytes": "2240"
},
{
"name": "Makefile",
"bytes": "600"
},
{
"name": "PLpgSQL",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "7935510"
},
{
"name": "Shell",
"bytes": "12088"
}
],
"symlink_target": ""
} |
import json
import logging
import urllib2
from celery.schedules import crontab
from celery.task import periodic_task
from django.conf import settings
from corehq.apps.accounting.models import Currency
update_logger = logging.getLogger("currency_update")
smsbillables_logger = logging.getLogger("smsbillables")
@periodic_task(run_every=crontab(minute=0, hour=9), queue=getattr(settings, 'CELERY_PERIODIC_QUEUE','celery'))
def update_exchange_rates(app_id=settings.OPEN_EXCHANGE_RATES_ID):
try:
update_logger.info("Updating exchange rates...")
rates = json.load(urllib2.urlopen(
'https://openexchangerates.org/api/latest.json?app_id=%s' % app_id))['rates']
default_rate = float(rates[Currency.get_default().code])
for code, rate in rates.items():
currency, _ = Currency.objects.get_or_create(code=code)
currency.rate_to_default = float(rate) / default_rate
currency.save()
update_logger.info("Exchange rates updated.")
except Exception as e:
smsbillables_logger.error(e.message)
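# Worked example of the conversion above (numbers invented): if the feed returns
# {'USD': 1.0, 'EUR': 0.9, 'INR': 75.0} and the default currency is INR, then
# default_rate = 75.0 and EUR.rate_to_default = 0.9 / 75.0 = 0.012.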
| {
"content_hash": "8bd7db392400bf34d8ee02a34f337b79",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 110,
"avg_line_length": 38.785714285714285,
"alnum_prop": 0.7044198895027625,
"repo_name": "gmimano/commcaretest",
"id": "af37f354577181f1a35dac725828ac1ce367b7bf",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/smsbillables/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "282577"
},
{
"name": "JavaScript",
"bytes": "2731012"
},
{
"name": "Python",
"bytes": "4738450"
},
{
"name": "Shell",
"bytes": "22454"
}
],
"symlink_target": ""
} |
def quicksort(myList, start, end):
if start < end:
# partition the list
pivot = partition(myList, start, end)
# sort both halves
quicksort(myList, start, pivot-1)
quicksort(myList, pivot+1, end)
return myList
def partition(myList, start, end):
pivot = myList[start]
left = start+1
right = end
done = False
while not done:
while left <= right and myList[left] <= pivot:
left = left + 1
        while myList[right] >= pivot and right >= left:
            right = right - 1
        if right < left:
            done = True
        else:
            # swap places
            temp = myList[left]
            myList[left] = myList[right]
            myList[right] = temp
    # swap start with myList[right]
    temp = myList[start]
    myList[start] = myList[right]
    myList[right] = temp
return right
if __name__ == '__main__':
    # Example usage
    array = [2,5,1,3,4,8,11,9,10,12]
    print(quicksort(array, 0, 9))
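    # Expected output: [1, 2, 3, 4, 5, 8, 9, 10, 11, 12]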
| {
"content_hash": "33c5b63fbdf7856c1bb7bc3afb765c8f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 54,
"avg_line_length": 27.77777777777778,
"alnum_prop": 0.559,
"repo_name": "salman-bhai/DS-Algo-Handbook",
"id": "cdd384606d209d39215ee3f6bab445681dc61c80",
"size": "1000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Algorithms/Sort_Algorithms/Quick_Sort/QuickSort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "72652"
},
{
"name": "C++",
"bytes": "100390"
},
{
"name": "Java",
"bytes": "84294"
},
{
"name": "Python",
"bytes": "45391"
}
],
"symlink_target": ""
} |
from OpenGLCffi.GLES1 import params
@params(api='gles1', prms=['mode'])
def glBlendEquationOES(mode):
pass
| {
"content_hash": "162b0498b7529ddbecf86b22c242739c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 35,
"avg_line_length": 18.333333333333332,
"alnum_prop": 0.7454545454545455,
"repo_name": "cydenix/OpenGLCffi",
"id": "8e336a510399d976cc9cb841e675ca7e6866d44e",
"size": "110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenGLCffi/GLES1/EXT/OES/blend_subtract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1646"
},
{
"name": "C++",
"bytes": "188"
},
{
"name": "Python",
"bytes": "1853617"
}
],
"symlink_target": ""
} |
from abc import ABC, abstractmethod
class Serializable(ABC):
def __init__(self):
super(Serializable, self).__init__()
@abstractmethod
def serialize(self):
pass
@staticmethod
@abstractmethod
def unserialize(marshall):
pass
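# A minimal concrete subclass sketch (illustrative only; `Point` is not part of
# this package):
#
#     class Point(Serializable):
#         def __init__(self, x, y):
#             super().__init__()
#             self.x, self.y = x, y
#
#         def serialize(self):
#             return {'x': self.x, 'y': self.y}
#
#         @staticmethod
#         def unserialize(marshall):
#             return Point(marshall['x'], marshall['y'])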
| {
"content_hash": "a340f313603881c8a46daaa22874d8d4",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 44,
"avg_line_length": 18.266666666666666,
"alnum_prop": 0.6277372262773723,
"repo_name": "ismtabo/file-mixer",
"id": "82961f9c29d43822243dc7056ebd29cb78a3a74b",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "file_mixer/main_view/patterns/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44995"
}
],
"symlink_target": ""
} |
"""
Yet Another Django Profiler middleware implementation
"""
from __future__ import unicode_literals
import cProfile
import logging
import marshal
import os
import pstats
import subprocess
import tempfile
try:
from unittest import mock
except ImportError:
import mock
from django.core.exceptions import MiddlewareNotUsed
from django.utils.six.moves import cStringIO as StringIO
from django.utils.translation import ugettext as _
from .conf import settings
log = logging.getLogger(__name__)
def func_strip_path(func_name):
"""Replacement for pstats.func_strip_path which yields qualified module names"""
filename, line, name = func_name
return settings.path_to_module_function(filename), line, name
def text_response(response, content):
"""Return a plain text message as the response content."""
response.content = content
response['Content-type'] = 'text/plain'
return response
def which(program):
"""Return the path of the named program in the PATH, or None if no such
executable program can be found Used to make sure that required binaries
are in place before attempting to call them."""
def is_exe(file_path):
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
program_path, _name = os.path.split(program)
if program_path:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
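# For example (illustrative only): which('dot') returns a path such as
# '/usr/bin/dot' when Graphviz is on the PATH, and None otherwise.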
class ProfilerMiddleware(object):
"""
Code profiling middleware which can display either a call graph PDF or
a table of called functions ordered by the desired statistic. For the call
graph, just append "?profile" to the URL. For the graph generation to
work, install Graphviz from http://www.graphviz.org/Download.php
For a statistics table, use the statistic you want to sort by as the
parameter (such as "?profile=time"). Sorting options include:
* calls (call count)
* cumulative (cumulative time)
* file (file name)
* module (file name)
* pcalls (primitive call count)
* line (line number)
* name (function name)
* nfl (name/file/line)
* stdname (standard name)
* time (internal time)
Additional parameters can be added when generating a statistics table:
* fraction - The fraction of total function calls to display (the default of .2 is omitted if max_calls or pattern are specified)
* max_calls - The maximum number of function calls to display
* pattern - Regular expression filter for function display names
To get these instructions in the app if you forget the usage options, use
"?profile=help" in the URL.
Inspiration:
* https://gist.github.com/kesor/1229681
* https://bitbucket.org/brodie/geordi
"""
def __init__(self):
if not settings.YADP_ENABLED:
# Disable the middleware completely when YADP_ENABLED = False
raise MiddlewareNotUsed()
self.error = None
self.profiler = None
def process_view(self, request, callback, callback_args, callback_kwargs):
if settings.YADP_ENABLED and (settings.YADP_PROFILE_PARAMETER in request.REQUEST):
self.error = None
if settings.YADP_PROFILER_BACKEND == 'yappi':
try:
from .yadp_yappi import YappiProfile
wall = request.REQUEST.get(settings.YADP_CLOCK_PARAMETER, None) == 'wall'
self.profiler = YappiProfile(wall=wall)
except Exception as e:
log.exception(e)
self.error = _('Could not find Yappi; please install Yappi to be able to use it for profiling')
return None
else:
self.profiler = cProfile.Profile()
args = (request,) + callback_args
return self.profiler.runcall(callback, *args, **callback_kwargs)
def process_response(self, request, response):
if settings.YADP_ENABLED and settings.YADP_PROFILE_PARAMETER in request.REQUEST:
if self.error:
return text_response(response, self.error)
self.profiler.create_stats()
mode = request.REQUEST[settings.YADP_PROFILE_PARAMETER]
if mode == 'file':
# Work around bug on Python versions >= 2.7.4
mode = 'fil'
if not mode:
if not which('dot'):
return text_response(response, _('Could not find "dot" from Graphviz; please install Graphviz to enable call graph generation'))
if not which('gprof2dot.py'):
return text_response(response, _('Could not find gprof2dot.py, which should have been installed by yet-another-django-profiler'))
with tempfile.NamedTemporaryFile() as stats:
stats.write(marshal.dumps(self.profiler.stats))
stats.flush()
cmd = ('gprof2dot.py -f pstats {} | dot -Tpdf'.format(stats.name))
process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
output = process.communicate()[0]
return_code = process.poll()
if return_code:
raise Exception(_('gprof2dot.py exited with {return_code}').format(return_code=return_code))
response.content = output
response['Content-Type'] = 'application/pdf'
return response
elif mode == 'help':
return text_response(response, ProfilerMiddleware.__doc__)
else:
out = StringIO()
stats = pstats.Stats(self.profiler, stream=out)
with mock.patch('pstats.func_strip_path') as mock_func_strip_path:
mock_func_strip_path.side_effect = func_strip_path
stats.strip_dirs()
restrictions = []
if settings.YADP_PATTERN_PARAMETER in request.REQUEST:
restrictions.append(request.REQUEST[settings.YADP_PATTERN_PARAMETER])
if settings.YADP_FRACTION_PARAMETER in request.REQUEST:
restrictions.append(float(request.REQUEST[settings.YADP_FRACTION_PARAMETER]))
elif settings.YADP_MAX_CALLS_PARAMETER in request.REQUEST:
restrictions.append(int(request.REQUEST[settings.YADP_MAX_CALLS_PARAMETER]))
elif settings.YADP_PATTERN_PARAMETER not in request.REQUEST:
restrictions.append(.2)
stats.sort_stats(mode).print_stats(*restrictions)
return text_response(response, out.getvalue())
return response
| {
"content_hash": "b475bcf436622c6821f879f60fe4cf9e",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 149,
"avg_line_length": 40.83040935672515,
"alnum_prop": 0.621741621311945,
"repo_name": "wiliamsouza/yet-another-django-profiler",
"id": "9ca14e12f70558c08d28285cb7d24b5cc2967a5b",
"size": "7260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yet_another_django_profiler/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "146314"
}
],
"symlink_target": ""
} |
import dragonfly
import bee
from bee import *
from bee.spyderhive.hivemaphive import hivemapframe
import Spyder
from dragonfly.commandhive import commandhive, commandapp
from panda3d.core import getModelPath
import os
getModelPath().prependPath(os.getcwd())
from bee import hivemodule
class myhivemapframe(hivemapframe):
hm = Spyder.Hivemap.fromfile("tut-worker-4b.web")
class myapp(commandapp):
def on_tick(self):
taskMgr.step()
taskMgr.step()
class myhive(commandhive):
_hivecontext = hivemodule.appcontext(myapp)
h = myhivemapframe()
raiser = bee.raiser()
bee.connect("evexc", raiser)
m = myhive().getinstance()
m.build("m")
m.place()
m.close()
m.init()
m.run()
| {
"content_hash": "392ef92987408e005b29739796c1ae7a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 57,
"avg_line_length": 16.40909090909091,
"alnum_prop": 0.7188365650969529,
"repo_name": "agoose77/hivesystem",
"id": "b6507d1cc06e597fba60ac89c8c76b06e6b34bbf",
"size": "722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manual/chess/tut-worker-4b.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2491478"
},
{
"name": "Shell",
"bytes": "1164"
}
],
"symlink_target": ""
} |
''' This script shows how to use a Motor Stage
'''
from basil.dut import Dut
dut = Dut('mercury_pyserial.yaml')
dut.init()
print(dut["MotorStage"].get_position())
# dut["MotorStage"].set_position(100000)
| {
"content_hash": "1869225768785a2f28ed050815a56b54",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 46,
"avg_line_length": 20.7,
"alnum_prop": 0.7053140096618358,
"repo_name": "SiLab-Bonn/basil",
"id": "81e530b35c76fd35344aaa7237cbc1bfeb5bba2a",
"size": "424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/lab_devices/MotorStage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "17821"
},
{
"name": "Python",
"bytes": "502781"
},
{
"name": "SystemVerilog",
"bytes": "2358"
},
{
"name": "Verilog",
"bytes": "428771"
}
],
"symlink_target": ""
} |
"""FixMatch with Distribution Alignment and Adaptative Confidence Ratio.
"""
import os
import sys
from typing import Callable
import jax
import jax.numpy as jn
import objax
from absl import app
from absl import flags
from absl.flags import FLAGS
from objax.typing import JaxArray
from semi_supervised_domain_adaptation.lib.data import MixData, CTAData
from semi_supervised_domain_adaptation.lib.train import TrainableSSDAModule
from shared.data.fsl import DATASETS as FSL_DATASETS
from shared.data.ssl import DATASETS as SSL_DATASETS, DataSetSSL
from shared.train import ScheduleCos
from shared.util import setup_tf, MyParallel
from shared.zoo.models import network, ARCHS
class Baseline(TrainableSSDAModule):
def __init__(self, nclass: int, model: Callable, **kwargs):
super().__init__(nclass, kwargs)
self.model: objax.Module = model(colors=3, nclass=nclass, **kwargs)
self.model_ema = objax.optimizer.ExponentialMovingAverageModule(self.model, momentum=0.999)
if FLAGS.arch.endswith('pretrain'):
# Initialize weights of EMA with pretrained model's weights.
self.model_ema.ema.momentum = 0
self.model_ema.update_ema()
self.model_ema.ema.momentum = 0.999
self.stats = objax.Module()
self.stats.keygen = objax.random.DEFAULT_GENERATOR
self.stats.p_labeled = objax.nn.ExponentialMovingAverage((nclass,), init_value=1 / nclass)
self.stats.p_unlabeled = objax.nn.MovingAverage((nclass,), buffer_size=128, init_value=1 / nclass)
train_vars = self.model.vars() + self.stats.vars()
self.opt = objax.optimizer.Momentum(train_vars)
self.lr = ScheduleCos(self.params.lr, self.params.lr_decay)
@objax.Function.with_vars(self.model_ema.vars())
def eval_op(x: JaxArray, domain: int) -> JaxArray:
return objax.functional.softmax(self.model_ema(x, training=False, domain=domain))
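        # sx packs weakly/strongly augmented labeled images interleaved, so the
        # logits are de-interleaved with [::2]/[1::2] below; the unlabeled target
        # batch tu is forwarded through the model but its logits are discarded
        # (this baseline has no unlabeled loss term).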
def loss_function(sx, sy, tu):
c, h, w = sx.shape[-3:]
xu = jn.concatenate((sx, tu)).reshape((-1, c, h, w))
logit = self.model(xu, training=True)
logit_sx = jn.split(logit, (2 * sx.shape[0],))[0]
logit_sx_weak, logit_sx_strong = logit_sx[::2], logit_sx[1::2]
xe = 0.5 * (objax.functional.loss.cross_entropy_logits(logit_sx_weak, sy).mean() +
objax.functional.loss.cross_entropy_logits(logit_sx_strong, sy).mean())
wd = 0.5 * sum((v.value ** 2).sum() for k, v in train_vars.items() if k.endswith('.w'))
loss = xe + self.params.wd * wd
return loss, {'losses/xe': xe, 'losses/wd': wd}
gv = objax.GradValues(loss_function, train_vars)
@objax.Function.with_vars(self.vars())
def train_op(step, sx, sy, tx, ty, tu, probe=None):
y_probe = eval_op(probe, 1) if probe is not None else None
p = step / (FLAGS.train_mimg << 20)
lr = self.lr(p)
g, v = gv(jn.concatenate((sx, tx)), jn.concatenate((sy, ty)), tu)
self.opt(lr, objax.functional.parallel.pmean(g))
self.model_ema.update_ema()
return objax.functional.parallel.pmean({'monitors/lr': lr, **v[1]}), y_probe
self.train_op = MyParallel(train_op, reduce=lambda x: x)
self.eval_op = MyParallel(eval_op, static_argnums=(1,))
def main(argv):
del argv
print('JAX host: %d / %d' % (jax.host_id(), jax.host_count()))
print('JAX devices:\n%s' % '\n'.join(str(d) for d in jax.devices()), flush=True)
setup_tf()
source = FSL_DATASETS()[f'{FLAGS.dataset}_{FLAGS.source}-0']()
target_name, target_samples_per_class, target_seed = DataSetSSL.parse_name(f'{FLAGS.dataset}_{FLAGS.target}')
target_labeled = SSL_DATASETS()[target_name](target_samples_per_class, target_seed)
target_unlabeled = FSL_DATASETS()[f'{target_name}-0']()
testsets = [target_unlabeled.test, source.test] # Ordered by domain (unlabeled always first)
module = Baseline(source.nclass, network(FLAGS.arch),
lr=FLAGS.lr,
lr_decay=FLAGS.lr_decay,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
uratio=FLAGS.uratio)
logdir = f'SSDA/{FLAGS.dataset}/{FLAGS.source}/{FLAGS.target}/{FLAGS.augment}/{module.__class__.__name__}/'
logdir += '_'.join(sorted('%s%s' % k for k in module.params.items()))
logdir = os.path.join(FLAGS.logdir, logdir)
test = {}
for domain, testset in enumerate(testsets):
test.update((k, v.parse().batch(FLAGS.batch).nchw().map(lambda d: {**d, 'domain': domain}).prefetch(16))
for k, v in testset.items())
if FLAGS.augment.startswith('('):
train = MixData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
elif FLAGS.augment.startswith('CTA('):
train = CTAData(source.train, target_labeled.train, target_unlabeled.train, source.nclass, FLAGS.batch,
FLAGS.uratio)
else:
raise ValueError(f'Augment flag value {FLAGS.augment} not supported.')
module.train(FLAGS.train_mimg << 10, FLAGS.report_kimg, train, test, logdir, FLAGS.keep_ckpts)
train.stop()
if __name__ == '__main__':
flags.DEFINE_enum('arch', 'wrn28-2', ARCHS, 'Model architecture.')
flags.DEFINE_float('lr', 0.03, 'Learning rate.')
flags.DEFINE_float('lr_decay', 0.25, 'Learning rate decay.')
flags.DEFINE_float('wd', 0.001, 'Weight decay.')
flags.DEFINE_integer('batch', 64, 'Batch size')
flags.DEFINE_integer('uratio', 3, 'Unlabeled batch size ratio')
flags.DEFINE_integer('report_kimg', 64, 'Reporting period in kibi-images.')
flags.DEFINE_integer('train_mimg', 8, 'Training duration in mega-images.')
flags.DEFINE_integer('keep_ckpts', 5, 'Number of checkpoints to keep (0 for all).')
flags.DEFINE_string('logdir', 'experiments', 'Directory where to save checkpoints and tensorboard data.')
flags.DEFINE_string('dataset', 'domainnet32', 'Source data to train on.')
flags.DEFINE_string('source', 'clipart', 'Source data to train on.')
flags.DEFINE_string('target', 'infograph(10,seed=1)', 'Target data to train on.')
FLAGS.set_default('augment', 'CTA(sm,sm,probe=1)')
FLAGS.set_default('para_augment', 8)
app.run(main)
| {
"content_hash": "63d87706836801f56e8c252eff9a6bbb",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 113,
"avg_line_length": 50.3203125,
"alnum_prop": 0.6365471200124204,
"repo_name": "google-research/adamatch",
"id": "e2bc3c7611420e935ba3cb73273c9ffac921ccbd",
"size": "7016",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "semi_supervised_domain_adaptation/baseline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from os.path import abspath, dirname, join
import astropy.config as _config
import astropy.io.registry as io_registry
from astropy import extern
from .table import Table
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.table.jsviewer`.
"""
jquery_url = _config.ConfigItem(
'https://code.jquery.com/jquery-3.6.0.min.js',
'The URL to the jquery library.')
datatables_url = _config.ConfigItem(
'https://cdn.datatables.net/1.10.12/js/jquery.dataTables.min.js',
'The URL to the jquery datatables library.')
css_urls = _config.ConfigItem(
['https://cdn.datatables.net/1.10.12/css/jquery.dataTables.css'],
'The URLs to the css file(s) to include.', cfgtype='string_list')
conf = Conf()
EXTERN_JS_DIR = abspath(join(dirname(extern.__file__), 'jquery', 'data', 'js'))
EXTERN_CSS_DIR = abspath(join(dirname(extern.__file__), 'jquery', 'data', 'css'))
_SORTING_SCRIPT_PART_1 = """
var astropy_sort_num = function(a, b) {{
var a_num = parseFloat(a);
var b_num = parseFloat(b);
if (isNaN(a_num) && isNaN(b_num))
return ((a < b) ? -1 : ((a > b) ? 1 : 0));
else if (!isNaN(a_num) && !isNaN(b_num))
return ((a_num < b_num) ? -1 : ((a_num > b_num) ? 1 : 0));
else
return isNaN(a_num) ? -1 : 1;
}}
"""
_SORTING_SCRIPT_PART_2 = """
jQuery.extend( jQuery.fn.dataTableExt.oSort, {{
"optionalnum-asc": astropy_sort_num,
"optionalnum-desc": function (a,b) {{ return -astropy_sort_num(a, b); }}
}});
"""
IPYNB_JS_SCRIPT = """
<script>
%(sorting_script1)s
require.config({{paths: {{
datatables: '{datatables_url}'
}}}});
require(["datatables"], function(){{
console.log("$('#{tid}').dataTable()");
%(sorting_script2)s
$('#{tid}').dataTable({{
order: [],
pageLength: {display_length},
lengthMenu: {display_length_menu},
pagingType: "full_numbers",
columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}]
}});
}});
</script>
""" % dict(sorting_script1=_SORTING_SCRIPT_PART_1,
sorting_script2=_SORTING_SCRIPT_PART_2)
HTML_JS_SCRIPT = _SORTING_SCRIPT_PART_1 + _SORTING_SCRIPT_PART_2 + """
$(document).ready(function() {{
$('#{tid}').dataTable({{
order: [],
pageLength: {display_length},
lengthMenu: {display_length_menu},
pagingType: "full_numbers",
columnDefs: [{{targets: {sort_columns}, type: "optionalnum"}}]
}});
}} );
"""
# Default CSS for the JSViewer writer
DEFAULT_CSS = """\
body {font-family: sans-serif;}
table.dataTable {width: auto !important; margin: 0 !important;}
.dataTables_filter, .dataTables_paginate {float: left !important; margin-left:1em}
"""
# Default CSS used when rendering a table in the IPython notebook
DEFAULT_CSS_NB = """\
table.dataTable {clear: both; width: auto !important; margin: 0 !important;}
.dataTables_info, .dataTables_length, .dataTables_filter, .dataTables_paginate{
display: inline-block; margin-right: 1em; }
.paginate_button { margin-right: 5px; }
"""
class JSViewer:
"""Provides an interactive HTML export of a Table.
This class provides an interface to the `DataTables
    <https://datatables.net/>`_ library, which allows an HTML table to be
    visualized interactively. It is used by the
    `~astropy.table.Table.show_in_browser` method.
Parameters
----------
use_local_files : bool, optional
Use local files or a CDN for JavaScript libraries. Default False.
display_length : int, optional
        Number of rows to show. Defaults to 50.
"""
def __init__(self, use_local_files=False, display_length=50):
self._use_local_files = use_local_files
self.display_length_menu = [[10, 25, 50, 100, 500, 1000, -1],
[10, 25, 50, 100, 500, 1000, "All"]]
self.display_length = display_length
for L in self.display_length_menu:
if display_length not in L:
L.insert(0, display_length)
@property
def jquery_urls(self):
if self._use_local_files:
return ['file://' + join(EXTERN_JS_DIR, 'jquery-3.6.0.min.js'),
'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min.js')]
else:
return [conf.jquery_url, conf.datatables_url]
@property
def css_urls(self):
if self._use_local_files:
return ['file://' + join(EXTERN_CSS_DIR,
'jquery.dataTables.css')]
else:
return conf.css_urls
def _jstable_file(self):
if self._use_local_files:
return 'file://' + join(EXTERN_JS_DIR, 'jquery.dataTables.min')
else:
return conf.datatables_url[:-3]
def ipynb(self, table_id, css=None, sort_columns='[]'):
html = f'<style>{css if css is not None else DEFAULT_CSS_NB}</style>'
html += IPYNB_JS_SCRIPT.format(
display_length=self.display_length,
display_length_menu=self.display_length_menu,
datatables_url=self._jstable_file(),
tid=table_id, sort_columns=sort_columns)
return html
def html_js(self, table_id='table0', sort_columns='[]'):
return HTML_JS_SCRIPT.format(
display_length=self.display_length,
display_length_menu=self.display_length_menu,
tid=table_id, sort_columns=sort_columns).strip()
def write_table_jsviewer(table, filename, table_id=None, max_lines=5000,
table_class="display compact", jskwargs=None,
css=DEFAULT_CSS, htmldict=None, overwrite=False):
if table_id is None:
table_id = f'table{id(table)}'
jskwargs = jskwargs or {}
jsv = JSViewer(**jskwargs)
sortable_columns = [i for i, col in enumerate(table.columns.values())
if col.info.dtype.kind in 'iufc']
html_options = {
'table_id': table_id,
'table_class': table_class,
'css': css,
'cssfiles': jsv.css_urls,
'jsfiles': jsv.jquery_urls,
'js': jsv.html_js(table_id=table_id, sort_columns=sortable_columns)
}
if htmldict:
html_options.update(htmldict)
if max_lines < len(table):
table = table[:max_lines]
table.write(filename, format='html', htmldict=html_options,
overwrite=overwrite)
io_registry.register_writer('jsviewer', Table, write_table_jsviewer)
| {
"content_hash": "0529103c8c444d9c599b506ed999e3d0",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 82,
"avg_line_length": 32.717171717171716,
"alnum_prop": 0.6023464032108675,
"repo_name": "lpsinger/astropy",
"id": "bc36e63812f9e0272cbd3041246ad0bf66b600f9",
"size": "6543",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astropy/table/jsviewer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040074"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12323563"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth import authenticate, login
def persona_login(request):
user = authenticate(assertion=request.POST['assertion'])
if user:
login(request, user)
return HttpResponse('OK')
| {
"content_hash": "e2f72d0da1b66ba43b14920b46b873c8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 60,
"avg_line_length": 28.8,
"alnum_prop": 0.75,
"repo_name": "PeterHo/mysite",
"id": "ea5cd1464cf140183ad93054c141df6dbd7869f4",
"size": "288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "53762"
},
{
"name": "HTML",
"bytes": "35270"
},
{
"name": "JavaScript",
"bytes": "411445"
},
{
"name": "Python",
"bytes": "138911"
}
],
"symlink_target": ""
} |
from django.db import models
class Election(models.Model):
name = models.CharField(max_length=128)
description = models.TextField()
country = models.ForeignKey('core.Country', null=True, blank=True)
live_date = models.DateTimeField()
dead_date = models.DateTimeField()
active = models.BooleanField(default=True)
def __unicode__(self):
return self.name
| {
"content_hash": "006d24280f6dfa1e9801de1bc9e1b4ad",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 26.2,
"alnum_prop": 0.6946564885496184,
"repo_name": "JustinWingChungHui/electionleaflets",
"id": "c1385bd687aef0b14f9d920f57fb220fb7fc3c1c",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electionleaflets/apps/elections/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23074"
},
{
"name": "Cucumber",
"bytes": "7808"
},
{
"name": "HTML",
"bytes": "121455"
},
{
"name": "Handlebars",
"bytes": "446"
},
{
"name": "JavaScript",
"bytes": "69039"
},
{
"name": "Python",
"bytes": "160654"
},
{
"name": "Ruby",
"bytes": "165"
}
],
"symlink_target": ""
} |
import collections
import time
from logging import getLogger
import django
from django.db import models
from django.db.models import query
from django.db.models.fields.related_descriptors import ForwardManyToOneDescriptor
__version__ = '1.2.3'
logger = getLogger(__name__)
class PrefetchManagerMixin(models.Manager):
use_for_related_fields = True
prefetch_definitions = {}
@classmethod
def get_queryset_class(cls):
return PrefetchQuerySet
def __init__(self):
super(PrefetchManagerMixin, self).__init__()
for name, prefetcher in self.prefetch_definitions.items():
if prefetcher.__class__ is not Prefetcher and not callable(prefetcher):
raise InvalidPrefetch("Invalid prefetch definition %s. This prefetcher needs to be a class not an instance." % name)
def get_queryset(self):
qs = self.get_queryset_class()(
self.model, prefetch_definitions=self.prefetch_definitions
)
if getattr(self, '_db', None) is not None:
qs = qs.using(self._db)
return qs
def prefetch(self, *args):
return self.get_queryset().prefetch(*args)
class PrefetchManager(PrefetchManagerMixin):
def __init__(self, **kwargs):
self.prefetch_definitions = kwargs
super(PrefetchManager, self).__init__()
class PrefetchIterable(query.ModelIterable):
def __iter__(self):
data = list(super(PrefetchIterable, self).__iter__())
for name, (forwarders, prefetcher) in self.queryset._prefetch.items():
prefetcher.fetch(data, name, self.queryset.model, forwarders,
getattr(self.queryset, '_db', None))
return iter(data)
class InvalidPrefetch(Exception):
pass
class PrefetchOption(object):
def __init__(self, name, *args, **kwargs):
self.name = name
self.args = args
self.kwargs = kwargs
P = PrefetchOption
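# Short alias so queries can pass options through to a prefetcher, e.g. with a
# hypothetical prefetcher name and kwargs:
#   Book.objects.prefetch(P('latest_n_books', count=2))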
class PrefetchQuerySet(query.QuerySet):
def __init__(self, model=None, query=None, using=None,
prefetch_definitions=None, **kwargs):
super(PrefetchQuerySet, self).__init__(model, query, using, **kwargs)
self._prefetch = {}
self.prefetch_definitions = prefetch_definitions
self._iterable_class = PrefetchIterable
if django.VERSION < (2, 0):
def _clone(self, **kwargs):
return super(PrefetchQuerySet, self). \
_clone(_prefetch=self._prefetch,
prefetch_definitions=self.prefetch_definitions, **kwargs)
else:
def _clone(self):
c = super(PrefetchQuerySet, self)._clone()
c._prefetch = self._prefetch
c.prefetch_definitions = self.prefetch_definitions
return c
def prefetch(self, *names):
obj = self._clone()
for opt in names:
if isinstance(opt, PrefetchOption):
name = opt.name
else:
name = opt
opt = None
parts = name.split('__')
forwarders = []
prefetcher = None
model = self.model
prefetch_definitions = self.prefetch_definitions
for what in parts:
if not prefetcher:
if what in prefetch_definitions:
prefetcher = prefetch_definitions[what]
continue
descriptor = getattr(model, what, None)
if isinstance(descriptor, ForwardManyToOneDescriptor):
field = descriptor.field
forwarders.append(field.name)
model = field.remote_field.model
manager = model.objects
if not isinstance(manager, PrefetchManagerMixin):
raise InvalidPrefetch('Manager for %s is not a PrefetchManagerMixin instance.' % model)
prefetch_definitions = manager.prefetch_definitions
else:
raise InvalidPrefetch("Invalid part %s in prefetch call for %s on model %s. "
"The name is not a prefetcher nor a forward relation (fk)." % (
what, name, self.model))
else:
raise InvalidPrefetch("Invalid part %s in prefetch call for %s on model %s. "
"You cannot have any more relations after the prefetcher." % (
what, name, self.model))
if not prefetcher:
raise InvalidPrefetch("Invalid prefetch call with %s for on model %s. "
"The last part isn't a prefetch definition." % (name, self.model))
if opt:
if prefetcher.__class__ is Prefetcher:
raise InvalidPrefetch("Invalid prefetch call with %s for on model %s. "
"This prefetcher (%s) needs to be a subclass of Prefetcher." % (
name, self.model, prefetcher))
obj._prefetch[name] = forwarders, prefetcher(*opt.args, **opt.kwargs)
else:
obj._prefetch[name] = forwarders, prefetcher if prefetcher.__class__ is Prefetcher else prefetcher()
for forwarders, prefetcher in obj._prefetch.values():
if forwarders:
obj = obj.select_related('__'.join(forwarders))
return obj
def iterator(self):
return self._iterable_class(self)
class Prefetcher(object):
"""
    Prefetch definition. For convenience you can either subclass this and
    define the methods on the subclass or just pass the functions to the
    constructor.
Eg, subclassing::
class GroupPrefetcher(Prefetcher):
@staticmethod
def filter(ids):
return User.groups.through.objects.filter(user__in=ids).select_related('group')
@staticmethod
def reverse_mapper(user_group_association):
return [user_group_association.user_id]
@staticmethod
def decorator(user, user_group_associations=()):
setattr(user, 'prefetched_groups', [i.group for i in user_group_associations])
    Or with the constructor::
Prefetcher(
filter = lambda ids: User.groups.through.objects.filter(user__in=ids).select_related('group'),
reverse_mapper = lambda user_group_association: [user_group_association.user_id],
decorator = lambda user, user_group_associations=(): setattr(user, 'prefetched_groups', [
i.group for i in user_group_associations
])
)
Glossary:
* filter(list_of_ids):
A function that returns a queryset containing all the related data for a given list of keys.
Takes a list of ids as argument.
* reverse_mapper(related_object):
A function that takes the related object as argument and returns a list
of keys that maps that related object to the objects in the queryset.
* mapper(object):
Optional (defaults to ``lambda obj: obj.pk``).
A function that returns the key for a given object in your query set.
* decorator(object, list_of_related_objects):
A function that will save the related data on each of your objects in
your queryset. Takes the object and a list of related objects as
arguments. Note that you should not override existing attributes on the
model instance here.
"""
collect = False
def __init__(self, filter=None, reverse_mapper=None, decorator=None, mapper=None, collect=None):
if filter:
self.filter = filter
elif not hasattr(self, 'filter'):
raise RuntimeError("You must define a filter function")
if reverse_mapper:
self.reverse_mapper = reverse_mapper
elif not hasattr(self, 'reverse_mapper'):
raise RuntimeError("You must define a reverse_mapper function")
if decorator:
self.decorator = decorator
elif not hasattr(self, 'decorator'):
raise RuntimeError("You must define a decorator function")
if mapper:
self.mapper = mapper
if collect is not None:
self.collect = collect
@staticmethod
def mapper(obj):
return obj.pk
def fetch(self, dataset, name, model, forwarders, db):
collect = self.collect or forwarders
try:
data_mapping = collections.defaultdict(list)
t1 = time.time()
for obj in dataset:
for field in forwarders:
obj = getattr(obj, field, None)
if not obj:
continue
if collect:
data_mapping[self.mapper(obj)].append(obj)
else:
data_mapping[self.mapper(obj)] = obj
self.decorator(obj)
t2 = time.time()
logger.debug("Creating data_mapping for %s query took %.3f secs for the %s prefetcher.",
model.__name__, t2-t1, name)
t1 = time.time()
related_data = self.filter(data_mapping.keys())
if db is not None:
related_data = related_data.using(db)
related_data_len = len(related_data)
t2 = time.time()
logger.debug("Filtering for %s related objects for %s query took %.3f secs for the %s prefetcher.",
related_data_len, model.__name__, t2-t1, name)
relation_mapping = collections.defaultdict(list)
t1 = time.time()
for obj in related_data:
for id_ in self.reverse_mapper(obj):
if id_:
relation_mapping[id_].append(obj)
for id_, related_items in relation_mapping.items():
if id_ in data_mapping:
if collect:
for item in data_mapping[id_]:
self.decorator(item, related_items)
else:
self.decorator(data_mapping[id_], related_items)
t2 = time.time()
logger.debug("Adding the related objects on the %s query took %.3f secs for the %s prefetcher.",
model.__name__, t2-t1, name)
return dataset
except Exception:
logger.exception("Prefetch failed for %s prefetch on the %s model:", name, model.__name__)
raise
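# Minimal usage sketch (an assumption, not part of this module): wire a named
# Prefetcher into a PrefetchManager on the model, then prefetch by name.
#
#   class Author(models.Model):
#       name = models.CharField(max_length=100)
#       objects = PrefetchManager(
#           books=Prefetcher(
#               filter=lambda ids: Book.objects.filter(author_id__in=ids),
#               reverse_mapper=lambda book: [book.author_id],
#               decorator=lambda author, books=(): setattr(
#                   author, 'prefetched_books', list(books)),
#           ),
#       )
#
#   for author in Author.objects.prefetch('books'):
#       print(author.prefetched_books)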
| {
"content_hash": "7071afd20e09843ca64cd28ad1f0ba68",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 132,
"avg_line_length": 37.259515570934255,
"alnum_prop": 0.5653789004457652,
"repo_name": "ionelmc/django-prefetch",
"id": "5a70c6815c735fef784cb66681a97bd08d1c78f7",
"size": "10768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/prefetch.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "37626"
}
],
"symlink_target": ""
} |
"""SCons.Tool.sunf95
Tool-specific initialization for sunf95, the Sun Studio F95 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf95.py 2014/03/02 14:18:15 garyo"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf95', 'f95']
def generate(env):
"""Add Builders and construction variables for sunf95 to an
Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f95'
env['FORTRAN'] = fcomp
env['F95'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF95'] = '$F95'
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF95FLAGS'] = SCons.Util.CLVar('$F95FLAGS -KPIC')
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "e39c1a966f8bd3dc79f0a33bf219db06",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 119,
"avg_line_length": 34.1875,
"alnum_prop": 0.729890310786106,
"repo_name": "sftd/scons",
"id": "fd0be66ea802cec53afdc9604557e85983a0de43",
"size": "2188",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "scons-local/SCons/Tool/sunf95.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1913081"
}
],
"symlink_target": ""
} |
from datetime import datetime, timedelta
import json
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from corehq.apps.domain.models import Domain
from dimagi.utils.couch.cache import cache_core
from dimagi.utils.logging import notify_exception
from dimagi.utils.web import json_handler
register = template.Library()
@register.filter
def JSON(obj):
return mark_safe(json.dumps(obj, default=json_handler))
@register.filter
def to_javascript_string(obj):
# seriously: http://stackoverflow.com/a/1068548/8207
return mark_safe(JSON(obj).replace('</script>', '<" + "/script>'))
@register.filter
def BOOL(obj):
try:
obj = obj.to_json()
except AttributeError:
pass
return 'true' if obj else 'false'
@register.filter
def dict_lookup(dict, key):
'''Get an item from a dictionary.'''
return dict.get(key)
@register.filter
def array_lookup(array, index):
'''Get an item from an array.'''
if index < len(array):
return array[index]
@register.simple_tag
def dict_as_query_string(dict, prefix=""):
'''Convert a dictionary to a query string, minus the initial ?'''
return "&".join(["%s%s=%s" % (prefix, key, value) for key, value in dict.items()])
@register.filter
def add_days(date, days=1):
'''Return a date with some days added'''
span = timedelta(days=days)
try:
return date + span
except:
return datetime.strptime(date,'%m/%d/%Y').date() + span
@register.filter
def concat(str1, str2):
"""Concatenate two strings"""
return "%s%s" % (str1, str2)
try:
from resource_versions import resource_versions
except (ImportError, SyntaxError):
resource_versions = {}
@register.simple_tag
def static(url):
resource_url = url
version = resource_versions.get(resource_url)
url = settings.STATIC_URL + url
if version:
url += "?version=%s" % version
return url
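# Illustrative rendering (assuming STATIC_URL = '/static/'):
#   {% static "app/main.js" %} -> "/static/app/main.js?version=<hash>" when a
#   resource version is registered, "/static/app/main.js" otherwise.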
@register.simple_tag
def get_report_analytics_tag(request):
# todo: change this to takes_context=True and check the active_tab context
# variable to see exactly whether the reports tab is active
if 'reports' in request.path_info:
try:
report_name = request.path_info.split('reports/')[1][:-1].replace('_', ' ')
except IndexError:
return ''
return "_gaq.push(['_setCustomVar', 2, 'report', '%s', 3]);\n_gaq.push(['_trackEvent', 'Viewed Report', '%s']);" % (report_name, report_name)
return ''
@register.simple_tag
def domains_for_user(request, selected_domain=None):
"""
Generate pulldown menu for domains.
Cache the entire string alongside the couch_user's doc_id that can get invalidated when
the user doc updates via save.
"""
lst = list()
lst.append('<ul class="dropdown-menu nav-list dropdown-orange">')
new_domain_url = reverse("registration_domain")
if selected_domain == 'public':
# viewing the public domain with a different db, so the user's domains can't readily be accessed.
lst.append('<li><a href="%s">%s...</a></li>' % (reverse("domain_select"), _("Back to My Projects")))
lst.append('<li class="divider"></li>')
else:
cached_domains = cache_core.get_cached_prop(request.couch_user.get_id, 'domain_list')
if cached_domains:
domain_list = [Domain.wrap(x) for x in cached_domains]
else:
try:
domain_list = Domain.active_for_user(request.couch_user)
cache_core.cache_doc_prop(request.couch_user.get_id, 'domain_list', [x.to_json() for x in domain_list])
except Exception:
if settings.DEBUG:
raise
else:
domain_list = Domain.active_for_user(request.user)
notify_exception(request)
if len(domain_list) > 0:
lst.append('<li class="nav-header">%s</li>' % _('My Projects'))
for domain in domain_list:
default_url = reverse("domain_homepage", args=[domain.name])
lst.append('<li><a href="%s">%s</a></li>' % (default_url, domain.long_display_name()))
else:
lst.append('<li class="nav-header">No Projects</li>')
lst.append('<li class="divider"></li>')
lst.append('<li><a href="%s">%s...</a></li>' % (new_domain_url, _('New Project')))
lst.append('<li><a href="%s">%s...</a></li>' % (reverse("appstore"), _('CommCare Exchange')))
lst.append("</ul>")
domain_list_str = "".join(lst)
return domain_list_str
@register.simple_tag
def list_my_domains(request):
cached_val = cache_core.get_cached_prop(request.couch_user.get_id, 'list_my_domains')
if cached_val:
return cached_val.get('list_my_domains', "")
domain_list = Domain.active_for_user(request.user)
lst = list()
lst.append('<ul class="nav nav-pills nav-stacked">')
for domain in domain_list:
default_url = reverse("domain_homepage", args=[domain.name])
lst.append('<li><a href="%s">%s</a></li>' % (default_url, domain.display_name()))
lst.append('</ul>')
my_domain_list_str = "".join(lst)
ret = {"list_my_domains": my_domain_list_str}
cache_core.cache_doc_prop(request.couch_user.get_id, 'list_my_domains', ret)
return my_domain_list_str
@register.simple_tag
def list_my_orgs(request):
org_list = request.couch_user.get_organizations()
lst = list()
lst.append('<ul class="nav nav-pills nav-stacked">')
for org in org_list:
default_url = reverse("orgs_landing", args=[org.name])
lst.append('<li><a href="%s">%s</a></li>' % (default_url, org.title))
lst.append('</ul>')
return "".join(lst)
@register.simple_tag
def commcare_user():
return _(settings.COMMCARE_USER_TERM)
@register.simple_tag
def hq_web_user():
return _(settings.WEB_USER_TERM)
@register.filter
def mod(value, arg):
return value % arg
# This is taken verbatim from https://code.djangoproject.com/ticket/15583
@register.filter(name='sort')
def listsort(value):
if isinstance(value,dict):
new_dict = SortedDict()
key_list = value.keys()
key_list.sort()
for key in key_list:
new_dict[key] = value[key]
return new_dict
elif isinstance(value, list):
new_list = list(value)
new_list.sort()
return new_list
else:
return value
listsort.is_safe = True
@register.filter(name='getattr')
def get_attribute(obj, arg):
""" Get attribute from obj
Usage: {{ couch_user|getattr:"full_name" }}
"""
return getattr(obj, arg, None)
@register.filter
def pretty_doc_info(doc_info):
return render_to_string('hqwebapp/pretty_doc_info.html', {
'doc_info': doc_info,
})
| {
"content_hash": "2e2054e613f67181ecb589e1301353bc",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 149,
"avg_line_length": 32.4046511627907,
"alnum_prop": 0.6306875269125879,
"repo_name": "gmimano/commcaretest",
"id": "95c96c82abab2a2b7785f6a0c435c406235fed3a",
"size": "6967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/hqwebapp/templatetags/hq_shared_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "282577"
},
{
"name": "JavaScript",
"bytes": "2731012"
},
{
"name": "Python",
"bytes": "4738450"
},
{
"name": "Shell",
"bytes": "22454"
}
],
"symlink_target": ""
} |
import time
from struct import pack
from electrum_arg.i18n import _
from electrum_arg.util import PrintError, UserCancelled
from electrum_arg.keystore import bip39_normalize_passphrase
from electrum_arg.bitcoin import serialize_xpub
class GuiMixin(object):
# Requires: self.proto, self.device
messages = {
3: _("Confirm the transaction output on your %s device"),
4: _("Confirm internal entropy on your %s device to begin"),
5: _("Write down the seed word shown on your %s"),
6: _("Confirm on your %s that you want to wipe it clean"),
7: _("Confirm on your %s device the message to sign"),
8: _("Confirm the total amount spent and the transaction fee on your "
"%s device"),
10: _("Confirm wallet address on your %s device"),
'default': _("Check your %s device to continue"),
}
def callback_Failure(self, msg):
# BaseClient's unfortunate call() implementation forces us to
# raise exceptions on failure in order to unwind the stack.
# However, making the user acknowledge they cancelled
# gets old very quickly, so we suppress those. The NotInitialized
# one is misnamed and indicates a passphrase request was cancelled.
if msg.code in (self.types.Failure_PinCancelled,
self.types.Failure_ActionCancelled,
self.types.Failure_NotInitialized):
raise UserCancelled()
raise RuntimeError(msg.message)
def callback_ButtonRequest(self, msg):
message = self.msg
if not message:
message = self.messages.get(msg.code, self.messages['default'])
self.handler.show_message(message % self.device, self.cancel)
return self.proto.ButtonAck()
def callback_PinMatrixRequest(self, msg):
if msg.type == 2:
msg = _("Enter a new PIN for your %s:")
elif msg.type == 3:
msg = (_("Re-enter the new PIN for your %s.\n\n"
"NOTE: the positions of the numbers have changed!"))
else:
msg = _("Enter your current %s PIN:")
pin = self.handler.get_pin(msg % self.device)
if not pin:
return self.proto.Cancel()
return self.proto.PinMatrixAck(pin=pin)
def callback_PassphraseRequest(self, req):
if self.creating_wallet:
msg = _("Enter a passphrase to generate this wallet. Each time "
"you use this wallet your %s will prompt you for the "
"passphrase. If you forget the passphrase you cannot "
"access the argentums in the wallet.") % self.device
else:
msg = _("Enter the passphrase to unlock this wallet:")
passphrase = self.handler.get_passphrase(msg, self.creating_wallet)
if passphrase is None:
return self.proto.Cancel()
passphrase = bip39_normalize_passphrase(passphrase)
return self.proto.PassphraseAck(passphrase=passphrase)
def callback_WordRequest(self, msg):
self.step += 1
msg = _("Step %d/24. Enter seed word as explained on "
"your %s:") % (self.step, self.device)
word = self.handler.get_word(msg)
# Unfortunately the device can't handle self.proto.Cancel()
return self.proto.WordAck(word=word)
def callback_CharacterRequest(self, msg):
char_info = self.handler.get_char(msg)
if not char_info:
return self.proto.Cancel()
return self.proto.CharacterAck(**char_info)
class TrezorClientBase(GuiMixin, PrintError):
def __init__(self, handler, plugin, proto):
assert hasattr(self, 'tx_api') # ProtocolMixin already constructed?
self.proto = proto
self.device = plugin.device
self.handler = handler
self.tx_api = plugin
self.types = plugin.types
self.msg = None
self.creating_wallet = False
self.used()
def __str__(self):
return "%s/%s" % (self.label(), self.features.device_id)
def label(self):
'''The name given by the user to the device.'''
return self.features.label
def is_initialized(self):
'''True if initialized, False if wiped.'''
return self.features.initialized
def is_pairable(self):
return not self.features.bootloader_mode
def used(self):
self.last_operation = time.time()
def prevent_timeouts(self):
self.last_operation = float('inf')
def timeout(self, cutoff):
'''Time out the client if the last operation was before cutoff.'''
if self.last_operation < cutoff:
self.print_error("timed out")
self.clear_session()
@staticmethod
def expand_path(n):
'''Convert bip32 path to list of uint32 integers with prime flags
0/-1/1' -> [0, 0x80000001, 0x80000001]'''
        # This code is similar to code in trezorlib where it unfortunately
# is not declared as a staticmethod. Our n has an extra element.
PRIME_DERIVATION_FLAG = 0x80000000
path = []
for x in n.split('/')[1:]:
prime = 0
if x.endswith("'"):
x = x.replace('\'', '')
prime = PRIME_DERIVATION_FLAG
if x.startswith('-'):
prime = PRIME_DERIVATION_FLAG
path.append(abs(int(x)) | prime)
return path
def cancel(self):
'''Provided here as in keepkeylib but not trezorlib.'''
self.transport.write(self.proto.Cancel())
def i4b(self, x):
return pack('>I', x)
def address_from_derivation(self, derivation):
return self.get_address('Argentum', self.expand_path(derivation))
def toggle_passphrase(self):
if self.features.passphrase_protection:
self.msg = _("Confirm on your %s device to disable passphrases")
else:
self.msg = _("Confirm on your %s device to enable passphrases")
enabled = not self.features.passphrase_protection
self.apply_settings(use_passphrase=enabled)
def change_label(self, label):
self.msg = _("Confirm the new label on your %s device")
self.apply_settings(label=label)
def change_homescreen(self, homescreen):
self.msg = _("Confirm on your %s device to change your home screen")
self.apply_settings(homescreen=homescreen)
def set_pin(self, remove):
if remove:
self.msg = _("Confirm on your %s device to disable PIN protection")
elif self.features.pin_protection:
self.msg = _("Confirm on your %s device to change your PIN")
else:
self.msg = _("Confirm on your %s device to set a PIN")
self.change_pin(remove)
def clear_session(self):
'''Clear the session to force pin (and passphrase if enabled)
re-entry. Does not leak exceptions.'''
self.print_error("clear session:", self)
self.prevent_timeouts()
try:
super(TrezorClientBase, self).clear_session()
except BaseException as e:
# If the device was removed it has the same effect...
self.print_error("clear_session: ignoring error", str(e))
pass
def get_public_node(self, address_n, creating):
self.creating_wallet = creating
return super(TrezorClientBase, self).get_public_node(address_n)
def close(self):
        '''Called when our wallet was closed or the device removed.'''
self.print_error("closing client")
self.clear_session()
# Release the device
self.transport.close()
def firmware_version(self):
f = self.features
return (f.major_version, f.minor_version, f.patch_version)
def atleast_version(self, major, minor=0, patch=0):
return cmp(self.firmware_version(), (major, minor, patch)) >= 0
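    # For example, with firmware (1, 3, 4), atleast_version(1, 3) is True and
    # atleast_version(1, 4) is False (illustrative version numbers).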
@staticmethod
def wrapper(func):
'''Wrap methods to clear any message box they opened.'''
def wrapped(self, *args, **kwargs):
try:
self.prevent_timeouts()
return func(self, *args, **kwargs)
finally:
self.used()
self.handler.finished()
self.creating_wallet = False
self.msg = None
return wrapped
@staticmethod
def wrap_methods(cls):
for method in ['apply_settings', 'change_pin', 'decrypt_message',
'get_address', 'get_public_node',
'load_device_by_mnemonic', 'load_device_by_xprv',
'recovery_device', 'reset_device', 'sign_message',
'sign_tx', 'wipe_device']:
setattr(cls, method, cls.wrapper(getattr(cls, method)))
| {
"content_hash": "cd7ff575cd4aab5d6960bf6c5b6aeffa",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 79,
"avg_line_length": 38.23376623376623,
"alnum_prop": 0.5996376811594203,
"repo_name": "argentumproject/electrum-arg",
"id": "ae200355641cccac14281509332cbc7fc0c8894f",
"size": "8832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/trezor/clientbase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "HTML",
"bytes": "3869"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "NSIS",
"bytes": "7179"
},
{
"name": "PHP",
"bytes": "404"
},
{
"name": "Python",
"bytes": "1244527"
},
{
"name": "Shell",
"bytes": "7098"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
with open('README.md') as f:
long_description = f.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
with open('extra_requirements.txt') as f:
extra_requirements = f.read().splitlines()
setup(
name='nussl',
version="1.1.9",
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Topic :: Artistic Software',
'Topic :: Multimedia',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Editors',
'Topic :: Software Development :: Libraries',
],
description='A flexible sound source separation library.',
long_description=long_description,
long_description_content_type='text/markdown',
author='E. Manilow, P. Seetharaman, F. Pishdadian, N. Shelly, A. Bugler, B. Pardo',
author_email='ethanmanilow@u.northwestern.edu',
maintainer='E. Manilow, P. Seetharaman, F. Pishdadian, N. Shelly, A. Bugler, B. Pardo',
maintainer_email='ethanmanilow@u.northwestern.edu',
url='https://github.com/interactiveaudiolab/nussl',
license='MIT',
packages=find_packages(),
package_data={'': ['core/templates/multitrack.html']},
keywords=['audio', 'source', 'separation', 'music', 'sound', 'source separation'],
install_requires=requirements,
extras_require={
'melodia': [
'vamp'
],
'extras': extra_requirements,
}
)
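# Illustrative install commands for the packaging above (assumed, not part of
# the file):
#   pip install nussl              # installs requirements.txt dependencies
#   pip install "nussl[melodia]"   # additionally installs vamp
#   pip install "nussl[extras]"    # additionally installs extra_requirements.txt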
| {
"content_hash": "444974e37fb5d8e6dd9b4d2ba5e156a2",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 91,
"avg_line_length": 35.26,
"alnum_prop": 0.627906976744186,
"repo_name": "interactiveaudiolab/nussl",
"id": "944a3efe37c786bb84de1fa6669863e754a092ae",
"size": "1763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "11692"
},
{
"name": "Python",
"bytes": "591205"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
} |
"""Wrapper for pulse
Generated with:
tools/genwrappers.py pulseaudio
Do not modify this file.
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id: wrap.py 1694 2008-01-30 23:12:00Z Alex.Holkner $'
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library('pulse')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
class struct_pa_mainloop_api(Structure):
__slots__ = [
]
struct_pa_mainloop_api._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_mainloop_api(Structure):
__slots__ = [
]
struct_pa_mainloop_api._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/mainloop-api.h:51
pa_mainloop_api = struct_pa_mainloop_api
enum_pa_io_event_flags = c_int
PA_IO_EVENT_NULL = 0
PA_IO_EVENT_INPUT = 1
PA_IO_EVENT_OUTPUT = 2
PA_IO_EVENT_HANGUP = 4
PA_IO_EVENT_ERROR = 8
# /usr/include/pulse/mainloop-api.h:60
pa_io_event_flags_t = enum_pa_io_event_flags
class struct_pa_io_event(Structure):
__slots__ = [
]
struct_pa_io_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_io_event(Structure):
__slots__ = [
]
struct_pa_io_event._fields_ = [
('_opaque_struct', c_int)
]
pa_io_event = struct_pa_io_event # /usr/include/pulse/mainloop-api.h:63
pa_io_event_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api),
POINTER(pa_io_event), c_int, pa_io_event_flags_t,
POINTER(
None)) # /usr/include/pulse/mainloop-api.h:65
pa_io_event_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api),
POINTER(pa_io_event), POINTER(
None)) # /usr/include/pulse/mainloop-api.h:67
class struct_pa_time_event(Structure):
__slots__ = [
]
struct_pa_time_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_time_event(Structure):
__slots__ = [
]
struct_pa_time_event._fields_ = [
('_opaque_struct', c_int)
]
pa_time_event = struct_pa_time_event # /usr/include/pulse/mainloop-api.h:70
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
pa_time_event_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api),
POINTER(pa_time_event), POINTER(struct_timeval),
POINTER(
None)) # /usr/include/pulse/mainloop-api.h:72
pa_time_event_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api),
POINTER(pa_time_event), POINTER(
None)) # /usr/include/pulse/mainloop-api.h:74
class struct_pa_defer_event(Structure):
__slots__ = [
]
struct_pa_defer_event._fields_ = [
('_opaque_struct', c_int)
]
class struct_pa_defer_event(Structure):
__slots__ = [
]
struct_pa_defer_event._fields_ = [
('_opaque_struct', c_int)
]
pa_defer_event = struct_pa_defer_event # /usr/include/pulse/mainloop-api.h:77
pa_defer_event_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api),
POINTER(pa_defer_event), POINTER(
None)) # /usr/include/pulse/mainloop-api.h:79
pa_defer_event_destroy_cb_t = CFUNCTYPE(None, POINTER(pa_mainloop_api),
POINTER(pa_defer_event), POINTER(
None)) # /usr/include/pulse/mainloop-api.h:81
# /usr/include/pulse/mainloop-api.h:120
pa_mainloop_api_once = _lib.pa_mainloop_api_once
pa_mainloop_api_once.restype = None
pa_mainloop_api_once.argtypes = [POINTER(pa_mainloop_api),
CFUNCTYPE(None, POINTER(pa_mainloop_api),
POINTER(None)), POINTER(None)]
PA_CHANNELS_MAX = 32 # /usr/include/pulse/sample.h:117
PA_RATE_MAX = 192000 # /usr/include/pulse/sample.h:120
enum_pa_sample_format = c_int
PA_SAMPLE_U8 = 0
PA_SAMPLE_ALAW = 1
PA_SAMPLE_ULAW = 2
PA_SAMPLE_S16LE = 3
PA_SAMPLE_S16BE = 4
PA_SAMPLE_FLOAT32LE = 5
PA_SAMPLE_FLOAT32BE = 6
PA_SAMPLE_S32LE = 7
PA_SAMPLE_S32BE = 8
PA_SAMPLE_MAX = 9
PA_SAMPLE_INVALID = 10
pa_sample_format_t = enum_pa_sample_format # /usr/include/pulse/sample.h:135
class struct_pa_sample_spec(Structure):
__slots__ = [
'format',
'rate',
'channels',
]
struct_pa_sample_spec._fields_ = [
('format', pa_sample_format_t),
('rate', c_uint32),
('channels', c_uint8),
]
pa_sample_spec = struct_pa_sample_spec # /usr/include/pulse/sample.h:173
pa_usec_t = c_uint64 # /usr/include/pulse/sample.h:176
# /usr/include/pulse/sample.h:179
pa_bytes_per_second = _lib.pa_bytes_per_second
pa_bytes_per_second.restype = c_size_t
pa_bytes_per_second.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:182
pa_frame_size = _lib.pa_frame_size
pa_frame_size.restype = c_size_t
pa_frame_size.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:185
pa_sample_size = _lib.pa_sample_size
pa_sample_size.restype = c_size_t
pa_sample_size.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:188
pa_bytes_to_usec = _lib.pa_bytes_to_usec
pa_bytes_to_usec.restype = pa_usec_t
pa_bytes_to_usec.argtypes = [c_uint64, POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:191
pa_usec_to_bytes = _lib.pa_usec_to_bytes
pa_usec_to_bytes.restype = c_size_t
pa_usec_to_bytes.argtypes = [pa_usec_t, POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:194
pa_sample_spec_valid = _lib.pa_sample_spec_valid
pa_sample_spec_valid.restype = c_int
pa_sample_spec_valid.argtypes = [POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:197
pa_sample_spec_equal = _lib.pa_sample_spec_equal
pa_sample_spec_equal.restype = c_int
pa_sample_spec_equal.argtypes = [POINTER(pa_sample_spec),
POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:200
pa_sample_format_to_string = _lib.pa_sample_format_to_string
pa_sample_format_to_string.restype = c_char_p
pa_sample_format_to_string.argtypes = [pa_sample_format_t]
# /usr/include/pulse/sample.h:203
pa_parse_sample_format = _lib.pa_parse_sample_format
pa_parse_sample_format.restype = pa_sample_format_t
pa_parse_sample_format.argtypes = [c_char_p]
PA_SAMPLE_SPEC_SNPRINT_MAX = 32 # /usr/include/pulse/sample.h:206
# /usr/include/pulse/sample.h:209
pa_sample_spec_snprint = _lib.pa_sample_spec_snprint
pa_sample_spec_snprint.restype = c_char_p
pa_sample_spec_snprint.argtypes = [c_char_p, c_size_t, POINTER(pa_sample_spec)]
# /usr/include/pulse/sample.h:212
pa_bytes_snprint = _lib.pa_bytes_snprint
pa_bytes_snprint.restype = c_char_p
pa_bytes_snprint.argtypes = [c_char_p, c_size_t, c_uint]
enum_pa_context_state = c_int
PA_CONTEXT_UNCONNECTED = 0
PA_CONTEXT_CONNECTING = 1
PA_CONTEXT_AUTHORIZING = 2
PA_CONTEXT_SETTING_NAME = 3
PA_CONTEXT_READY = 4
PA_CONTEXT_FAILED = 5
PA_CONTEXT_TERMINATED = 6
pa_context_state_t = enum_pa_context_state # /usr/include/pulse/def.h:49
enum_pa_stream_state = c_int
PA_STREAM_UNCONNECTED = 0
PA_STREAM_CREATING = 1
PA_STREAM_READY = 2
PA_STREAM_FAILED = 3
PA_STREAM_TERMINATED = 4
pa_stream_state_t = enum_pa_stream_state # /usr/include/pulse/def.h:58
enum_pa_operation_state = c_int
PA_OPERATION_RUNNING = 0
PA_OPERATION_DONE = 1
PA_OPERATION_CANCELED = 2
pa_operation_state_t = enum_pa_operation_state # /usr/include/pulse/def.h:65
enum_pa_context_flags = c_int
PA_CONTEXT_NOAUTOSPAWN = 1
pa_context_flags_t = enum_pa_context_flags # /usr/include/pulse/def.h:73
enum_pa_stream_direction = c_int
PA_STREAM_NODIRECTION = 0
PA_STREAM_PLAYBACK = 1
PA_STREAM_RECORD = 2
PA_STREAM_UPLOAD = 3
pa_stream_direction_t = enum_pa_stream_direction # /usr/include/pulse/def.h:81
enum_pa_stream_flags = c_int
PA_STREAM_START_CORKED = 1
PA_STREAM_INTERPOLATE_TIMING = 2
PA_STREAM_NOT_MONOTONOUS = 4
PA_STREAM_AUTO_TIMING_UPDATE = 8
PA_STREAM_NO_REMAP_CHANNELS = 16
PA_STREAM_NO_REMIX_CHANNELS = 32
PA_STREAM_FIX_FORMAT = 64
PA_STREAM_FIX_RATE = 128
PA_STREAM_FIX_CHANNELS = 256
PA_STREAM_DONT_MOVE = 512
PA_STREAM_VARIABLE_RATE = 1024
pa_stream_flags_t = enum_pa_stream_flags # /usr/include/pulse/def.h:212
class struct_pa_buffer_attr(Structure):
__slots__ = [
'maxlength',
'tlength',
'prebuf',
'minreq',
'fragsize',
]
struct_pa_buffer_attr._fields_ = [
('maxlength', c_uint32),
('tlength', c_uint32),
('prebuf', c_uint32),
('minreq', c_uint32),
('fragsize', c_uint32),
]
pa_buffer_attr = struct_pa_buffer_attr # /usr/include/pulse/def.h:221
enum_pa_subscription_mask = c_int
PA_SUBSCRIPTION_MASK_NULL = 0
PA_SUBSCRIPTION_MASK_SINK = 1
PA_SUBSCRIPTION_MASK_SOURCE = 2
PA_SUBSCRIPTION_MASK_SINK_INPUT = 4
PA_SUBSCRIPTION_MASK_SOURCE_OUTPUT = 8
PA_SUBSCRIPTION_MASK_MODULE = 16
PA_SUBSCRIPTION_MASK_CLIENT = 32
PA_SUBSCRIPTION_MASK_SAMPLE_CACHE = 64
PA_SUBSCRIPTION_MASK_SERVER = 128
PA_SUBSCRIPTION_MASK_AUTOLOAD = 256
PA_SUBSCRIPTION_MASK_ALL = 511
# /usr/include/pulse/def.h:261
pa_subscription_mask_t = enum_pa_subscription_mask
enum_pa_subscription_event_type = c_int
PA_SUBSCRIPTION_EVENT_SINK = 0
PA_SUBSCRIPTION_EVENT_SOURCE = 1
PA_SUBSCRIPTION_EVENT_SINK_INPUT = 2
PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT = 3
PA_SUBSCRIPTION_EVENT_MODULE = 4
PA_SUBSCRIPTION_EVENT_CLIENT = 5
PA_SUBSCRIPTION_EVENT_SAMPLE_CACHE = 6
PA_SUBSCRIPTION_EVENT_SERVER = 7
PA_SUBSCRIPTION_EVENT_AUTOLOAD = 8
PA_SUBSCRIPTION_EVENT_FACILITY_MASK = 15
PA_SUBSCRIPTION_EVENT_NEW = 0
PA_SUBSCRIPTION_EVENT_CHANGE = 16
PA_SUBSCRIPTION_EVENT_REMOVE = 32
PA_SUBSCRIPTION_EVENT_TYPE_MASK = 48  # PA_SUBSCRIPTION_EVENT_CHANGE | PA_SUBSCRIPTION_EVENT_REMOVE (0x30)
# /usr/include/pulse/def.h:280
pa_subscription_event_type_t = enum_pa_subscription_event_type
class struct_pa_timing_info(Structure):
__slots__ = [
'timestamp',
'synchronized_clocks',
'sink_usec',
'source_usec',
'transport_usec',
'playing',
'write_index_corrupt',
'write_index',
'read_index_corrupt',
'read_index',
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
# TODO: HACK struct timeval wasn't picked up by wraptypes
# ('_opaque_struct', c_int)
('tv_sec', c_long),
('tv_usec', c_long),
]
struct_pa_timing_info._fields_ = [
('timestamp', struct_timeval),
('synchronized_clocks', c_int),
('sink_usec', pa_usec_t),
('source_usec', pa_usec_t),
('transport_usec', pa_usec_t),
('playing', c_int),
('write_index_corrupt', c_int),
('write_index', c_int64),
('read_index_corrupt', c_int),
('read_index', c_int64),
]
pa_timing_info = struct_pa_timing_info # /usr/include/pulse/def.h:347
class struct_pa_spawn_api(Structure):
__slots__ = [
'prefork',
'postfork',
'atfork',
]
struct_pa_spawn_api._fields_ = [
('prefork', POINTER(CFUNCTYPE(None))),
('postfork', POINTER(CFUNCTYPE(None))),
('atfork', POINTER(CFUNCTYPE(None))),
]
pa_spawn_api = struct_pa_spawn_api # /usr/include/pulse/def.h:366
enum_pa_seek_mode = c_int
PA_SEEK_RELATIVE = 0
PA_SEEK_ABSOLUTE = 1
PA_SEEK_RELATIVE_ON_READ = 2
PA_SEEK_RELATIVE_END = 3
pa_seek_mode_t = enum_pa_seek_mode # /usr/include/pulse/def.h:374
enum_pa_sink_flags = c_int
PA_SINK_HW_VOLUME_CTRL = 1
PA_SINK_LATENCY = 2
PA_SINK_HARDWARE = 4
PA_SINK_NETWORK = 8
pa_sink_flags_t = enum_pa_sink_flags # /usr/include/pulse/def.h:382
enum_pa_source_flags = c_int
PA_SOURCE_HW_VOLUME_CTRL = 1
PA_SOURCE_LATENCY = 2
PA_SOURCE_HARDWARE = 4
PA_SOURCE_NETWORK = 8
pa_source_flags_t = enum_pa_source_flags # /usr/include/pulse/def.h:390
pa_free_cb_t = CFUNCTYPE(None, POINTER(None)) # /usr/include/pulse/def.h:393
class struct_pa_operation(Structure):
__slots__ = [
]
struct_pa_operation._fields_ = [
('_opaque_struct', c_int)
]
pa_operation = struct_pa_operation # /usr/include/pulse/operation.h:36
# /usr/include/pulse/operation.h:39
pa_operation_ref = _lib.pa_operation_ref
pa_operation_ref.restype = POINTER(pa_operation)
pa_operation_ref.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:42
pa_operation_unref = _lib.pa_operation_unref
pa_operation_unref.restype = None
pa_operation_unref.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:45
pa_operation_cancel = _lib.pa_operation_cancel
pa_operation_cancel.restype = None
pa_operation_cancel.argtypes = [POINTER(pa_operation)]
# /usr/include/pulse/operation.h:48
pa_operation_get_state = _lib.pa_operation_get_state
pa_operation_get_state.restype = pa_operation_state_t
pa_operation_get_state.argtypes = [POINTER(pa_operation)]
class struct_pa_context(Structure):
__slots__ = [
]
struct_pa_context._fields_ = [
('_opaque_struct', c_int)
]
pa_context = struct_pa_context # /usr/include/pulse/context.h:160
pa_context_notify_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(
None)) # /usr/include/pulse/context.h:163
pa_context_success_cb_t = CFUNCTYPE(None, POINTER(pa_context), c_int, POINTER(
None)) # /usr/include/pulse/context.h:166
# /usr/include/pulse/context.h:170
pa_context_new = _lib.pa_context_new
pa_context_new.restype = POINTER(pa_context)
pa_context_new.argtypes = [POINTER(pa_mainloop_api), c_char_p]
# /usr/include/pulse/context.h:173
pa_context_unref = _lib.pa_context_unref
pa_context_unref.restype = None
pa_context_unref.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:176
pa_context_ref = _lib.pa_context_ref
pa_context_ref.restype = POINTER(pa_context)
pa_context_ref.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:179
pa_context_set_state_callback = _lib.pa_context_set_state_callback
pa_context_set_state_callback.restype = None
pa_context_set_state_callback.argtypes = [POINTER(pa_context),
pa_context_notify_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:182
pa_context_errno = _lib.pa_context_errno
pa_context_errno.restype = c_int
pa_context_errno.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:185
pa_context_is_pending = _lib.pa_context_is_pending
pa_context_is_pending.restype = c_int
pa_context_is_pending.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:188
pa_context_get_state = _lib.pa_context_get_state
pa_context_get_state.restype = pa_context_state_t
pa_context_get_state.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:197
pa_context_connect = _lib.pa_context_connect
pa_context_connect.restype = c_int
pa_context_connect.argtypes = [POINTER(pa_context), c_char_p,
pa_context_flags_t, POINTER(pa_spawn_api)]
# /usr/include/pulse/context.h:200
pa_context_disconnect = _lib.pa_context_disconnect
pa_context_disconnect.restype = None
pa_context_disconnect.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:203
pa_context_drain = _lib.pa_context_drain
pa_context_drain.restype = POINTER(pa_operation)
pa_context_drain.argtypes = [POINTER(pa_context), pa_context_notify_cb_t,
POINTER(None)]
# /usr/include/pulse/context.h:208
pa_context_exit_daemon = _lib.pa_context_exit_daemon
pa_context_exit_daemon.restype = POINTER(pa_operation)
pa_context_exit_daemon.argtypes = [POINTER(pa_context), pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/context.h:211
pa_context_set_default_sink = _lib.pa_context_set_default_sink
pa_context_set_default_sink.restype = POINTER(pa_operation)
pa_context_set_default_sink.argtypes = [POINTER(pa_context), c_char_p,
pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:214
pa_context_set_default_source = _lib.pa_context_set_default_source
pa_context_set_default_source.restype = POINTER(pa_operation)
pa_context_set_default_source.argtypes = [POINTER(pa_context), c_char_p,
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/context.h:217
pa_context_is_local = _lib.pa_context_is_local
pa_context_is_local.restype = c_int
pa_context_is_local.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:220
pa_context_set_name = _lib.pa_context_set_name
pa_context_set_name.restype = POINTER(pa_operation)
pa_context_set_name.argtypes = [POINTER(pa_context), c_char_p,
pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/context.h:223
pa_context_get_server = _lib.pa_context_get_server
pa_context_get_server.restype = c_char_p
pa_context_get_server.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:226
pa_context_get_protocol_version = _lib.pa_context_get_protocol_version
pa_context_get_protocol_version.restype = c_uint32
pa_context_get_protocol_version.argtypes = [POINTER(pa_context)]
# /usr/include/pulse/context.h:229
pa_context_get_server_protocol_version = _lib.pa_context_get_server_protocol_version
pa_context_get_server_protocol_version.restype = c_uint32
pa_context_get_server_protocol_version.argtypes = [POINTER(pa_context)]
enum_pa_channel_position = c_int
# Values follow the pa_channel_position enum in pulse/channelmap.h; LEFT/RIGHT/
# CENTER and SUBWOOFER are aliases of the corresponding FRONT_*/LFE entries.
PA_CHANNEL_POSITION_INVALID = -1
PA_CHANNEL_POSITION_MONO = 0
PA_CHANNEL_POSITION_LEFT = 1
PA_CHANNEL_POSITION_RIGHT = 2
PA_CHANNEL_POSITION_CENTER = 3
PA_CHANNEL_POSITION_FRONT_LEFT = 1
PA_CHANNEL_POSITION_FRONT_RIGHT = 2
PA_CHANNEL_POSITION_FRONT_CENTER = 3
PA_CHANNEL_POSITION_REAR_CENTER = 4
PA_CHANNEL_POSITION_REAR_LEFT = 5
PA_CHANNEL_POSITION_REAR_RIGHT = 6
PA_CHANNEL_POSITION_LFE = 7
PA_CHANNEL_POSITION_SUBWOOFER = 7
PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER = 8
PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER = 9
PA_CHANNEL_POSITION_SIDE_LEFT = 10
PA_CHANNEL_POSITION_SIDE_RIGHT = 11
PA_CHANNEL_POSITION_AUX0 = 12
PA_CHANNEL_POSITION_AUX1 = 13
PA_CHANNEL_POSITION_AUX2 = 14
PA_CHANNEL_POSITION_AUX3 = 15
PA_CHANNEL_POSITION_AUX4 = 16
PA_CHANNEL_POSITION_AUX5 = 17
PA_CHANNEL_POSITION_AUX6 = 18
PA_CHANNEL_POSITION_AUX7 = 19
PA_CHANNEL_POSITION_AUX8 = 20
PA_CHANNEL_POSITION_AUX9 = 21
PA_CHANNEL_POSITION_AUX10 = 22
PA_CHANNEL_POSITION_AUX11 = 23
PA_CHANNEL_POSITION_AUX12 = 24
PA_CHANNEL_POSITION_AUX13 = 25
PA_CHANNEL_POSITION_AUX14 = 26
PA_CHANNEL_POSITION_AUX15 = 27
PA_CHANNEL_POSITION_AUX16 = 28
PA_CHANNEL_POSITION_AUX17 = 29
PA_CHANNEL_POSITION_AUX18 = 30
PA_CHANNEL_POSITION_AUX19 = 31
PA_CHANNEL_POSITION_AUX20 = 32
PA_CHANNEL_POSITION_AUX21 = 33
PA_CHANNEL_POSITION_AUX22 = 34
PA_CHANNEL_POSITION_AUX23 = 35
PA_CHANNEL_POSITION_AUX24 = 36
PA_CHANNEL_POSITION_AUX25 = 37
PA_CHANNEL_POSITION_AUX26 = 38
PA_CHANNEL_POSITION_AUX27 = 39
PA_CHANNEL_POSITION_AUX28 = 40
PA_CHANNEL_POSITION_AUX29 = 41
PA_CHANNEL_POSITION_AUX30 = 42
PA_CHANNEL_POSITION_AUX31 = 43
PA_CHANNEL_POSITION_TOP_CENTER = 44
PA_CHANNEL_POSITION_TOP_FRONT_LEFT = 45
PA_CHANNEL_POSITION_TOP_FRONT_RIGHT = 46
PA_CHANNEL_POSITION_TOP_FRONT_CENTER = 47
PA_CHANNEL_POSITION_TOP_REAR_LEFT = 48
PA_CHANNEL_POSITION_TOP_REAR_RIGHT = 49
PA_CHANNEL_POSITION_TOP_REAR_CENTER = 50
PA_CHANNEL_POSITION_MAX = 51
# /usr/include/pulse/channelmap.h:140
pa_channel_position_t = enum_pa_channel_position
enum_pa_channel_map_def = c_int
PA_CHANNEL_MAP_AIFF = 0
PA_CHANNEL_MAP_ALSA = 1
PA_CHANNEL_MAP_AUX = 2
PA_CHANNEL_MAP_WAVEEX = 3
PA_CHANNEL_MAP_OSS = 4
PA_CHANNEL_MAP_DEFAULT = 0
# /usr/include/pulse/channelmap.h:151
pa_channel_map_def_t = enum_pa_channel_map_def
class struct_pa_channel_map(Structure):
__slots__ = [
'channels',
'map',
]
struct_pa_channel_map._fields_ = [
('channels', c_uint8),
('map', pa_channel_position_t * 32),
]
pa_channel_map = struct_pa_channel_map # /usr/include/pulse/channelmap.h:159
# /usr/include/pulse/channelmap.h:162
pa_channel_map_init = _lib.pa_channel_map_init
pa_channel_map_init.restype = POINTER(pa_channel_map)
pa_channel_map_init.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:165
pa_channel_map_init_mono = _lib.pa_channel_map_init_mono
pa_channel_map_init_mono.restype = POINTER(pa_channel_map)
pa_channel_map_init_mono.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:168
pa_channel_map_init_stereo = _lib.pa_channel_map_init_stereo
pa_channel_map_init_stereo.restype = POINTER(pa_channel_map)
pa_channel_map_init_stereo.argtypes = [POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:172
pa_channel_map_init_auto = _lib.pa_channel_map_init_auto
pa_channel_map_init_auto.restype = POINTER(pa_channel_map)
pa_channel_map_init_auto.argtypes = [POINTER(pa_channel_map), c_uint,
pa_channel_map_def_t]
# /usr/include/pulse/channelmap.h:175
pa_channel_position_to_string = _lib.pa_channel_position_to_string
pa_channel_position_to_string.restype = c_char_p
pa_channel_position_to_string.argtypes = [pa_channel_position_t]
# /usr/include/pulse/channelmap.h:178
pa_channel_position_to_pretty_string = _lib.pa_channel_position_to_pretty_string
pa_channel_position_to_pretty_string.restype = c_char_p
pa_channel_position_to_pretty_string.argtypes = [pa_channel_position_t]
PA_CHANNEL_MAP_SNPRINT_MAX = 336 # /usr/include/pulse/channelmap.h:181
# /usr/include/pulse/channelmap.h:184
pa_channel_map_snprint = _lib.pa_channel_map_snprint
pa_channel_map_snprint.restype = c_char_p
pa_channel_map_snprint.argtypes = [c_char_p, c_size_t, POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:187
pa_channel_map_parse = _lib.pa_channel_map_parse
pa_channel_map_parse.restype = POINTER(pa_channel_map)
pa_channel_map_parse.argtypes = [POINTER(pa_channel_map), c_char_p]
# /usr/include/pulse/channelmap.h:190
pa_channel_map_equal = _lib.pa_channel_map_equal
pa_channel_map_equal.restype = c_int
pa_channel_map_equal.argtypes = [POINTER(pa_channel_map),
POINTER(pa_channel_map)]
# /usr/include/pulse/channelmap.h:193
pa_channel_map_valid = _lib.pa_channel_map_valid
pa_channel_map_valid.restype = c_int
pa_channel_map_valid.argtypes = [POINTER(pa_channel_map)]
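# --- Illustrative usage sketch (not part of the generated bindings) ---------
# Initialises a stereo channel map and renders it as text with the snprint
# helper; PA_CHANNEL_MAP_SNPRINT_MAX defined above bounds the buffer size.
def _example_stereo_channel_map():
    cm = pa_channel_map()
    pa_channel_map_init_stereo(byref(cm))
    buf = create_string_buffer(PA_CHANNEL_MAP_SNPRINT_MAX)
    pa_channel_map_snprint(buf, len(buf), byref(cm))
    return cm, buf.value              # e.g. b'front-left,front-right'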
pa_volume_t = c_uint32 # /usr/include/pulse/volume.h:101
PA_VOLUME_NORM = 65536 # /usr/include/pulse/volume.h:104
PA_VOLUME_MUTED = 0 # /usr/include/pulse/volume.h:107
class struct_pa_cvolume(Structure):
__slots__ = [
'channels',
'values',
]
struct_pa_cvolume._fields_ = [
('channels', c_uint8),
('values', pa_volume_t * 32),
]
pa_cvolume = struct_pa_cvolume # /usr/include/pulse/volume.h:113
# /usr/include/pulse/volume.h:116
pa_cvolume_equal = _lib.pa_cvolume_equal
pa_cvolume_equal.restype = c_int
pa_cvolume_equal.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:125
pa_cvolume_set = _lib.pa_cvolume_set
pa_cvolume_set.restype = POINTER(pa_cvolume)
pa_cvolume_set.argtypes = [POINTER(pa_cvolume), c_uint, pa_volume_t]
PA_CVOLUME_SNPRINT_MAX = 64 # /usr/include/pulse/volume.h:128
# /usr/include/pulse/volume.h:131
pa_cvolume_snprint = _lib.pa_cvolume_snprint
pa_cvolume_snprint.restype = c_char_p
pa_cvolume_snprint.argtypes = [c_char_p, c_size_t, POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:134
pa_cvolume_avg = _lib.pa_cvolume_avg
pa_cvolume_avg.restype = pa_volume_t
pa_cvolume_avg.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:137
pa_cvolume_valid = _lib.pa_cvolume_valid
pa_cvolume_valid.restype = c_int
pa_cvolume_valid.argtypes = [POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:140
pa_cvolume_channels_equal_to = _lib.pa_cvolume_channels_equal_to
pa_cvolume_channels_equal_to.restype = c_int
pa_cvolume_channels_equal_to.argtypes = [POINTER(pa_cvolume), pa_volume_t]
# /usr/include/pulse/volume.h:149
pa_sw_volume_multiply = _lib.pa_sw_volume_multiply
pa_sw_volume_multiply.restype = pa_volume_t
pa_sw_volume_multiply.argtypes = [pa_volume_t, pa_volume_t]
# /usr/include/pulse/volume.h:152
pa_sw_cvolume_multiply = _lib.pa_sw_cvolume_multiply
pa_sw_cvolume_multiply.restype = POINTER(pa_cvolume)
pa_sw_cvolume_multiply.argtypes = [POINTER(pa_cvolume), POINTER(pa_cvolume),
POINTER(pa_cvolume)]
# /usr/include/pulse/volume.h:155
pa_sw_volume_from_dB = _lib.pa_sw_volume_from_dB
pa_sw_volume_from_dB.restype = pa_volume_t
pa_sw_volume_from_dB.argtypes = [c_double]
# /usr/include/pulse/volume.h:158
pa_sw_volume_to_dB = _lib.pa_sw_volume_to_dB
pa_sw_volume_to_dB.restype = c_double
pa_sw_volume_to_dB.argtypes = [pa_volume_t]
# /usr/include/pulse/volume.h:161
pa_sw_volume_from_linear = _lib.pa_sw_volume_from_linear
pa_sw_volume_from_linear.restype = pa_volume_t
pa_sw_volume_from_linear.argtypes = [c_double]
# /usr/include/pulse/volume.h:164
pa_sw_volume_to_linear = _lib.pa_sw_volume_to_linear
pa_sw_volume_to_linear.restype = c_double
pa_sw_volume_to_linear.argtypes = [pa_volume_t]
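# --- Illustrative usage sketch (not part of the generated bindings) ---------
# Builds a per-channel volume at -6 dB using the software-volume conversion
# helpers above; the channel count must match the target stream or sink.
def _example_cvolume_minus_6dB(channels=2):
    vol = pa_cvolume()
    pa_cvolume_set(byref(vol), channels, pa_sw_volume_from_dB(-6.0))
    assert pa_cvolume_valid(byref(vol))
    return vol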
PA_DECIBEL_MININFTY = -200 # /usr/include/pulse/volume.h:170
class struct_pa_stream(Structure):
__slots__ = [
]
struct_pa_stream._fields_ = [
('_opaque_struct', c_int)
]
pa_stream = struct_pa_stream # /usr/include/pulse/stream.h:268
pa_stream_success_cb_t = CFUNCTYPE(None, POINTER(pa_stream), c_int, POINTER(
None)) # /usr/include/pulse/stream.h:271
pa_stream_request_cb_t = CFUNCTYPE(None, POINTER(pa_stream), c_size_t, POINTER(
None)) # /usr/include/pulse/stream.h:274
pa_stream_notify_cb_t = CFUNCTYPE(None, POINTER(pa_stream), POINTER(
None)) # /usr/include/pulse/stream.h:277
# /usr/include/pulse/stream.h:280
pa_stream_new = _lib.pa_stream_new
pa_stream_new.restype = POINTER(pa_stream)
pa_stream_new.argtypes = [POINTER(pa_context), c_char_p,
POINTER(pa_sample_spec), POINTER(pa_channel_map)]
# /usr/include/pulse/stream.h:287
pa_stream_unref = _lib.pa_stream_unref
pa_stream_unref.restype = None
pa_stream_unref.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:290
pa_stream_ref = _lib.pa_stream_ref
pa_stream_ref.restype = POINTER(pa_stream)
pa_stream_ref.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:293
pa_stream_get_state = _lib.pa_stream_get_state
pa_stream_get_state.restype = pa_stream_state_t
pa_stream_get_state.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:296
pa_stream_get_context = _lib.pa_stream_get_context
pa_stream_get_context.restype = POINTER(pa_context)
pa_stream_get_context.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:302
pa_stream_get_index = _lib.pa_stream_get_index
pa_stream_get_index.restype = c_uint32
pa_stream_get_index.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:312
pa_stream_get_device_index = _lib.pa_stream_get_device_index
pa_stream_get_device_index.restype = c_uint32
pa_stream_get_device_index.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:322
pa_stream_get_device_name = _lib.pa_stream_get_device_name
pa_stream_get_device_name.restype = c_char_p
pa_stream_get_device_name.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:328
pa_stream_is_suspended = _lib.pa_stream_is_suspended
pa_stream_is_suspended.restype = c_int
pa_stream_is_suspended.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:331
pa_stream_connect_playback = _lib.pa_stream_connect_playback
pa_stream_connect_playback.restype = c_int
pa_stream_connect_playback.argtypes = [POINTER(pa_stream), c_char_p,
POINTER(pa_buffer_attr),
pa_stream_flags_t, POINTER(pa_cvolume),
POINTER(pa_stream)]
# /usr/include/pulse/stream.h:340
pa_stream_connect_record = _lib.pa_stream_connect_record
pa_stream_connect_record.restype = c_int
pa_stream_connect_record.argtypes = [POINTER(pa_stream), c_char_p,
POINTER(pa_buffer_attr), pa_stream_flags_t]
# /usr/include/pulse/stream.h:347
pa_stream_disconnect = _lib.pa_stream_disconnect
pa_stream_disconnect.restype = c_int
pa_stream_disconnect.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:356
pa_stream_write = _lib.pa_stream_write
pa_stream_write.restype = c_int
pa_stream_write.argtypes = [POINTER(pa_stream), POINTER(None), c_size_t,
pa_free_cb_t, c_int64, pa_seek_mode_t]
# /usr/include/pulse/stream.h:369
pa_stream_peek = _lib.pa_stream_peek
pa_stream_peek.restype = c_int
pa_stream_peek.argtypes = [POINTER(pa_stream), POINTER(POINTER(None)),
POINTER(c_size_t)]
# /usr/include/pulse/stream.h:376
pa_stream_drop = _lib.pa_stream_drop
pa_stream_drop.restype = c_int
pa_stream_drop.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:379
pa_stream_writable_size = _lib.pa_stream_writable_size
pa_stream_writable_size.restype = c_size_t
pa_stream_writable_size.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:382
pa_stream_readable_size = _lib.pa_stream_readable_size
pa_stream_readable_size.restype = c_size_t
pa_stream_readable_size.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:385
pa_stream_drain = _lib.pa_stream_drain
pa_stream_drain.restype = POINTER(pa_operation)
pa_stream_drain.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t,
POINTER(None)]
# /usr/include/pulse/stream.h:391
pa_stream_update_timing_info = _lib.pa_stream_update_timing_info
pa_stream_update_timing_info.restype = POINTER(pa_operation)
pa_stream_update_timing_info.argtypes = [POINTER(pa_stream),
pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:394
pa_stream_set_state_callback = _lib.pa_stream_set_state_callback
pa_stream_set_state_callback.restype = None
pa_stream_set_state_callback.argtypes = [POINTER(pa_stream),
pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:398
pa_stream_set_write_callback = _lib.pa_stream_set_write_callback
pa_stream_set_write_callback.restype = None
pa_stream_set_write_callback.argtypes = [POINTER(pa_stream),
pa_stream_request_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:402
pa_stream_set_read_callback = _lib.pa_stream_set_read_callback
pa_stream_set_read_callback.restype = None
pa_stream_set_read_callback.argtypes = [POINTER(pa_stream),
pa_stream_request_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:405
pa_stream_set_overflow_callback = _lib.pa_stream_set_overflow_callback
pa_stream_set_overflow_callback.restype = None
pa_stream_set_overflow_callback.argtypes = [POINTER(pa_stream),
pa_stream_notify_cb_t,
POINTER(None)]
# /usr/include/pulse/stream.h:408
pa_stream_set_underflow_callback = _lib.pa_stream_set_underflow_callback
pa_stream_set_underflow_callback.restype = None
pa_stream_set_underflow_callback.argtypes = [POINTER(pa_stream),
pa_stream_notify_cb_t,
POINTER(None)]
# /usr/include/pulse/stream.h:413
pa_stream_set_latency_update_callback = _lib.pa_stream_set_latency_update_callback
pa_stream_set_latency_update_callback.restype = None
pa_stream_set_latency_update_callback.argtypes = [POINTER(pa_stream),
pa_stream_notify_cb_t,
POINTER(None)]
# /usr/include/pulse/stream.h:420
pa_stream_set_moved_callback = _lib.pa_stream_set_moved_callback
pa_stream_set_moved_callback.restype = None
pa_stream_set_moved_callback.argtypes = [POINTER(pa_stream),
pa_stream_notify_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:430
pa_stream_set_suspended_callback = _lib.pa_stream_set_suspended_callback
pa_stream_set_suspended_callback.restype = None
pa_stream_set_suspended_callback.argtypes = [POINTER(pa_stream),
pa_stream_notify_cb_t,
POINTER(None)]
# /usr/include/pulse/stream.h:433
pa_stream_cork = _lib.pa_stream_cork
pa_stream_cork.restype = POINTER(pa_operation)
pa_stream_cork.argtypes = [POINTER(pa_stream), c_int, pa_stream_success_cb_t,
POINTER(None)]
# /usr/include/pulse/stream.h:438
pa_stream_flush = _lib.pa_stream_flush
pa_stream_flush.restype = POINTER(pa_operation)
pa_stream_flush.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t,
POINTER(None)]
# /usr/include/pulse/stream.h:442
pa_stream_prebuf = _lib.pa_stream_prebuf
pa_stream_prebuf.restype = POINTER(pa_operation)
pa_stream_prebuf.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t,
POINTER(None)]
# /usr/include/pulse/stream.h:447
pa_stream_trigger = _lib.pa_stream_trigger
pa_stream_trigger.restype = POINTER(pa_operation)
pa_stream_trigger.argtypes = [POINTER(pa_stream), pa_stream_success_cb_t,
POINTER(None)]
# /usr/include/pulse/stream.h:450
pa_stream_set_name = _lib.pa_stream_set_name
pa_stream_set_name.restype = POINTER(pa_operation)
pa_stream_set_name.argtypes = [POINTER(pa_stream), c_char_p,
pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:467
pa_stream_get_time = _lib.pa_stream_get_time
pa_stream_get_time.restype = c_int
pa_stream_get_time.argtypes = [POINTER(pa_stream), POINTER(pa_usec_t)]
# /usr/include/pulse/stream.h:473
pa_stream_get_latency = _lib.pa_stream_get_latency
pa_stream_get_latency.restype = c_int
pa_stream_get_latency.argtypes = [POINTER(pa_stream), POINTER(pa_usec_t),
POINTER(c_int)]
# /usr/include/pulse/stream.h:485
pa_stream_get_timing_info = _lib.pa_stream_get_timing_info
pa_stream_get_timing_info.restype = POINTER(pa_timing_info)
pa_stream_get_timing_info.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:488
pa_stream_get_sample_spec = _lib.pa_stream_get_sample_spec
pa_stream_get_sample_spec.restype = POINTER(pa_sample_spec)
pa_stream_get_sample_spec.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:491
pa_stream_get_channel_map = _lib.pa_stream_get_channel_map
pa_stream_get_channel_map.restype = POINTER(pa_channel_map)
pa_stream_get_channel_map.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:496
pa_stream_get_buffer_attr = _lib.pa_stream_get_buffer_attr
pa_stream_get_buffer_attr.restype = POINTER(pa_buffer_attr)
pa_stream_get_buffer_attr.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/stream.h:504
pa_stream_set_buffer_attr = _lib.pa_stream_set_buffer_attr
pa_stream_set_buffer_attr.restype = POINTER(pa_operation)
pa_stream_set_buffer_attr.argtypes = [POINTER(pa_stream),
POINTER(pa_buffer_attr),
pa_stream_success_cb_t, POINTER(None)]
# /usr/include/pulse/stream.h:511
pa_stream_update_sample_rate = _lib.pa_stream_update_sample_rate
pa_stream_update_sample_rate.restype = POINTER(pa_operation)
pa_stream_update_sample_rate.argtypes = [POINTER(pa_stream), c_uint32,
pa_stream_success_cb_t, POINTER(None)]
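# --- Illustrative usage sketch (not part of the generated bindings) ---------
# Outline of creating a playback stream on a context that is already in the
# PA_CONTEXT_READY state.  The write callback feeds silence as a placeholder;
# real code would pass decoded audio to pa_stream_write().  The callback
# object must be kept alive for as long as the stream exists.
def _example_playback_stream(context, name=b'example'):
    ss = pa_sample_spec()
    ss.format, ss.rate, ss.channels = PA_SAMPLE_S16LE, 44100, 2

    def _need_data(stream, nbytes, userdata):
        # NULL free callback => libpulse copies the data internally.
        pa_stream_write(stream, b'\x00' * nbytes, nbytes, None, 0,
                        PA_SEEK_RELATIVE)

    write_cb = pa_stream_request_cb_t(_need_data)
    stream = pa_stream_new(context, name, byref(ss), None)
    pa_stream_set_write_callback(stream, write_cb, None)
    pa_stream_connect_playback(stream, None, None, 0, None, None)
    return stream, write_cb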
class struct_pa_sink_info(Structure):
__slots__ = [
'name',
'index',
'description',
'sample_spec',
'channel_map',
'owner_module',
'volume',
'mute',
'monitor_source',
'monitor_source_name',
'latency',
'driver',
'flags',
]
struct_pa_sink_info._fields_ = [
('name', c_char_p),
('index', c_uint32),
('description', c_char_p),
('sample_spec', pa_sample_spec),
('channel_map', pa_channel_map),
('owner_module', c_uint32),
('volume', pa_cvolume),
('mute', c_int),
('monitor_source', c_uint32),
('monitor_source_name', c_char_p),
('latency', pa_usec_t),
('driver', c_char_p),
('flags', pa_sink_flags_t),
]
pa_sink_info = struct_pa_sink_info # /usr/include/pulse/introspect.h:224
pa_sink_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_sink_info),
c_int, POINTER(
None)) # /usr/include/pulse/introspect.h:227
# /usr/include/pulse/introspect.h:230
pa_context_get_sink_info_by_name = _lib.pa_context_get_sink_info_by_name
pa_context_get_sink_info_by_name.restype = POINTER(pa_operation)
pa_context_get_sink_info_by_name.argtypes = [POINTER(pa_context), c_char_p,
pa_sink_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:233
pa_context_get_sink_info_by_index = _lib.pa_context_get_sink_info_by_index
pa_context_get_sink_info_by_index.restype = POINTER(pa_operation)
pa_context_get_sink_info_by_index.argtypes = [POINTER(pa_context), c_uint32,
pa_sink_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:236
pa_context_get_sink_info_list = _lib.pa_context_get_sink_info_list
pa_context_get_sink_info_list.restype = POINTER(pa_operation)
pa_context_get_sink_info_list.argtypes = [POINTER(pa_context),
pa_sink_info_cb_t, POINTER(None)]
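# --- Illustrative usage sketch (not part of the generated bindings) ---------
# Collects sink names on a READY context.  The callback fires once per sink
# and a final time with eol != 0 and a NULL info pointer; the caller keeps
# the callback object alive and unrefs the returned operation when done.
def _example_list_sinks(context, names):
    def _cb(ctx, info, eol, userdata):
        if not eol:
            names.append(info.contents.name)
    cb = pa_sink_info_cb_t(_cb)
    op = pa_context_get_sink_info_list(context, cb, None)
    return op, cb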
class struct_pa_source_info(Structure):
__slots__ = [
'name',
'index',
'description',
'sample_spec',
'channel_map',
'owner_module',
'volume',
'mute',
'monitor_of_sink',
'monitor_of_sink_name',
'latency',
'driver',
'flags',
]
struct_pa_source_info._fields_ = [
('name', c_char_p),
('index', c_uint32),
('description', c_char_p),
('sample_spec', pa_sample_spec),
('channel_map', pa_channel_map),
('owner_module', c_uint32),
('volume', pa_cvolume),
('mute', c_int),
('monitor_of_sink', c_uint32),
('monitor_of_sink_name', c_char_p),
('latency', pa_usec_t),
('driver', c_char_p),
('flags', pa_source_flags_t),
]
pa_source_info = struct_pa_source_info # /usr/include/pulse/introspect.h:253
pa_source_info_cb_t = CFUNCTYPE(None, POINTER(pa_context),
POINTER(pa_source_info), c_int, POINTER(
None)) # /usr/include/pulse/introspect.h:256
# /usr/include/pulse/introspect.h:259
pa_context_get_source_info_by_name = _lib.pa_context_get_source_info_by_name
pa_context_get_source_info_by_name.restype = POINTER(pa_operation)
pa_context_get_source_info_by_name.argtypes = [POINTER(pa_context), c_char_p,
pa_source_info_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:262
pa_context_get_source_info_by_index = _lib.pa_context_get_source_info_by_index
pa_context_get_source_info_by_index.restype = POINTER(pa_operation)
pa_context_get_source_info_by_index.argtypes = [POINTER(pa_context), c_uint32,
pa_source_info_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:265
pa_context_get_source_info_list = _lib.pa_context_get_source_info_list
pa_context_get_source_info_list.restype = POINTER(pa_operation)
pa_context_get_source_info_list.argtypes = [POINTER(pa_context),
pa_source_info_cb_t, POINTER(None)]
class struct_pa_server_info(Structure):
__slots__ = [
'user_name',
'host_name',
'server_version',
'server_name',
'sample_spec',
'default_sink_name',
'default_source_name',
'cookie',
]
struct_pa_server_info._fields_ = [
('user_name', c_char_p),
('host_name', c_char_p),
('server_version', c_char_p),
('server_name', c_char_p),
('sample_spec', pa_sample_spec),
('default_sink_name', c_char_p),
('default_source_name', c_char_p),
('cookie', c_uint32),
]
pa_server_info = struct_pa_server_info # /usr/include/pulse/introspect.h:277
pa_server_info_cb_t = CFUNCTYPE(None, POINTER(pa_context),
POINTER(pa_server_info), POINTER(
None)) # /usr/include/pulse/introspect.h:280
# /usr/include/pulse/introspect.h:283
pa_context_get_server_info = _lib.pa_context_get_server_info
pa_context_get_server_info.restype = POINTER(pa_operation)
pa_context_get_server_info.argtypes = [POINTER(pa_context), pa_server_info_cb_t,
POINTER(None)]
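# --- Illustrative usage sketch (not part of the generated bindings) ---------
# Fetches the default sink name asynchronously on a READY context.
def _example_default_sink(context, result):
    def _cb(ctx, info, userdata):
        if info:                       # info is NULL on failure
            result.append(info.contents.default_sink_name)
    cb = pa_server_info_cb_t(_cb)
    op = pa_context_get_server_info(context, cb, None)
    return op, cb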
class struct_pa_module_info(Structure):
__slots__ = [
'index',
'name',
'argument',
'n_used',
'auto_unload',
]
struct_pa_module_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('argument', c_char_p),
('n_used', c_uint32),
('auto_unload', c_int),
]
pa_module_info = struct_pa_module_info # /usr/include/pulse/introspect.h:292
pa_module_info_cb_t = CFUNCTYPE(None, POINTER(pa_context),
POINTER(pa_module_info), c_int, POINTER(
None)) # /usr/include/pulse/introspect.h:295
# /usr/include/pulse/introspect.h:298
pa_context_get_module_info = _lib.pa_context_get_module_info
pa_context_get_module_info.restype = POINTER(pa_operation)
pa_context_get_module_info.argtypes = [POINTER(pa_context), c_uint32,
pa_module_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:301
pa_context_get_module_info_list = _lib.pa_context_get_module_info_list
pa_context_get_module_info_list.restype = POINTER(pa_operation)
pa_context_get_module_info_list.argtypes = [POINTER(pa_context),
pa_module_info_cb_t, POINTER(None)]
class struct_pa_client_info(Structure):
__slots__ = [
'index',
'name',
'owner_module',
'driver',
]
struct_pa_client_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('owner_module', c_uint32),
('driver', c_char_p),
]
pa_client_info = struct_pa_client_info # /usr/include/pulse/introspect.h:309
pa_client_info_cb_t = CFUNCTYPE(None, POINTER(pa_context),
POINTER(pa_client_info), c_int, POINTER(
None)) # /usr/include/pulse/introspect.h:312
# /usr/include/pulse/introspect.h:315
pa_context_get_client_info = _lib.pa_context_get_client_info
pa_context_get_client_info.restype = POINTER(pa_operation)
pa_context_get_client_info.argtypes = [POINTER(pa_context), c_uint32,
pa_client_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:318
pa_context_get_client_info_list = _lib.pa_context_get_client_info_list
pa_context_get_client_info_list.restype = POINTER(pa_operation)
pa_context_get_client_info_list.argtypes = [POINTER(pa_context),
pa_client_info_cb_t, POINTER(None)]
class struct_pa_sink_input_info(Structure):
__slots__ = [
'index',
'name',
'owner_module',
'client',
'sink',
'sample_spec',
'channel_map',
'volume',
'buffer_usec',
'sink_usec',
'resample_method',
'driver',
'mute',
]
struct_pa_sink_input_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('owner_module', c_uint32),
('client', c_uint32),
('sink', c_uint32),
('sample_spec', pa_sample_spec),
('channel_map', pa_channel_map),
('volume', pa_cvolume),
('buffer_usec', pa_usec_t),
('sink_usec', pa_usec_t),
('resample_method', c_char_p),
('driver', c_char_p),
('mute', c_int),
]
# /usr/include/pulse/introspect.h:335
pa_sink_input_info = struct_pa_sink_input_info
pa_sink_input_info_cb_t = CFUNCTYPE(None, POINTER(pa_context),
POINTER(pa_sink_input_info), c_int, POINTER(
None)) # /usr/include/pulse/introspect.h:338
# /usr/include/pulse/introspect.h:341
pa_context_get_sink_input_info = _lib.pa_context_get_sink_input_info
pa_context_get_sink_input_info.restype = POINTER(pa_operation)
pa_context_get_sink_input_info.argtypes = [POINTER(pa_context), c_uint32,
pa_sink_input_info_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:344
pa_context_get_sink_input_info_list = _lib.pa_context_get_sink_input_info_list
pa_context_get_sink_input_info_list.restype = POINTER(pa_operation)
pa_context_get_sink_input_info_list.argtypes = [POINTER(pa_context),
pa_sink_input_info_cb_t,
POINTER(None)]
class struct_pa_source_output_info(Structure):
__slots__ = [
'index',
'name',
'owner_module',
'client',
'source',
'sample_spec',
'channel_map',
'buffer_usec',
'source_usec',
'resample_method',
'driver',
]
struct_pa_source_output_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('owner_module', c_uint32),
('client', c_uint32),
('source', c_uint32),
('sample_spec', pa_sample_spec),
('channel_map', pa_channel_map),
('buffer_usec', pa_usec_t),
('source_usec', pa_usec_t),
('resample_method', c_char_p),
('driver', c_char_p),
]
# /usr/include/pulse/introspect.h:359
pa_source_output_info = struct_pa_source_output_info
pa_source_output_info_cb_t = CFUNCTYPE(None, POINTER(pa_context),
POINTER(pa_source_output_info), c_int,
POINTER(
None)) # /usr/include/pulse/introspect.h:362
# /usr/include/pulse/introspect.h:365
pa_context_get_source_output_info = _lib.pa_context_get_source_output_info
pa_context_get_source_output_info.restype = POINTER(pa_operation)
pa_context_get_source_output_info.argtypes = [POINTER(pa_context), c_uint32,
pa_source_output_info_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:368
pa_context_get_source_output_info_list = _lib.pa_context_get_source_output_info_list
pa_context_get_source_output_info_list.restype = POINTER(pa_operation)
pa_context_get_source_output_info_list.argtypes = [POINTER(pa_context),
pa_source_output_info_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:371
pa_context_set_sink_volume_by_index = _lib.pa_context_set_sink_volume_by_index
pa_context_set_sink_volume_by_index.restype = POINTER(pa_operation)
pa_context_set_sink_volume_by_index.argtypes = [POINTER(pa_context), c_uint32,
POINTER(pa_cvolume),
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:374
pa_context_set_sink_volume_by_name = _lib.pa_context_set_sink_volume_by_name
pa_context_set_sink_volume_by_name.restype = POINTER(pa_operation)
pa_context_set_sink_volume_by_name.argtypes = [POINTER(pa_context), c_char_p,
POINTER(pa_cvolume),
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:377
pa_context_set_sink_mute_by_index = _lib.pa_context_set_sink_mute_by_index
pa_context_set_sink_mute_by_index.restype = POINTER(pa_operation)
pa_context_set_sink_mute_by_index.argtypes = [POINTER(pa_context), c_uint32,
c_int, pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:380
pa_context_set_sink_mute_by_name = _lib.pa_context_set_sink_mute_by_name
pa_context_set_sink_mute_by_name.restype = POINTER(pa_operation)
pa_context_set_sink_mute_by_name.argtypes = [POINTER(pa_context), c_char_p,
c_int, pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:383
pa_context_set_sink_input_volume = _lib.pa_context_set_sink_input_volume
pa_context_set_sink_input_volume.restype = POINTER(pa_operation)
pa_context_set_sink_input_volume.argtypes = [POINTER(pa_context), c_uint32,
POINTER(pa_cvolume),
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:386
pa_context_set_sink_input_mute = _lib.pa_context_set_sink_input_mute
pa_context_set_sink_input_mute.restype = POINTER(pa_operation)
pa_context_set_sink_input_mute.argtypes = [POINTER(pa_context), c_uint32, c_int,
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:389
pa_context_set_source_volume_by_index = _lib.pa_context_set_source_volume_by_index
pa_context_set_source_volume_by_index.restype = POINTER(pa_operation)
pa_context_set_source_volume_by_index.argtypes = [POINTER(pa_context), c_uint32,
POINTER(pa_cvolume),
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:392
pa_context_set_source_volume_by_name = _lib.pa_context_set_source_volume_by_name
pa_context_set_source_volume_by_name.restype = POINTER(pa_operation)
pa_context_set_source_volume_by_name.argtypes = [POINTER(pa_context), c_char_p,
POINTER(pa_cvolume),
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:395
pa_context_set_source_mute_by_index = _lib.pa_context_set_source_mute_by_index
pa_context_set_source_mute_by_index.restype = POINTER(pa_operation)
pa_context_set_source_mute_by_index.argtypes = [POINTER(pa_context), c_uint32,
c_int, pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:398
pa_context_set_source_mute_by_name = _lib.pa_context_set_source_mute_by_name
pa_context_set_source_mute_by_name.restype = POINTER(pa_operation)
pa_context_set_source_mute_by_name.argtypes = [POINTER(pa_context), c_char_p,
c_int, pa_context_success_cb_t,
POINTER(None)]
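# --- Illustrative usage sketch (not part of the generated bindings) ---------
# Sets a sink (index 0 by default) to 50% of PA_VOLUME_NORM on every channel.
# The channel count must match the sink; no success callback is registered.
def _example_set_sink_volume(context, sink_index=0, channels=2):
    vol = pa_cvolume()
    pa_cvolume_set(byref(vol), channels, PA_VOLUME_NORM // 2)
    return pa_context_set_sink_volume_by_index(context, sink_index,
                                               byref(vol), None, None)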
class struct_pa_stat_info(Structure):
__slots__ = [
'memblock_total',
'memblock_total_size',
'memblock_allocated',
'memblock_allocated_size',
'scache_size',
]
struct_pa_stat_info._fields_ = [
('memblock_total', c_uint32),
('memblock_total_size', c_uint32),
('memblock_allocated', c_uint32),
('memblock_allocated_size', c_uint32),
('scache_size', c_uint32),
]
pa_stat_info = struct_pa_stat_info # /usr/include/pulse/introspect.h:407
pa_stat_info_cb_t = CFUNCTYPE(None, POINTER(pa_context), POINTER(pa_stat_info),
POINTER(
None)) # /usr/include/pulse/introspect.h:410
# /usr/include/pulse/introspect.h:413
pa_context_stat = _lib.pa_context_stat
pa_context_stat.restype = POINTER(pa_operation)
pa_context_stat.argtypes = [POINTER(pa_context), pa_stat_info_cb_t,
POINTER(None)]
class struct_pa_sample_info(Structure):
__slots__ = [
'index',
'name',
'volume',
'sample_spec',
'channel_map',
'duration',
'bytes',
'lazy',
'filename',
]
struct_pa_sample_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('volume', pa_cvolume),
('sample_spec', pa_sample_spec),
('channel_map', pa_channel_map),
('duration', pa_usec_t),
('bytes', c_uint32),
('lazy', c_int),
('filename', c_char_p),
]
pa_sample_info = struct_pa_sample_info # /usr/include/pulse/introspect.h:426
pa_sample_info_cb_t = CFUNCTYPE(None, POINTER(pa_context),
POINTER(pa_sample_info), c_int, POINTER(
None)) # /usr/include/pulse/introspect.h:429
# /usr/include/pulse/introspect.h:432
pa_context_get_sample_info_by_name = _lib.pa_context_get_sample_info_by_name
pa_context_get_sample_info_by_name.restype = POINTER(pa_operation)
pa_context_get_sample_info_by_name.argtypes = [POINTER(pa_context), c_char_p,
pa_sample_info_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:435
pa_context_get_sample_info_by_index = _lib.pa_context_get_sample_info_by_index
pa_context_get_sample_info_by_index.restype = POINTER(pa_operation)
pa_context_get_sample_info_by_index.argtypes = [POINTER(pa_context), c_uint32,
pa_sample_info_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:438
pa_context_get_sample_info_list = _lib.pa_context_get_sample_info_list
pa_context_get_sample_info_list.restype = POINTER(pa_operation)
pa_context_get_sample_info_list.argtypes = [POINTER(pa_context),
pa_sample_info_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:441
pa_context_kill_client = _lib.pa_context_kill_client
pa_context_kill_client.restype = POINTER(pa_operation)
pa_context_kill_client.argtypes = [POINTER(pa_context), c_uint32,
pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:444
pa_context_kill_sink_input = _lib.pa_context_kill_sink_input
pa_context_kill_sink_input.restype = POINTER(pa_operation)
pa_context_kill_sink_input.argtypes = [POINTER(pa_context), c_uint32,
pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:447
pa_context_kill_source_output = _lib.pa_context_kill_source_output
pa_context_kill_source_output.restype = POINTER(pa_operation)
pa_context_kill_source_output.argtypes = [POINTER(pa_context), c_uint32,
pa_context_success_cb_t,
POINTER(None)]
pa_context_index_cb_t = CFUNCTYPE(None, POINTER(pa_context), c_uint32, POINTER(
None)) # /usr/include/pulse/introspect.h:450
# /usr/include/pulse/introspect.h:453
pa_context_load_module = _lib.pa_context_load_module
pa_context_load_module.restype = POINTER(pa_operation)
pa_context_load_module.argtypes = [POINTER(pa_context), c_char_p, c_char_p,
pa_context_index_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:456
pa_context_unload_module = _lib.pa_context_unload_module
pa_context_unload_module.restype = POINTER(pa_operation)
pa_context_unload_module.argtypes = [POINTER(pa_context), c_uint32,
pa_context_success_cb_t, POINTER(None)]
enum_pa_autoload_type = c_int
PA_AUTOLOAD_SINK = 0
PA_AUTOLOAD_SOURCE = 1
# /usr/include/pulse/introspect.h:462
pa_autoload_type_t = enum_pa_autoload_type
class struct_pa_autoload_info(Structure):
__slots__ = [
'index',
'name',
'type',
'module',
'argument',
]
struct_pa_autoload_info._fields_ = [
('index', c_uint32),
('name', c_char_p),
('type', pa_autoload_type_t),
('module', c_char_p),
('argument', c_char_p),
]
# /usr/include/pulse/introspect.h:471
pa_autoload_info = struct_pa_autoload_info
pa_autoload_info_cb_t = CFUNCTYPE(None, POINTER(pa_context),
POINTER(pa_autoload_info), c_int, POINTER(
None)) # /usr/include/pulse/introspect.h:474
# /usr/include/pulse/introspect.h:477
pa_context_get_autoload_info_by_name = _lib.pa_context_get_autoload_info_by_name
pa_context_get_autoload_info_by_name.restype = POINTER(pa_operation)
pa_context_get_autoload_info_by_name.argtypes = [POINTER(pa_context), c_char_p,
pa_autoload_type_t,
pa_autoload_info_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:480
pa_context_get_autoload_info_by_index = _lib.pa_context_get_autoload_info_by_index
pa_context_get_autoload_info_by_index.restype = POINTER(pa_operation)
pa_context_get_autoload_info_by_index.argtypes = [POINTER(pa_context), c_uint32,
pa_autoload_info_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:483
pa_context_get_autoload_info_list = _lib.pa_context_get_autoload_info_list
pa_context_get_autoload_info_list.restype = POINTER(pa_operation)
pa_context_get_autoload_info_list.argtypes = [POINTER(pa_context),
pa_autoload_info_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:486
pa_context_add_autoload = _lib.pa_context_add_autoload
pa_context_add_autoload.restype = POINTER(pa_operation)
pa_context_add_autoload.argtypes = [POINTER(pa_context), c_char_p,
pa_autoload_type_t, c_char_p, c_char_p,
pa_context_index_cb_t, POINTER(None)]
# /usr/include/pulse/introspect.h:489
pa_context_remove_autoload_by_name = _lib.pa_context_remove_autoload_by_name
pa_context_remove_autoload_by_name.restype = POINTER(pa_operation)
pa_context_remove_autoload_by_name.argtypes = [POINTER(pa_context), c_char_p,
pa_autoload_type_t,
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:492
pa_context_remove_autoload_by_index = _lib.pa_context_remove_autoload_by_index
pa_context_remove_autoload_by_index.restype = POINTER(pa_operation)
pa_context_remove_autoload_by_index.argtypes = [POINTER(pa_context), c_uint32,
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:495
pa_context_move_sink_input_by_name = _lib.pa_context_move_sink_input_by_name
pa_context_move_sink_input_by_name.restype = POINTER(pa_operation)
pa_context_move_sink_input_by_name.argtypes = [POINTER(pa_context), c_uint32,
c_char_p,
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:498
pa_context_move_sink_input_by_index = _lib.pa_context_move_sink_input_by_index
pa_context_move_sink_input_by_index.restype = POINTER(pa_operation)
pa_context_move_sink_input_by_index.argtypes = [POINTER(pa_context), c_uint32,
c_uint32,
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:501
pa_context_move_source_output_by_name = _lib.pa_context_move_source_output_by_name
pa_context_move_source_output_by_name.restype = POINTER(pa_operation)
pa_context_move_source_output_by_name.argtypes = [POINTER(pa_context), c_uint32,
c_char_p,
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:504
pa_context_move_source_output_by_index = _lib.pa_context_move_source_output_by_index
pa_context_move_source_output_by_index.restype = POINTER(pa_operation)
pa_context_move_source_output_by_index.argtypes = [POINTER(pa_context),
c_uint32, c_uint32,
pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:507
pa_context_suspend_sink_by_name = _lib.pa_context_suspend_sink_by_name
pa_context_suspend_sink_by_name.restype = POINTER(pa_operation)
pa_context_suspend_sink_by_name.argtypes = [POINTER(pa_context), c_char_p,
c_int, pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:510
pa_context_suspend_sink_by_index = _lib.pa_context_suspend_sink_by_index
pa_context_suspend_sink_by_index.restype = POINTER(pa_operation)
pa_context_suspend_sink_by_index.argtypes = [POINTER(pa_context), c_uint32,
c_int, pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:513
pa_context_suspend_source_by_name = _lib.pa_context_suspend_source_by_name
pa_context_suspend_source_by_name.restype = POINTER(pa_operation)
pa_context_suspend_source_by_name.argtypes = [POINTER(pa_context), c_char_p,
c_int, pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/introspect.h:516
pa_context_suspend_source_by_index = _lib.pa_context_suspend_source_by_index
pa_context_suspend_source_by_index.restype = POINTER(pa_operation)
pa_context_suspend_source_by_index.argtypes = [POINTER(pa_context), c_uint32,
c_int, pa_context_success_cb_t,
POINTER(None)]
pa_context_subscribe_cb_t = CFUNCTYPE(None, POINTER(pa_context),
pa_subscription_event_type_t, c_uint32,
POINTER(
None)) # /usr/include/pulse/subscribe.h:54
# /usr/include/pulse/subscribe.h:57
pa_context_subscribe = _lib.pa_context_subscribe
pa_context_subscribe.restype = POINTER(pa_operation)
pa_context_subscribe.argtypes = [POINTER(pa_context), pa_subscription_mask_t,
pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/subscribe.h:60
pa_context_set_subscribe_callback = _lib.pa_context_set_subscribe_callback
pa_context_set_subscribe_callback.restype = None
pa_context_set_subscribe_callback.argtypes = [POINTER(pa_context),
pa_context_subscribe_cb_t,
POINTER(None)]
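# --- Illustrative usage sketch (not part of the generated bindings) ---------
# Subscribes to sink events on a READY context; the facility and event kind
# are recovered with the masks from pulse/def.h declared above.
def _example_subscribe_sinks(context, events):
    def _cb(ctx, event, index, userdata):
        facility = event & PA_SUBSCRIPTION_EVENT_FACILITY_MASK
        kind = event & PA_SUBSCRIPTION_EVENT_TYPE_MASK
        events.append((facility, kind, index))
    cb = pa_context_subscribe_cb_t(_cb)
    pa_context_set_subscribe_callback(context, cb, None)
    op = pa_context_subscribe(context, PA_SUBSCRIPTION_MASK_SINK, None, None)
    return op, cb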
# /usr/include/pulse/scache.h:83
pa_stream_connect_upload = _lib.pa_stream_connect_upload
pa_stream_connect_upload.restype = c_int
pa_stream_connect_upload.argtypes = [POINTER(pa_stream), c_size_t]
# /usr/include/pulse/scache.h:87
pa_stream_finish_upload = _lib.pa_stream_finish_upload
pa_stream_finish_upload.restype = c_int
pa_stream_finish_upload.argtypes = [POINTER(pa_stream)]
# /usr/include/pulse/scache.h:90
pa_context_play_sample = _lib.pa_context_play_sample
pa_context_play_sample.restype = POINTER(pa_operation)
pa_context_play_sample.argtypes = [POINTER(pa_context), c_char_p, c_char_p,
pa_volume_t, pa_context_success_cb_t,
POINTER(None)]
# /usr/include/pulse/scache.h:99
pa_context_remove_sample = _lib.pa_context_remove_sample
pa_context_remove_sample.restype = POINTER(pa_operation)
pa_context_remove_sample.argtypes = [POINTER(pa_context), c_char_p,
pa_context_success_cb_t, POINTER(None)]
# /usr/include/pulse/version.h:43
pa_get_library_version = _lib.pa_get_library_version
pa_get_library_version.restype = c_char_p
pa_get_library_version.argtypes = list()
PA_API_VERSION = 11 # /usr/include/pulse/version.h:48
PA_PROTOCOL_VERSION = 12 # /usr/include/pulse/version.h:52
# /usr/include/pulse/error.h:37
pa_strerror = _lib.pa_strerror
pa_strerror.restype = c_char_p
pa_strerror.argtypes = [c_int]
# /usr/include/pulse/xmalloc.h:40
pa_xmalloc = _lib.pa_xmalloc
pa_xmalloc.restype = c_void_p  # ctypes has no c_void; void* return
pa_xmalloc.argtypes = [c_size_t]
# /usr/include/pulse/xmalloc.h:43
pa_xmalloc0 = _lib.pa_xmalloc0
pa_xmalloc0.restype = c_void_p
pa_xmalloc0.argtypes = [c_size_t]
# /usr/include/pulse/xmalloc.h:46
pa_xrealloc = _lib.pa_xrealloc
pa_xrealloc.restype = c_void_p
pa_xrealloc.argtypes = [POINTER(None), c_size_t]
# /usr/include/pulse/xmalloc.h:49
pa_xfree = _lib.pa_xfree
pa_xfree.restype = None
pa_xfree.argtypes = [POINTER(None)]
# /usr/include/pulse/xmalloc.h:52
pa_xstrdup = _lib.pa_xstrdup
pa_xstrdup.restype = c_char_p
pa_xstrdup.argtypes = [c_char_p]
# /usr/include/pulse/xmalloc.h:55
pa_xstrndup = _lib.pa_xstrndup
pa_xstrndup.restype = c_char_p
pa_xstrndup.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/xmalloc.h:58
pa_xmemdup = _lib.pa_xmemdup
pa_xmemdup.restype = c_void_p
pa_xmemdup.argtypes = [POINTER(None), c_size_t]
# /usr/include/pulse/utf8.h:37
pa_utf8_valid = _lib.pa_utf8_valid
pa_utf8_valid.restype = c_char_p
pa_utf8_valid.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:40
pa_utf8_filter = _lib.pa_utf8_filter
pa_utf8_filter.restype = c_char_p
pa_utf8_filter.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:43
pa_utf8_to_locale = _lib.pa_utf8_to_locale
pa_utf8_to_locale.restype = c_char_p
pa_utf8_to_locale.argtypes = [c_char_p]
# /usr/include/pulse/utf8.h:46
pa_locale_to_utf8 = _lib.pa_locale_to_utf8
pa_locale_to_utf8.restype = c_char_p
pa_locale_to_utf8.argtypes = [c_char_p]
class struct_pa_threaded_mainloop(Structure):
__slots__ = [
]
struct_pa_threaded_mainloop._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/thread-mainloop.h:242
pa_threaded_mainloop = struct_pa_threaded_mainloop
# /usr/include/pulse/thread-mainloop.h:247
pa_threaded_mainloop_new = _lib.pa_threaded_mainloop_new
pa_threaded_mainloop_new.restype = POINTER(pa_threaded_mainloop)
pa_threaded_mainloop_new.argtypes = list()
# /usr/include/pulse/thread-mainloop.h:252
pa_threaded_mainloop_free = _lib.pa_threaded_mainloop_free
pa_threaded_mainloop_free.restype = None
pa_threaded_mainloop_free.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:255
pa_threaded_mainloop_start = _lib.pa_threaded_mainloop_start
pa_threaded_mainloop_start.restype = c_int
pa_threaded_mainloop_start.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:259
pa_threaded_mainloop_stop = _lib.pa_threaded_mainloop_stop
pa_threaded_mainloop_stop.restype = None
pa_threaded_mainloop_stop.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:267
pa_threaded_mainloop_lock = _lib.pa_threaded_mainloop_lock
pa_threaded_mainloop_lock.restype = None
pa_threaded_mainloop_lock.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:270
pa_threaded_mainloop_unlock = _lib.pa_threaded_mainloop_unlock
pa_threaded_mainloop_unlock.restype = None
pa_threaded_mainloop_unlock.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:279
pa_threaded_mainloop_wait = _lib.pa_threaded_mainloop_wait
pa_threaded_mainloop_wait.restype = None
pa_threaded_mainloop_wait.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:286
pa_threaded_mainloop_signal = _lib.pa_threaded_mainloop_signal
pa_threaded_mainloop_signal.restype = None
pa_threaded_mainloop_signal.argtypes = [POINTER(pa_threaded_mainloop), c_int]
# /usr/include/pulse/thread-mainloop.h:292
pa_threaded_mainloop_accept = _lib.pa_threaded_mainloop_accept
pa_threaded_mainloop_accept.restype = None
pa_threaded_mainloop_accept.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:295
pa_threaded_mainloop_get_retval = _lib.pa_threaded_mainloop_get_retval
pa_threaded_mainloop_get_retval.restype = c_int
pa_threaded_mainloop_get_retval.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:298
pa_threaded_mainloop_get_api = _lib.pa_threaded_mainloop_get_api
pa_threaded_mainloop_get_api.restype = POINTER(pa_mainloop_api)
pa_threaded_mainloop_get_api.argtypes = [POINTER(pa_threaded_mainloop)]
# /usr/include/pulse/thread-mainloop.h:301
pa_threaded_mainloop_in_thread = _lib.pa_threaded_mainloop_in_thread
pa_threaded_mainloop_in_thread.restype = c_int
pa_threaded_mainloop_in_thread.argtypes = [POINTER(pa_threaded_mainloop)]
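# --- Illustrative usage sketch (not part of the generated bindings) ---------
# Typical threaded-mainloop pattern: every libpulse call is made while the
# mainloop lock is held, and callbacks wake waiting threads via
# pa_threaded_mainloop_signal().
def _example_threaded_mainloop():
    ml = pa_threaded_mainloop_new()
    pa_threaded_mainloop_start(ml)
    pa_threaded_mainloop_lock(ml)
    try:
        api = pa_threaded_mainloop_get_api(ml)
        # ...create a pa_context with `api`, issue requests, and call
        # pa_threaded_mainloop_wait(ml) until a callback signals completion...
    finally:
        pa_threaded_mainloop_unlock(ml)
    return ml, api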
class struct_pa_mainloop(Structure):
__slots__ = [
]
struct_pa_mainloop._fields_ = [
('_opaque_struct', c_int)
]
pa_mainloop = struct_pa_mainloop # /usr/include/pulse/mainloop.h:79
# /usr/include/pulse/mainloop.h:82
pa_mainloop_new = _lib.pa_mainloop_new
pa_mainloop_new.restype = POINTER(pa_mainloop)
pa_mainloop_new.argtypes = list()
# /usr/include/pulse/mainloop.h:85
pa_mainloop_free = _lib.pa_mainloop_free
pa_mainloop_free.restype = None
pa_mainloop_free.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:90
pa_mainloop_prepare = _lib.pa_mainloop_prepare
pa_mainloop_prepare.restype = c_int
pa_mainloop_prepare.argtypes = [POINTER(pa_mainloop), c_int]
# /usr/include/pulse/mainloop.h:93
pa_mainloop_poll = _lib.pa_mainloop_poll
pa_mainloop_poll.restype = c_int
pa_mainloop_poll.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:97
pa_mainloop_dispatch = _lib.pa_mainloop_dispatch
pa_mainloop_dispatch.restype = c_int
pa_mainloop_dispatch.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:100
pa_mainloop_get_retval = _lib.pa_mainloop_get_retval
pa_mainloop_get_retval.restype = c_int
pa_mainloop_get_retval.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:108
pa_mainloop_iterate = _lib.pa_mainloop_iterate
pa_mainloop_iterate.restype = c_int
pa_mainloop_iterate.argtypes = [POINTER(pa_mainloop), c_int, POINTER(c_int)]
# /usr/include/pulse/mainloop.h:111
pa_mainloop_run = _lib.pa_mainloop_run
pa_mainloop_run.restype = c_int
pa_mainloop_run.argtypes = [POINTER(pa_mainloop), POINTER(c_int)]
# /usr/include/pulse/mainloop.h:114
pa_mainloop_get_api = _lib.pa_mainloop_get_api
pa_mainloop_get_api.restype = POINTER(pa_mainloop_api)
pa_mainloop_get_api.argtypes = [POINTER(pa_mainloop)]
# /usr/include/pulse/mainloop.h:117
pa_mainloop_quit = _lib.pa_mainloop_quit
pa_mainloop_quit.restype = None
pa_mainloop_quit.argtypes = [POINTER(pa_mainloop), c_int]
# /usr/include/pulse/mainloop.h:120
pa_mainloop_wakeup = _lib.pa_mainloop_wakeup
pa_mainloop_wakeup.restype = None
pa_mainloop_wakeup.argtypes = [POINTER(pa_mainloop)]
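# --- Illustrative usage sketch (not part of the generated bindings) ---------
# Connects a context to the default server with the plain (non-threaded)
# mainloop and iterates until the connection succeeds or fails.
def _example_connect(app_name=b'example'):
    ml = pa_mainloop_new()
    ctx = pa_context_new(pa_mainloop_get_api(ml), app_name)
    pa_context_connect(ctx, None, 0, None)
    while pa_context_get_state(ctx) not in (PA_CONTEXT_READY,
                                            PA_CONTEXT_FAILED,
                                            PA_CONTEXT_TERMINATED):
        pa_mainloop_iterate(ml, 1, None)   # block for one dispatch round
    return ml, ctx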
class struct_pollfd(Structure):
__slots__ = [
]
struct_pollfd._fields_ = [
('_opaque_struct', c_int)
]
pa_poll_func = CFUNCTYPE(c_int, POINTER(struct_pollfd), c_ulong, c_int,
POINTER(None)) # /usr/include/pulse/mainloop.h:123
# /usr/include/pulse/mainloop.h:126
pa_mainloop_set_poll_func = _lib.pa_mainloop_set_poll_func
pa_mainloop_set_poll_func.restype = None
pa_mainloop_set_poll_func.argtypes = [POINTER(pa_mainloop), pa_poll_func,
POINTER(None)]
# /usr/include/pulse/mainloop-signal.h:43
pa_signal_init = _lib.pa_signal_init
pa_signal_init.restype = c_int
pa_signal_init.argtypes = [POINTER(pa_mainloop_api)]
# /usr/include/pulse/mainloop-signal.h:46
pa_signal_done = _lib.pa_signal_done
pa_signal_done.restype = None
pa_signal_done.argtypes = list()
class struct_pa_signal_event(Structure):
__slots__ = [
]
struct_pa_signal_event._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/mainloop-signal.h:49
pa_signal_event = struct_pa_signal_event
# /usr/include/pulse/mainloop-signal.h:52
pa_signal_new = _lib.pa_signal_new
pa_signal_new.restype = POINTER(pa_signal_event)
pa_signal_new.argtypes = [c_int, CFUNCTYPE(None, POINTER(pa_mainloop_api),
POINTER(pa_signal_event), c_int,
POINTER(None)), POINTER(None)]
# /usr/include/pulse/mainloop-signal.h:55
pa_signal_free = _lib.pa_signal_free
pa_signal_free.restype = None
pa_signal_free.argtypes = [POINTER(pa_signal_event)]
# /usr/include/pulse/mainloop-signal.h:58
pa_signal_set_destroy = _lib.pa_signal_set_destroy
pa_signal_set_destroy.restype = None
pa_signal_set_destroy.argtypes = [POINTER(pa_signal_event),
CFUNCTYPE(None, POINTER(pa_mainloop_api),
POINTER(pa_signal_event),
POINTER(None))]
# /usr/include/pulse/util.h:38
pa_get_user_name = _lib.pa_get_user_name
pa_get_user_name.restype = c_char_p
pa_get_user_name.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:41
pa_get_host_name = _lib.pa_get_host_name
pa_get_host_name.restype = c_char_p
pa_get_host_name.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:44
pa_get_fqdn = _lib.pa_get_fqdn
pa_get_fqdn.restype = c_char_p
pa_get_fqdn.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:47
pa_get_home_dir = _lib.pa_get_home_dir
pa_get_home_dir.restype = c_char_p
pa_get_home_dir.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:51
pa_get_binary_name = _lib.pa_get_binary_name
pa_get_binary_name.restype = c_char_p
pa_get_binary_name.argtypes = [c_char_p, c_size_t]
# /usr/include/pulse/util.h:55
pa_path_get_filename = _lib.pa_path_get_filename
pa_path_get_filename.restype = c_char_p
pa_path_get_filename.argtypes = [c_char_p]
# /usr/include/pulse/util.h:58
pa_msleep = _lib.pa_msleep
pa_msleep.restype = c_int
pa_msleep.argtypes = [c_ulong]
PA_MSEC_PER_SEC = 1000 # /usr/include/pulse/timeval.h:36
PA_USEC_PER_SEC = 1000000 # /usr/include/pulse/timeval.h:37
PA_NSEC_PER_SEC = 1000000000 # /usr/include/pulse/timeval.h:38
PA_USEC_PER_MSEC = 1000 # /usr/include/pulse/timeval.h:39
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:44
pa_gettimeofday = _lib.pa_gettimeofday
pa_gettimeofday.restype = POINTER(struct_timeval)
pa_gettimeofday.argtypes = [POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:48
pa_timeval_diff = _lib.pa_timeval_diff
pa_timeval_diff.restype = pa_usec_t
pa_timeval_diff.argtypes = [POINTER(struct_timeval), POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:51
pa_timeval_cmp = _lib.pa_timeval_cmp
pa_timeval_cmp.restype = c_int
pa_timeval_cmp.argtypes = [POINTER(struct_timeval), POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:54
pa_timeval_age = _lib.pa_timeval_age
pa_timeval_age.restype = pa_usec_t
pa_timeval_age.argtypes = [POINTER(struct_timeval)]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:57
pa_timeval_add = _lib.pa_timeval_add
pa_timeval_add.restype = POINTER(struct_timeval)
pa_timeval_add.argtypes = [POINTER(struct_timeval), pa_usec_t]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:60
pa_timeval_store = _lib.pa_timeval_store
pa_timeval_store.restype = POINTER(struct_timeval)
pa_timeval_store.argtypes = [POINTER(struct_timeval), pa_usec_t]
class struct_timeval(Structure):
__slots__ = [
]
struct_timeval._fields_ = [
('_opaque_struct', c_int)
]
# /usr/include/pulse/timeval.h:63
pa_timeval_load = _lib.pa_timeval_load
pa_timeval_load.restype = pa_usec_t
pa_timeval_load.argtypes = [POINTER(struct_timeval)]
__all__ = ['pa_mainloop_api', 'pa_io_event_flags_t', 'PA_IO_EVENT_NULL',
'PA_IO_EVENT_INPUT', 'PA_IO_EVENT_OUTPUT', 'PA_IO_EVENT_HANGUP',
'PA_IO_EVENT_ERROR', 'pa_io_event', 'pa_io_event_cb_t',
'pa_io_event_destroy_cb_t', 'pa_time_event', 'pa_time_event_cb_t',
'pa_time_event_destroy_cb_t', 'pa_defer_event',
'pa_defer_event_cb_t',
'pa_defer_event_destroy_cb_t', 'pa_mainloop_api_once',
'PA_CHANNELS_MAX',
'PA_RATE_MAX', 'pa_sample_format_t', 'PA_SAMPLE_U8',
'PA_SAMPLE_ALAW',
'PA_SAMPLE_ULAW', 'PA_SAMPLE_S16LE', 'PA_SAMPLE_S16BE',
'PA_SAMPLE_FLOAT32LE',
'PA_SAMPLE_FLOAT32BE', 'PA_SAMPLE_S32LE', 'PA_SAMPLE_S32BE',
'PA_SAMPLE_MAX',
'PA_SAMPLE_INVALID', 'pa_sample_spec', 'pa_usec_t',
'pa_bytes_per_second',
'pa_frame_size', 'pa_sample_size', 'pa_bytes_to_usec',
'pa_usec_to_bytes',
'pa_sample_spec_valid', 'pa_sample_spec_equal',
'pa_sample_format_to_string',
'pa_parse_sample_format', 'PA_SAMPLE_SPEC_SNPRINT_MAX',
'pa_sample_spec_snprint', 'pa_bytes_snprint', 'pa_context_state_t',
'PA_CONTEXT_UNCONNECTED', 'PA_CONTEXT_CONNECTING',
'PA_CONTEXT_AUTHORIZING',
'PA_CONTEXT_SETTING_NAME', 'PA_CONTEXT_READY', 'PA_CONTEXT_FAILED',
'PA_CONTEXT_TERMINATED', 'pa_stream_state_t',
'PA_STREAM_UNCONNECTED',
'PA_STREAM_CREATING', 'PA_STREAM_READY', 'PA_STREAM_FAILED',
'PA_STREAM_TERMINATED', 'pa_operation_state_t',
'PA_OPERATION_RUNNING',
'PA_OPERATION_DONE', 'PA_OPERATION_CANCELED', 'pa_context_flags_t',
'PA_CONTEXT_NOAUTOSPAWN', 'pa_stream_direction_t',
'PA_STREAM_NODIRECTION',
'PA_STREAM_PLAYBACK', 'PA_STREAM_RECORD', 'PA_STREAM_UPLOAD',
'pa_stream_flags_t', 'PA_STREAM_START_CORKED',
'PA_STREAM_INTERPOLATE_TIMING',
'PA_STREAM_NOT_MONOTONOUS', 'PA_STREAM_AUTO_TIMING_UPDATE',
'PA_STREAM_NO_REMAP_CHANNELS', 'PA_STREAM_NO_REMIX_CHANNELS',
'PA_STREAM_FIX_FORMAT', 'PA_STREAM_FIX_RATE',
'PA_STREAM_FIX_CHANNELS',
'PA_STREAM_DONT_MOVE', 'PA_STREAM_VARIABLE_RATE', 'pa_buffer_attr',
'pa_subscription_mask_t', 'PA_SUBSCRIPTION_MASK_NULL',
'PA_SUBSCRIPTION_MASK_SINK', 'PA_SUBSCRIPTION_MASK_SOURCE',
'PA_SUBSCRIPTION_MASK_SINK_INPUT',
'PA_SUBSCRIPTION_MASK_SOURCE_OUTPUT',
'PA_SUBSCRIPTION_MASK_MODULE', 'PA_SUBSCRIPTION_MASK_CLIENT',
'PA_SUBSCRIPTION_MASK_SAMPLE_CACHE', 'PA_SUBSCRIPTION_MASK_SERVER',
'PA_SUBSCRIPTION_MASK_AUTOLOAD', 'PA_SUBSCRIPTION_MASK_ALL',
'pa_subscription_event_type_t', 'PA_SUBSCRIPTION_EVENT_SINK',
'PA_SUBSCRIPTION_EVENT_SOURCE', 'PA_SUBSCRIPTION_EVENT_SINK_INPUT',
'PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT',
'PA_SUBSCRIPTION_EVENT_MODULE',
'PA_SUBSCRIPTION_EVENT_CLIENT', 'PA_SUBSCRIPTION_EVENT_SAMPLE_CACHE',
'PA_SUBSCRIPTION_EVENT_SERVER', 'PA_SUBSCRIPTION_EVENT_AUTOLOAD',
'PA_SUBSCRIPTION_EVENT_FACILITY_MASK', 'PA_SUBSCRIPTION_EVENT_NEW',
'PA_SUBSCRIPTION_EVENT_CHANGE', 'PA_SUBSCRIPTION_EVENT_REMOVE',
'PA_SUBSCRIPTION_EVENT_TYPE_MASK', 'pa_timing_info', 'pa_spawn_api',
'pa_seek_mode_t', 'PA_SEEK_RELATIVE', 'PA_SEEK_ABSOLUTE',
'PA_SEEK_RELATIVE_ON_READ', 'PA_SEEK_RELATIVE_END',
'pa_sink_flags_t',
'PA_SINK_HW_VOLUME_CTRL', 'PA_SINK_LATENCY', 'PA_SINK_HARDWARE',
'PA_SINK_NETWORK', 'pa_source_flags_t', 'PA_SOURCE_HW_VOLUME_CTRL',
'PA_SOURCE_LATENCY', 'PA_SOURCE_HARDWARE', 'PA_SOURCE_NETWORK',
'pa_free_cb_t', 'pa_operation', 'pa_operation_ref',
'pa_operation_unref',
'pa_operation_cancel', 'pa_operation_get_state', 'pa_context',
'pa_context_notify_cb_t', 'pa_context_success_cb_t',
'pa_context_new',
'pa_context_unref', 'pa_context_ref',
'pa_context_set_state_callback',
'pa_context_errno', 'pa_context_is_pending', 'pa_context_get_state',
'pa_context_connect', 'pa_context_disconnect', 'pa_context_drain',
'pa_context_exit_daemon', 'pa_context_set_default_sink',
'pa_context_set_default_source', 'pa_context_is_local',
'pa_context_set_name',
'pa_context_get_server', 'pa_context_get_protocol_version',
'pa_context_get_server_protocol_version', 'pa_channel_position_t',
'PA_CHANNEL_POSITION_INVALID', 'PA_CHANNEL_POSITION_MONO',
'PA_CHANNEL_POSITION_LEFT', 'PA_CHANNEL_POSITION_RIGHT',
'PA_CHANNEL_POSITION_CENTER', 'PA_CHANNEL_POSITION_FRONT_LEFT',
'PA_CHANNEL_POSITION_FRONT_RIGHT',
'PA_CHANNEL_POSITION_FRONT_CENTER',
'PA_CHANNEL_POSITION_REAR_CENTER', 'PA_CHANNEL_POSITION_REAR_LEFT',
'PA_CHANNEL_POSITION_REAR_RIGHT', 'PA_CHANNEL_POSITION_LFE',
'PA_CHANNEL_POSITION_SUBWOOFER',
'PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER',
'PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER',
'PA_CHANNEL_POSITION_SIDE_LEFT',
'PA_CHANNEL_POSITION_SIDE_RIGHT', 'PA_CHANNEL_POSITION_AUX0',
'PA_CHANNEL_POSITION_AUX1', 'PA_CHANNEL_POSITION_AUX2',
'PA_CHANNEL_POSITION_AUX3', 'PA_CHANNEL_POSITION_AUX4',
'PA_CHANNEL_POSITION_AUX5', 'PA_CHANNEL_POSITION_AUX6',
'PA_CHANNEL_POSITION_AUX7', 'PA_CHANNEL_POSITION_AUX8',
'PA_CHANNEL_POSITION_AUX9', 'PA_CHANNEL_POSITION_AUX10',
'PA_CHANNEL_POSITION_AUX11', 'PA_CHANNEL_POSITION_AUX12',
'PA_CHANNEL_POSITION_AUX13', 'PA_CHANNEL_POSITION_AUX14',
'PA_CHANNEL_POSITION_AUX15', 'PA_CHANNEL_POSITION_AUX16',
'PA_CHANNEL_POSITION_AUX17', 'PA_CHANNEL_POSITION_AUX18',
'PA_CHANNEL_POSITION_AUX19', 'PA_CHANNEL_POSITION_AUX20',
'PA_CHANNEL_POSITION_AUX21', 'PA_CHANNEL_POSITION_AUX22',
'PA_CHANNEL_POSITION_AUX23', 'PA_CHANNEL_POSITION_AUX24',
'PA_CHANNEL_POSITION_AUX25', 'PA_CHANNEL_POSITION_AUX26',
'PA_CHANNEL_POSITION_AUX27', 'PA_CHANNEL_POSITION_AUX28',
'PA_CHANNEL_POSITION_AUX29', 'PA_CHANNEL_POSITION_AUX30',
'PA_CHANNEL_POSITION_AUX31', 'PA_CHANNEL_POSITION_TOP_CENTER',
'PA_CHANNEL_POSITION_TOP_FRONT_LEFT',
'PA_CHANNEL_POSITION_TOP_FRONT_RIGHT',
'PA_CHANNEL_POSITION_TOP_FRONT_CENTER',
'PA_CHANNEL_POSITION_TOP_REAR_LEFT',
'PA_CHANNEL_POSITION_TOP_REAR_RIGHT',
'PA_CHANNEL_POSITION_TOP_REAR_CENTER',
'PA_CHANNEL_POSITION_MAX', 'pa_channel_map_def_t',
'PA_CHANNEL_MAP_AIFF',
'PA_CHANNEL_MAP_ALSA', 'PA_CHANNEL_MAP_AUX', 'PA_CHANNEL_MAP_WAVEEX',
'PA_CHANNEL_MAP_OSS', 'PA_CHANNEL_MAP_DEFAULT', 'pa_channel_map',
'pa_channel_map_init', 'pa_channel_map_init_mono',
'pa_channel_map_init_stereo', 'pa_channel_map_init_auto',
'pa_channel_position_to_string',
'pa_channel_position_to_pretty_string',
'PA_CHANNEL_MAP_SNPRINT_MAX', 'pa_channel_map_snprint',
'pa_channel_map_parse', 'pa_channel_map_equal',
'pa_channel_map_valid',
'pa_volume_t', 'PA_VOLUME_NORM', 'PA_VOLUME_MUTED', 'pa_cvolume',
'pa_cvolume_equal', 'pa_cvolume_set', 'PA_CVOLUME_SNPRINT_MAX',
'pa_cvolume_snprint', 'pa_cvolume_avg', 'pa_cvolume_valid',
'pa_cvolume_channels_equal_to', 'pa_sw_volume_multiply',
'pa_sw_cvolume_multiply', 'pa_sw_volume_from_dB',
'pa_sw_volume_to_dB',
'pa_sw_volume_from_linear', 'pa_sw_volume_to_linear',
'PA_DECIBEL_MININFTY',
'pa_stream', 'pa_stream_success_cb_t', 'pa_stream_request_cb_t',
'pa_stream_notify_cb_t', 'pa_stream_new', 'pa_stream_unref',
'pa_stream_ref',
'pa_stream_get_state', 'pa_stream_get_context',
'pa_stream_get_index',
'pa_stream_get_device_index', 'pa_stream_get_device_name',
'pa_stream_is_suspended', 'pa_stream_connect_playback',
'pa_stream_connect_record', 'pa_stream_disconnect',
'pa_stream_write',
'pa_stream_peek', 'pa_stream_drop', 'pa_stream_writable_size',
'pa_stream_readable_size', 'pa_stream_drain',
'pa_stream_update_timing_info',
'pa_stream_set_state_callback', 'pa_stream_set_write_callback',
'pa_stream_set_read_callback', 'pa_stream_set_overflow_callback',
'pa_stream_set_underflow_callback',
'pa_stream_set_latency_update_callback',
'pa_stream_set_moved_callback', 'pa_stream_set_suspended_callback',
'pa_stream_cork', 'pa_stream_flush', 'pa_stream_prebuf',
'pa_stream_trigger',
'pa_stream_set_name', 'pa_stream_get_time', 'pa_stream_get_latency',
'pa_stream_get_timing_info', 'pa_stream_get_sample_spec',
'pa_stream_get_channel_map', 'pa_stream_get_buffer_attr',
'pa_stream_set_buffer_attr', 'pa_stream_update_sample_rate',
'pa_sink_info',
'pa_sink_info_cb_t', 'pa_context_get_sink_info_by_name',
'pa_context_get_sink_info_by_index', 'pa_context_get_sink_info_list',
'pa_source_info', 'pa_source_info_cb_t',
'pa_context_get_source_info_by_name',
'pa_context_get_source_info_by_index',
'pa_context_get_source_info_list',
'pa_server_info', 'pa_server_info_cb_t',
'pa_context_get_server_info',
'pa_module_info', 'pa_module_info_cb_t',
'pa_context_get_module_info',
'pa_context_get_module_info_list', 'pa_client_info',
'pa_client_info_cb_t',
'pa_context_get_client_info', 'pa_context_get_client_info_list',
'pa_sink_input_info', 'pa_sink_input_info_cb_t',
'pa_context_get_sink_input_info',
'pa_context_get_sink_input_info_list',
'pa_source_output_info', 'pa_source_output_info_cb_t',
'pa_context_get_source_output_info',
'pa_context_get_source_output_info_list',
'pa_context_set_sink_volume_by_index',
'pa_context_set_sink_volume_by_name',
'pa_context_set_sink_mute_by_index',
'pa_context_set_sink_mute_by_name',
'pa_context_set_sink_input_volume', 'pa_context_set_sink_input_mute',
'pa_context_set_source_volume_by_index',
'pa_context_set_source_volume_by_name',
'pa_context_set_source_mute_by_index',
'pa_context_set_source_mute_by_name', 'pa_stat_info',
'pa_stat_info_cb_t',
'pa_context_stat', 'pa_sample_info', 'pa_sample_info_cb_t',
'pa_context_get_sample_info_by_name',
'pa_context_get_sample_info_by_index',
'pa_context_get_sample_info_list', 'pa_context_kill_client',
'pa_context_kill_sink_input', 'pa_context_kill_source_output',
'pa_context_index_cb_t', 'pa_context_load_module',
'pa_context_unload_module',
'pa_autoload_type_t', 'PA_AUTOLOAD_SINK', 'PA_AUTOLOAD_SOURCE',
'pa_autoload_info', 'pa_autoload_info_cb_t',
'pa_context_get_autoload_info_by_name',
'pa_context_get_autoload_info_by_index',
'pa_context_get_autoload_info_list',
'pa_context_add_autoload', 'pa_context_remove_autoload_by_name',
'pa_context_remove_autoload_by_index',
'pa_context_move_sink_input_by_name',
'pa_context_move_sink_input_by_index',
'pa_context_move_source_output_by_name',
'pa_context_move_source_output_by_index',
'pa_context_suspend_sink_by_name',
'pa_context_suspend_sink_by_index',
'pa_context_suspend_source_by_name',
'pa_context_suspend_source_by_index', 'pa_context_subscribe_cb_t',
'pa_context_subscribe', 'pa_context_set_subscribe_callback',
'pa_stream_connect_upload', 'pa_stream_finish_upload',
'pa_context_play_sample', 'pa_context_remove_sample',
'pa_get_library_version', 'PA_API_VERSION', 'PA_PROTOCOL_VERSION',
'pa_strerror', 'pa_xmalloc', 'pa_xmalloc0', 'pa_xrealloc',
'pa_xfree',
'pa_xstrdup', 'pa_xstrndup', 'pa_xmemdup', 'pa_utf8_valid',
'pa_utf8_filter',
'pa_utf8_to_locale', 'pa_locale_to_utf8', 'pa_threaded_mainloop',
'pa_threaded_mainloop_new', 'pa_threaded_mainloop_free',
'pa_threaded_mainloop_start', 'pa_threaded_mainloop_stop',
'pa_threaded_mainloop_lock', 'pa_threaded_mainloop_unlock',
'pa_threaded_mainloop_wait', 'pa_threaded_mainloop_signal',
'pa_threaded_mainloop_accept', 'pa_threaded_mainloop_get_retval',
'pa_threaded_mainloop_get_api', 'pa_threaded_mainloop_in_thread',
'pa_mainloop', 'pa_mainloop_new', 'pa_mainloop_free',
'pa_mainloop_prepare',
'pa_mainloop_poll', 'pa_mainloop_dispatch', 'pa_mainloop_get_retval',
'pa_mainloop_iterate', 'pa_mainloop_run', 'pa_mainloop_get_api',
'pa_mainloop_quit', 'pa_mainloop_wakeup', 'pa_poll_func',
'pa_mainloop_set_poll_func', 'pa_signal_init', 'pa_signal_done',
'pa_signal_event', 'pa_signal_new', 'pa_signal_free',
'pa_signal_set_destroy',
'pa_get_user_name', 'pa_get_host_name', 'pa_get_fqdn',
'pa_get_home_dir',
'pa_get_binary_name', 'pa_path_get_filename', 'pa_msleep',
'PA_MSEC_PER_SEC',
'PA_USEC_PER_SEC', 'PA_NSEC_PER_SEC', 'PA_USEC_PER_MSEC',
'pa_gettimeofday',
'pa_timeval_diff', 'pa_timeval_cmp', 'pa_timeval_age',
'pa_timeval_add',
'pa_timeval_store', 'pa_timeval_load']
| {
"content_hash": "2b9a9828886c1abf2b53920289e3a9c5",
"timestamp": "",
"source": "github",
"line_count": 2507,
"max_line_length": 88,
"avg_line_length": 36.81053051455923,
"alnum_prop": 0.6393632699059425,
"repo_name": "bitcraft/pyglet",
"id": "970487f5faad1554826730c3da20c4d7365073db",
"size": "92284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/experimental/mt_media/drivers/pulse/lib_pulseaudio.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1828"
},
{
"name": "HTML",
"bytes": "1652"
},
{
"name": "JavaScript",
"bytes": "6745"
},
{
"name": "PHP",
"bytes": "2192"
},
{
"name": "Python",
"bytes": "6201398"
},
{
"name": "Shell",
"bytes": "251"
}
],
"symlink_target": ""
} |
from multiprocessing.connection import Listener, Client
from os import path, environ
import datetime
address = path.join(environ['XDG_RUNTIME_DIR'], 'i3timer')
authkey = bytes(environ['XDG_SESSION_COOKIE'], 'ascii')
class Timer(object):
def __init__(self):
self.stop()
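    # Elapsed time is kept as `accumulator` (time banked across pauses) plus the
    # interval since `last_start` while the timer is running; stop() clears both.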
@property
def current(self):
if self.accumulator is None:
return self.running
r = self.running
if r is None:
return self.accumulator
else:
return self.accumulator + r
@property
def running(self):
return datetime.datetime.now() - self.last_start if self.last_start is not None else None
@property
def is_running(self):
return self.last_start is not None
def get_state(self):
return {'current': self.current, 'running': self.running}
def start(self):
self.last_start = datetime.datetime.now()
def pause(self):
self.accumulator = self.current
self.last_start = None
def toggle(self):
if self.is_running:
self.pause()
else:
self.start()
def stop(self):
self.last_start = None
self.accumulator = None
def listen_forever():
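    # Minimal request loop: each client connection sends the name of a Timer method
    # (e.g. 'toggle'); the server invokes it and sends back the return value.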
timer = Timer()
with Listener(address=address, authkey=authkey) as listener:
while True:
conn = listener.accept()
try:
message = conn.recv()
conn.send(getattr(timer, message)())
conn.close()
except EOFError as e:
pass
except Exception as e:
print('exception occurred: %s' % e)
class TimerClient(object):
def __init__(self):
pass
def execute(self, command):
client = Client(address=address, authkey=authkey)
client.send(command)
return client.recv()
@property
def state(self):
return self.execute('get_state')
| {
"content_hash": "71e2b1bd8910b7c9de78def3ca50c21b",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 97,
"avg_line_length": 21.7,
"alnum_prop": 0.5765488991295443,
"repo_name": "sniegu/i3timer",
"id": "e2557b316408bf913736ef95376f8b832d193290",
"size": "1968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i3timer/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4669"
}
],
"symlink_target": ""
} |
import os
from flask.ext.script import Manager
from flask.ext.migrate import Migrate, MigrateCommand
from app import app, db
#app.config.from_object(os.environ['APP_SETTINGS'])
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run() | {
"content_hash": "fdc0e0889f972042cd291f79573c44e7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 53,
"avg_line_length": 18.823529411764707,
"alnum_prop": 0.721875,
"repo_name": "sotdjin/glibglab",
"id": "8583eb13d1f1bb98886d2659ffa44b1c0f604e55",
"size": "320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "164971"
},
{
"name": "HTML",
"bytes": "35613"
},
{
"name": "JavaScript",
"bytes": "10310"
},
{
"name": "Mako",
"bytes": "9463"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "9281018"
},
{
"name": "Shell",
"bytes": "3228"
}
],
"symlink_target": ""
} |
from scipy import linspace
from scipy.stats import norm
import numpy as np
import math
def densitiesPlot( densities, unit, legend = None ):
import matplotlib.pyplot as plt
L = len( densities[0] )/2
pts = symmetric_lattice( L=L, unit=unit )
for density in densities:
plt.plot( pts, density )
if legend is not None:
plt.legend( legend )
plt.grid()
plt.show()
def integer_shift( cdf, k ):
""" Shift cdf to the *right* so it represents the cdf for Y ~ X + k*unit
:param cdf:
:param k: int Number of lattice points
:return:
"""
if k < 0:
return np.append( cdf[ abs(k):], cdf[-1]*np.ones( abs(k) ) )
elif k==0:
return cdf
else:
return np.append( np.zeros(k), cdf[:-k] )
def fractional_shift( cdf, x ):
""" Shift cdf to the *right* so it represents the cdf for Y ~ X + x*unit
:param cdf:
:param x: float Number of lattice points to shift (need not be integer)
:return:
"""
( l, lc), ( u, uc) = _low_high( x )
return lc * integer_shift( cdf, l ) + uc * integer_shift( cdf, u )
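# Non-integer shifts are handled by linear interpolation between the two neighbouring
# integer shifts, e.g. a shift of 1.25 lattice points blends integer_shift(cdf, 1) with
# weight 0.75 and integer_shift(cdf, 2) with weight 0.25 (the weights come from _low_high).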
def _low_high( offset ):
l = math.floor( offset )
u = math.ceil( offset )
r = offset - l
return (l, 1-r ), (u, r)
def fractional_shift_density( density, x ):
""" Shift pdf to the *right* so it represents the pdf for Y ~ X + x*unit """
cdf = pdf_to_cdf(density)
shifted_cdf = fractional_shift( cdf, x )
return cdf_to_pdf(shifted_cdf)
def center_density( density ):
""" Shift density to near its mean """
m = mean_of_density(density, unit=1.0)
return fractional_shift_density( density, -m )
def mean_of_density( density, unit ):
L = len( density) /2
pts = symmetric_lattice( L=L, unit=unit )
return np.inner( density, pts )
def symmetric_lattice(L, unit):
return unit*linspace( -L, L, 2*L+1 )
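# e.g. symmetric_lattice(L=2, unit=0.5) -> array([-1., -0.5, 0., 0.5, 1.])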
#############################################
# Simple family of skewed distributions #
#############################################
def skew_normal_density( L, unit, loc=0, scale=1.0, a=2.0):
""" Skew normal as a lattice density """
lattice = symmetric_lattice(L=L, unit=unit)
density = np.array( [ _unnormalized_skew_cdf(x, loc=loc, scale=scale, a=a ) for x in lattice])
density = density / np.sum( density )
return density
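# e.g. skew_normal_density(L=100, unit=0.1, a=2.0) gives a length-201 probability vector
# (summing to 1) on the lattice [-10, 10] that can be passed to densitiesPlot above or
# winner_of_many below.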
def _unnormalized_skew_cdf(x, loc=0, scale=1, a=2.0):
""" Proportional to skew-normal density
:param x:
:param loc: location
:param scale: scale
:param a: controls skew (a>0 means fat tail on right)
:return: np.array length 2*L+1
"""
t = (x-loc) / scale
return 2 / scale * norm.pdf(t) * norm.cdf(a*t)
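# This matches the standard skew-normal density 2/scale * phi(t) * Phi(a*t);
# skew_normal_density above renormalizes it so the lattice values sum to 1.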
# Probabilities on lattices
# Nothing below here depends on the unit chosen
#############################################
# Order statistics on lattices #
#############################################
def pdf_to_cdf( density ):
""" Prob( X <= k*unit ) """
return np.cumsum( density )
def cdf_to_pdf(cumulative):
""" Given cumulative distribution on lattice, return the pdf """
prepended = np.insert( cumulative, 0, 0.)
return np.diff( prepended )
def winner_of_many( densities, multiplicities = None):
""" The PDF of the minimum of the random variables represented by densities
:param densities: [ np.array ]
:return: np.array
"""
d = densities[0]
multiplicities = multiplicities or [ None for _ in densities ]
m = multiplicities[0]
for d2, m2 in zip( densities[1:], multiplicities[1:] ):
d, m = _winner_of_two_pdf( d, d2, multiplicityA=m, multiplicityB = m2 )
return d, m
def sample_from_cdf( cdf, nSamples ):
""" Monte Carlo sample """
rvs = np.random.rand( nSamples )
return [ sum( [ rv>c for c in cdf ] ) for rv in rvs ]
def sample_winner_of_many( densities, nSamples = 5000 ):
""" The PDF of the minimum of the integer random variables represented by densities, by Monte Carlo """
cdfs = [pdf_to_cdf(density) for density in densities]
cols = [sample_from_cdf(cdf, nSamples) for cdf in cdfs]
rows = map( list, zip( *cols ))
D = [ min( row ) for row in rows ]
density = np.bincount( D, minlength=len( densities[0] ) ) / (1.0*nSamples)
return density
def expected_payoff( density, densityAll, multiplicityAll, cdf = None, cdfAll = None):
""" Returns expected _conditional_payoff_against_rest broken down by score,
where _conditional_payoff_against_rest is 1 if we are better than rest (lower) and 1/(1+multiplicity) if we are equal
"""
# Use np.sum( expected_payoff ) for the expectation
if cdf is None:
cdf = pdf_to_cdf(density)
if cdfAll is None:
cdfAll = pdf_to_cdf(densityAll)
if density is None:
density = cdf_to_pdf(cdf)
if densityAll is None:
densityAll = cdf_to_pdf(cdfAll)
S = 1 - cdfAll
S1 = 1 - cdf
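    # Survival function of the best of the *other* runners, obtained by dividing the field's
    # survival by this runner's own survival (an independence assumption); the small constants
    # guard against division by zero.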
Srest = ( S + 1e-18 ) / ( S1 + 1e-6 )
cdfRest = 1 - Srest
# Multiplicity inversion (uses notation from blog post)
# This is written up in my blog post
m = multiplicityAll
f1 = density
m1 = 1.0
fRest = cdf_to_pdf(cdfRest)
# numer = m*f1*Srest + m*(f1+S1)*fRest - m1*f1*( Srest + fRest )
# denom = fRest*(f1+S1)
numer = m*f1*Srest + m*(f1+S1)*fRest - m1*f1*( Srest + fRest )
denom = fRest*(f1+S1)
multiplicityLeftTail = (1e-18 + numer ) / ( 1e-18 + denom )
multiplicityRest = multiplicityLeftTail
T1 = (S1 +1.0e-18) / (f1 + 1e-6 ) # This calculation is more stable on the right tail. It should tend to zero eventually
Trest = (Srest + 1e-18) / ( fRest + 1e-6 )
multiplicityRightTail = m*Trest / (1 + T1) + m - m1 * (1 + Trest ) / (1 + T1 )
k = list( f1 == max(f1) ).index( True )
multiplicityRest[k:] = multiplicityRightTail[k:]
return _conditional_payoff_against_rest(density = density, densityRest = None, multiplicityRest = multiplicityRest, cdf = cdf, cdfRest = cdfRest)
def _winner_of_two_pdf( densityA, densityB, multiplicityA = None, multiplicityB = None, cdfA = None, cdfB = None):
""" The PDF of the minimum of two random variables represented by densities
:param densityA: np.array
:param densityB: np.array
:return: density, multiplicity
"""
cdfA = pdf_to_cdf(densityA)
cdfB = pdf_to_cdf(densityB)
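    # For independent A and B, P(min(A,B) <= k) = 1 - P(A > k) * P(B > k).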
cdfMin = 1 - np.multiply( 1 - cdfA, 1- cdfB )
density = cdf_to_pdf(cdfMin)
L = len( density ) / 2
if multiplicityA is None:
multiplicityA = np.ones( 2*L+1 )
if multiplicityB is None:
multiplicityB = np.ones( 2*L+1 )
winA, draw, winB = _conditional_win_draw_loss( densityA, densityB, cdfA, cdfB )
multiplicity = ( winA*multiplicityA + draw*(multiplicityA+multiplicityB) + winB*multiplicityB +1e-18) / ( winA+draw+winB+1e-18)
return density, multiplicity
def _conditional_win_draw_loss(densityA, densityB, cdfA, cdfB):
""" Conditional win, draw and loss probability lattices for a two horse race """
win = densityA * ( 1 - cdfB )
draw = densityA * densityB
lose = densityB * ( 1 - cdfA )
return win, draw, lose
def _conditional_payoff_against_rest(density, densityRest, multiplicityRest, cdf = None, cdfRest = None):
""" Returns expected _conditional_payoff_against_rest broken down by score, where _conditional_payoff_against_rest is 1 if we are better than rest (lower) and 1/(1+multiplicity) if we are equal """
# use np.sum( _conditional_payoff_against_rest) for the expectation
if cdf is None:
cdf = pdf_to_cdf(density)
if cdfRest is None:
cdfRest = pdf_to_cdf(densityRest)
if density is None:
density = cdf_to_pdf(cdf)
if densityRest is None:
densityRest = cdf_to_pdf(cdfRest)
win, draw, loss = _conditional_win_draw_loss(density, densityRest, cdf, cdfRest)
return win + draw / (1+multiplicityRest )
def densities_and_coefs_from_offsets( density, offsets ):
""" Given a density and a list of offsets (which might be non-integer)
:param density: np.ndarray
:param offsets: [ float ]
:return: [ np.ndarray ]
"""
cdf = pdf_to_cdf(density)
coefs = [ _low_high( offset ) for offset in offsets ]
cdfs = [lc * integer_shift(cdf, l) + uc * integer_shift(cdf, u) for (l, lc), (u, uc) in coefs]
return [cdf_to_pdf(cdf) for cdf in cdfs], coefs
def densities_from_offsets( density, offsets ):
return densities_and_coefs_from_offsets( density, offsets)[0]
def state_prices_from_offsets( density, offsets ):
densities = densities_from_offsets( density, offsets )
densityAll, multiplicityAll = winner_of_many( densities )
return implicit_state_prices( density, densityAll = densityAll, multiplicityAll=multiplicityAll, offsets=offsets )
def implicit_state_prices( density, densityAll, multiplicityAll = None, cdf = None, cdfAll = None, offsets = None ):
""" Returns the expected _conditional_payoff_against_rest as a function of location changes in cdf """
L = len( density )/2
if cdf is None:
cdf = pdf_to_cdf( density )
if cdfAll is None:
cdfAll = pdf_to_cdf(densityAll)
if multiplicityAll is None:
multiplicityAll = np.ones( 2*L+1 )
if offsets is None:
offsets = xrange( -L/2, L/2 )
implicit = list()
for k in offsets:
if k==int( k ):
offset_cdf = integer_shift( cdf, k )
ip = expected_payoff( density = None, densityAll = densityAll, multiplicityAll=multiplicityAll, cdf = offset_cdf, cdfAll = cdfAll)
implicit.append( np.sum( ip ) )
else:
(l, l_coef ), ( r, r_coef) = _low_high( k )
offset_cdf_left = integer_shift(cdf, l)
offset_cdf_right = integer_shift(cdf, r)
ip_left = expected_payoff(density = None, densityAll = densityAll, multiplicityAll=multiplicityAll, cdf = offset_cdf_left, cdfAll = cdfAll)
ip_right = expected_payoff(density = None, densityAll = densityAll, multiplicityAll=multiplicityAll, cdf = offset_cdf_right, cdfAll = cdfAll)
implicit.append( l_coef*np.sum( ip_left ) + r_coef*np.sum( ip_right ) )
return implicit
| {
"content_hash": "4456289e01b1220d6796d23765c98168",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 201,
"avg_line_length": 39.923371647509576,
"alnum_prop": 0.608637236084453,
"repo_name": "notbanker/pysport",
"id": "c31b632de963495286ddbeac0f25079e082cf5b8",
"size": "10420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysport/horseracing/lattice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31589"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.conf.urls import patterns, include, url
urlpatterns = patterns('members.views',
url(r'^$', 'members_home', name="home"),
url(r'^login/$', 'members_login', name="login"),
url(r'^cancel/$', 'members_cancel', name="cancel"),
url(r'^purchase/(\S+)/$', 'purchase', name="purchase"),
url(r'^charge/(\S+)/$', 'charge', name="charge"),
url(r'^edit-name/$', 'edit_name', name="edit-name"),
url(r'^paypal-success/$', 'paypal_success', name="paypal-success"),
url(r'^support/$', 'support', name="support"),
)
| {
"content_hash": "566944fe6243c1638d2fba0731910431",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 43.07692307692308,
"alnum_prop": 0.6339285714285714,
"repo_name": "pizzapanther/Super-Neutron-Drive",
"id": "174714b94b3b7402e1de540311571190a42aa584",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/members/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "139458"
},
{
"name": "HTML",
"bytes": "101330"
},
{
"name": "JavaScript",
"bytes": "851908"
},
{
"name": "Python",
"bytes": "79810"
},
{
"name": "Shell",
"bytes": "4847"
}
],
"symlink_target": ""
} |
"""
Admin support code for DurationFields.
"""
import ttcal
from django.forms.fields import Field
from django.forms import ValidationError
from django.forms.utils import flatatt
from django.forms.widgets import TextInput
from django.utils.safestring import mark_safe
from django.utils.encoding import force_text
class DurationInput(TextInput):
"""Duration input widget.
"""
def render(self, name, value, attrs=None):
"""output.append(u'<li>%(cb)s<label%(for)s>%(label)s</label></li>' %
{"for": label_for, "label": option_label, "cb": rendered_cb})
"""
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
if isinstance(value, int):
# Database backends serving different types
value = ttcal.Duration(seconds=value)
# Otherwise, we've got a timedelta already
final_attrs['value'] = force_text(value)
return mark_safe(u'<input%s />' % flatatt(final_attrs))
class DurationField(Field):
"""Form field for DurationField custom database field.
"""
widget = DurationInput
def __init__(self, *args, **kwargs):
super(DurationField, self).__init__(*args, **kwargs)
def clean(self, value):
"""Returns a datetime.timedelta object.
"""
super(DurationField, self).clean(value)
try:
return ttcal.Duration.parse(value)
except (ValueError, TypeError):
raise ValidationError('Enter a valid duration.')
def to_python(self, value): # pylint:disable=R0201
"""Convert form input to python value.
"""
try:
return ttcal.Duration.parse(value)
except (ValueError, TypeError):
raise ValidationError('Enter a valid duration.')
| {
"content_hash": "6ba98c50942f767f722c0ae76355489d",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 78,
"avg_line_length": 33.51724137931034,
"alnum_prop": 0.6198559670781894,
"repo_name": "datakortet/dkmodelfields",
"id": "a1b776fc9a4c28f368fad42d61d7698830ed781c",
"size": "1944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dkmodelfields/adminforms/durationfield.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66973"
}
],
"symlink_target": ""
} |
import scrapy
from scrapy.loader.processors import Join, MapCompose, TakeFirst
from w3lib.html import remove_tags
from classics_spider.utils import Sanitizer
class ClassicsSpiderItem(scrapy.Item):
post_id = scrapy.Field(
input_processor=MapCompose(remove_tags),
output_processor=Join(),
)
post_author = scrapy.Field(
input_processor=MapCompose(remove_tags, Sanitizer.trim),
output_processor=Join(),
)
post_datetime = scrapy.Field(
input_processor=MapCompose(remove_tags, Sanitizer.extract_date),
output_processor=Join(),
)
post_content = scrapy.Field(
input_processor=MapCompose(remove_tags, Sanitizer.extract_content),
output_processor=Join(),
)
| {
"content_hash": "3fcfe1813f860af99ae955221657e359",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 31.083333333333332,
"alnum_prop": 0.6943699731903485,
"repo_name": "adaschevici/classics_crawler",
"id": "b543098f382296a4926ee5174856431b9b99214b",
"size": "898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classics_spider/items.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15446"
}
],
"symlink_target": ""
} |
from decimal import Decimal
from typing import List, Union
from ...asset import Asset
from ...call_builder.base import BaseStrictReceivePathsCallBuilder
from ...call_builder.call_builder_sync.base_call_builder import BaseCallBuilder
from ...client.base_sync_client import BaseSyncClient
from ...type_checked import type_checked
__all__ = ["StrictReceivePathsCallBuilder"]
@type_checked
class StrictReceivePathsCallBuilder(BaseCallBuilder, BaseStrictReceivePathsCallBuilder):
"""Creates a new :class:`StrictReceivePathsCallBuilder` pointed to server defined by horizon_url.
Do not create this object directly, use :func:`stellar_sdk.Server.strict_receive_paths`.
The Stellar Network allows payments to be made across assets through path payments. A path payment specifies a
series of assets to route a payment through, from source asset (the asset debited from the payer) to destination
asset (the asset credited to the payee).
A path search is specified using:
- The source address or source assets.
- The asset and amount that the destination account should receive.
As part of the search, horizon will load a list of assets available to the
source address and will find any payment paths from those source assets to
the desired destination asset. The search's amount parameter will be used to
    determine if a given path can satisfy a payment of the desired amount.
If a list of assets is passed as the source, horizon will find any payment
paths from those source assets to the desired destination asset.
See `List Strict Receive Payment Paths <https://developers.stellar.org/api/aggregations/paths/strict-receive/>`__ for more information.
:param horizon_url: Horizon server URL.
:param client: The client instance used to send request.
:param source: The sender's account ID or a list of Assets. Any returned path must use a source that the sender can hold.
:param destination_asset: The destination asset.
:param destination_amount: The amount, denominated in the destination asset, that any returned path should be able to satisfy.
"""
def __init__(
self,
horizon_url: str,
client: BaseSyncClient,
source: Union[str, List[Asset]],
destination_asset: Asset,
destination_amount: Union[str, Decimal],
) -> None:
super().__init__( # type: ignore[call-arg]
horizon_url=horizon_url,
client=client,
source=source,
destination_asset=destination_asset,
destination_amount=destination_amount,
)
| {
"content_hash": "de1ed52050fc244ac716b4e33675cb7f",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 139,
"avg_line_length": 45.258620689655174,
"alnum_prop": 0.7272380952380952,
"repo_name": "StellarCN/py-stellar-base",
"id": "e0efc568a69e0d07278db80aa036082dac6451ed",
"size": "2625",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "stellar_sdk/call_builder/call_builder_sync/strict_receive_paths_call_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1737"
},
{
"name": "Makefile",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "2044193"
},
{
"name": "RPC",
"bytes": "76503"
}
],
"symlink_target": ""
} |
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD, (C) 2011
import numpy as np
from ..base import BaseEstimator
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
out_dim : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : attempt to choose the most efficient solver
for the given problem.
'arpack' : use Arnoldi decomposition to find the eigenvalues
and eigenvectors. Note that arpack can handle both dense
and sparse data efficiently
'dense' : use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'
max_iter : integer
maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'
path_method : string ['auto'|'FW'|'D']
method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically
'FW' : Floyd-Warshall algorithm
'D' : Dijkstra algorithm with Fibonacci Heaps
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance
Attributes
----------
`embedding_` : array-like, shape (n_samples, out_dim)
Stores the embedding vectors
`kernel_pca_` : `KernelPCA` object used to implement the embedding
`training_data_` : array-like, shape (n_samples, n_features)
Stores the training data
`nbrs_` : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
`dist_matrix_` : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data
Notes
-----
**References**:
[1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, out_dim=2,
eigen_solver='auto', tol=0,
max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.out_dim = out_dim
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.out_dim,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
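        # Isomap kernel: K = -0.5 * D**2 built from the geodesic distances;
        # KernelPCA centers this precomputed kernel before extracting the embedding.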
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, cKDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, sparse array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, cKDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, out_dim)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, out_dim)
"""
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| {
"content_hash": "6424bb3ef8044ce41a00cba7576fe5c7",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 79,
"avg_line_length": 35.02970297029703,
"alnum_prop": 0.5891746749576031,
"repo_name": "cdegroc/scikit-learn",
"id": "c7ba7ed37105df4c71019b94c91803ca3008c403",
"size": "7076",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/manifold/isomap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6543221"
},
{
"name": "C++",
"bytes": "245669"
},
{
"name": "Python",
"bytes": "2667615"
},
{
"name": "Shell",
"bytes": "3770"
}
],
"symlink_target": ""
} |
def test_import():
import pelican_jupyter
assert pelican_jupyter.__version__ is not None
assert pelican_jupyter.__version__ != "0.0.0"
assert len(pelican_jupyter.__version__) > 0
def test_import_markup():
from pelican_jupyter import markup as nb_markup
assert nb_markup
def test_import_liquid():
from pelican_jupyter import liquid as nb_liquid
assert nb_liquid
| {
"content_hash": "b1d6af53272b4395c8090b6c00182546",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 51,
"avg_line_length": 22.22222222222222,
"alnum_prop": 0.69,
"repo_name": "danielfrg/pelican-ipynb",
"id": "e062a4f6589c9ca1f19539f1b0f71c0a0d4e5108",
"size": "400",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pelican_jupyter/tests/test_import.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "202"
},
{
"name": "Jupyter Notebook",
"bytes": "159658"
},
{
"name": "Python",
"bytes": "17864"
}
],
"symlink_target": ""
} |
import web
import hashlib
import cgi
#import Image
from PIL import Image
import os
import os.path
import random
import string
import time
import socket
from sign import sign
from config import upload_path,app_root
from conn import client
cgi.maxlen = 5 * 1024 * 1024  # file size limit; exceeding it needs a try/except
db = client.pyblog
def transformPosts(posts,artists):
for i in posts:
if str(i['artist']) not in artists:
artists[str(i['artist'])] = db['users'].find_one({'_id': i['artist']})
i['artist'] = artists[str(i['artist'])]
return posts
def listToHashByArtists(list):
hash = {}
for i in list:
hash[str(i['_id'])] = i
return hash
def getArtistByKey(cursor,key):
collections = list(cursor)
ids = []
for i in collections:
ids.append(i[key])
return list(db['users'].find({'_id': {'$in': ids}}))
# check login status
def checkLogin():
if web.ctx.has_key('session'):
return web.ctx.session.hasLogin
else:
return False
# user = web.cookies().get('pyname')
# connect = web.cookies().get('pyconnect')
# if user and connect:
# return connect == sign(user)
# else:
# return False
# file upload
def upload(file,path='/',mediaType='pic'):
if not os.path.exists(upload_path+path+'/thumbs'):
os.mkdir(upload_path+path+'/thumbs')
THUMBS_WIDTH = 500
pic_width = 1280
# filename = file.filename.replace('\\','/').split('/')[-1]
    # use a random filename
extname = os.path.splitext(file.filename)[1]
filename = createRandomName() + extname
img = Image.open(file.file)
img_w,img_h = img.size
ratio = 1.0 * img_w / img_h
new_size_t = (THUMBS_WIDTH, int(THUMBS_WIDTH / ratio))
    if mediaType == 'avatar':
pic_width = 150
new_size = (pic_width, int(pic_width / ratio))
img.thumbnail(new_size,Image.ANTIALIAS)
img.save(upload_path+path+filename)
    if mediaType != 'avatar':
img.thumbnail(new_size_t,Image.ANTIALIAS)
img.save(upload_path+path+'/thumbs/'+filename)
return '/static/upload'+path+filename
def writeSession(arg):
for i in arg:
web.ctx.session[i] = arg[i]
def createRandomName():
# http://tocode.sinaapp.com/4
# hashlib.md5(str(time.time())).digest()
salt = ''.join(random.sample(string.ascii_letters + string.digits, 13))
return salt
def randomString(num=16):
return ''.join(map(lambda xx:(hex(ord(xx))[2:]),os.urandom(num)))
def get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return "127.0.0.1"
def isTrue(str):
return str.lower() == 'true'
def addQuery(url, query):
qs = []
for i in query:
qs.append(i + '=' + str(query[i]))
qs = '&'.join(qs)
if url.find('?') == -1:
url += '?' + qs
else:
url += '&' + qs
return url
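# e.g. addQuery('/posts', {'page': 2}) returns '/posts?page=2'; an existing query string is extended with '&'.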
def handlerSpecPostType(posts,userId):
for i in posts:
if i.get('private'):
if userId == i['artist']:
i['showPost'] = i['private'] = True
else:
i['showPost'] = False
continue
if i.get('assigns'):
if userId == i['artist'] or str(userId) in i.get('assigns'):
i['showPost'] = i['assign'] = True
else:
i['showPost'] = False
else:
i['showPost'] = True
| {
"content_hash": "05d52fdc3567790987e8d9cccf127bac",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 73,
"avg_line_length": 24.138461538461538,
"alnum_prop": 0.6488209050350542,
"repo_name": "otarim/pyblog",
"id": "253474c9a46edfee49c461fdcbedc2e357864589",
"size": "3190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4442"
},
{
"name": "HTML",
"bytes": "73287"
},
{
"name": "JavaScript",
"bytes": "170135"
},
{
"name": "Python",
"bytes": "30856"
}
],
"symlink_target": ""
} |
import argparse, sys, copy, gzip, time, math, re
import numpy as np
import pandas as pd
from scipy import stats
from collections import Counter, defaultdict, namedtuple
import statsmodels.formula.api as smf
from operator import itemgetter
import warnings
from svtools.vcf.file import Vcf
from svtools.vcf.genotype import Genotype
from svtools.vcf.variant import Variant
import svtools.utils as su
CN_rec = namedtuple ('CN_rec', 'var_id sample svtype svlen AF GT CN AB log_len log2r')
# http://stackoverflow.com/questions/8930370/where-can-i-find-mad-mean-absolute-deviation-in-scipy
def mad(arr):
""" Median Absolute Deviation: a "Robust" version of standard deviation.
        Indicates the variability of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
arr = np.ma.array(arr).compressed() # should be faster to not use masked arrays.
med = np.median(arr)
return np.median(np.abs(arr - med))
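# e.g. mad([1., 2., 3., 4., 100.]) == 1.0: the outlier barely moves it, unlike the standard deviation.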
def to_bnd_strings(var, fixed_gts):
old_type = var.info['SVTYPE']
old_id = var.var_id
old_pos = var.pos
old_end = var.info['END']
old_ciend = var.info['CIEND']
old_cipos = var.info['CIPOS']
old_cipos95 = var.info['CIPOS95']
old_ciend95 = var.info['CIEND95']
#for both ends
var.info['SVTYPE'] = 'BND'
var.info['EVENT'] = old_id
del var.info['SVLEN']
del var.info['END']
#var1
var.var_id = old_id + "_1"
var.info['MATEID'] = old_id + "_2"
if old_type == 'DEL':
var.alt = 'N[%s:%s[' % (var.chrom, old_end)
else:
var.alt = ']%s:%s]N' % (var.chrom, old_end)
var1=var.get_var_string(fixed_gts)
#var2
var.var_id = old_id + "_2"
var.info['MATEID'] = old_id + "_1"
var.info['CIPOS'] = old_ciend
var.info['CIEND'] = old_cipos
var.info['CIPOS95'] = old_ciend95
var.info['CIEND95'] = old_cipos95
var.pos = old_end
var.info['SECONDARY'] = True
if old_type == 'DEL':
var.alt = ']%s:%s]N' % (var.chrom, old_pos)
else:
var.alt = 'N[%s:%s[' % (var.chrom, old_pos)
var2=var.get_var_string(fixed_gts)
return var1, var2
def reciprocal_overlap(a, b_list):
overlap = 0
b_aggregate = 0
# catch divide by zero error
if a[1] == a[0]:
return 0
# update the overlap and b_aggregate
for b in b_list:
b_aggregate += (b[1] - b[0])
overlap += float(min(a[1], b[1]) - max(a[0], b[0]))
# catch divide by zero error
if b_aggregate == 0:
return 0
return min(overlap / (a[1] - a[0]), overlap / b_aggregate)
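# e.g. reciprocal_overlap([0, 100], [[50, 150]]) == 0.5: each interval covers half of the other.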
def collapse_bed_records(bed_list):
bed_list_sorted = sorted(bed_list, key=itemgetter(1))
collapsed_bed_list = []
i = 0
curr_rec = bed_list_sorted[i]
while i < len(bed_list_sorted):
# end at last element in list
if i == len(bed_list_sorted) - 1:
collapsed_bed_list.append(copy.copy(curr_rec))
break
# load next entry
next_rec = bed_list_sorted[i + 1]
# merge is overlap
if curr_rec[1] >= next_rec[0]:
curr_rec[1] = next_rec[1]
i += 1
# write out if no overlap
else:
collapsed_bed_list.append(copy.copy(curr_rec))
i += 1
curr_rec = bed_list_sorted[i]
# print 'collapsed:', collapsed_bed_list
return collapsed_bed_list
def annotation_intersect(var, ae_dict, threshold):
best_frac_overlap = 0
best_feature = ''
slop = 0
# dictionary with number of bases of overlap for each class
class_overlap = {}
# first check for reciprocal overlap
if var.chrom in ae_dict:
var_start = var.pos
var_end = int(var.info['END'])
i = 0
while 1:
# bail if end of dict
if i >= len(ae_dict[var.chrom]):
break
feature = ae_dict[var.chrom][i]
if feature[0] - slop < var_end:
if feature[1] + slop > var_start:
try:
class_overlap[feature[2]].append(feature)
except KeyError:
class_overlap[feature[2]] = [feature]
else:
break
i += 1
# print class_overlap
for me_class in class_overlap:
class_overlap[me_class] = collapse_bed_records(class_overlap[me_class])
frac_overlap = reciprocal_overlap([var_start, var_end], class_overlap[me_class])
if frac_overlap > best_frac_overlap:
best_frac_overlap = frac_overlap
best_feature = me_class
if best_frac_overlap >= threshold:
return best_feature
return None
def lowQuantile(xx):
return np.percentile(xx,2.5)
def highQuantile(xx):
return np.percentile(xx,97.5)
def lld(xx, mean, sd):
ll = 1 / sd * math.exp(-(xx-mean) * (xx-mean) / (2*sd*sd))
return ll
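# lld omits the 1/sqrt(2*pi) factor of the normal pdf; the constant cancels in the
# likelihood comparison made in rd_support_nb below.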
def calc_params(vcf_path):
tSet = list()
epsilon=0.1
header=[]
in_header = True
vcf = Vcf()
if vcf_path.endswith('.gz'):
vcf_file = gzip.open(vcf_path, 'rb')
else:
vcf_file = open(vcf_path, 'r')
for line in vcf_file:
if in_header:
if line[0] == '#':
header.append(line)
if line[1] != '#':
vcf_samples = line.rstrip().split('\t')[9:]
in_header = False
vcf.add_header(header)
continue
else:
v = line.rstrip().split('\t')
info = v[7].split(';')
svtype = None
for x in info:
if x.startswith('SVTYPE='):
svtype = x.split('=')[1]
break
if svtype not in ['DEL', 'DUP'] or v[0]=="X" or v[0]=="Y":
continue
var = Variant(v, vcf)
for sample in vcf_samples:
sample_genotype = var.genotype(sample)
if sample_genotype.get_format('GT') != './.':
log2r = math.log((float(sample_genotype.get_format('CN'))+ epsilon)/2,2) #to avoid log(0)
tSet.append(CN_rec(var.var_id, sample, var.info['SVTYPE'], abs(float(var.info['SVLEN'])), var.info['AF'],
sample_genotype.get_format('GT'), sample_genotype.get_format('CN'), sample_genotype.get_format('AB'), math.log(abs(float(var.info['SVLEN']))), log2r))
df=pd.DataFrame(tSet, columns=CN_rec._fields)
#exclude from training data, DELs and DUPs with CN in the tails of the distribution
df.loc[:,'q_low']=df.groupby(['sample', 'svtype', 'GT'])['log2r'].transform(lowQuantile)
df.loc[:,'q_high']=df.groupby(['sample', 'svtype', 'GT'])['log2r'].transform(highQuantile)
df=df[(df.log2r>=df.q_low) & (df.log2r<=df.q_high)]
#df.to_csv('./train.csv')
#adjust copy number for small deletions (<1kb), no strong relationship b/w cn and size for dups evident so far
small_het_dels = df[(df.svtype=="DEL") & (df.GT=="0/1") & (df.svlen<1000) & (df.svlen>=50)].copy()
small_hom_dels = df[(df.svtype=="DEL") & (df.GT=="1/1") & (df.svlen<1000) & (df.svlen>=50)].copy()
het_del_mean=np.mean(df[(df.svlen>1000) & (df.GT=="0/1") & (df.svtype=="DEL")]['log2r'])
hom_del_mean=np.mean(df[(df.svlen>1000) & (df.GT=="1/1") & (df.svtype=="DEL")]['log2r'])
small_het_dels.loc[:,'offset']=small_het_dels.loc[:,'log2r']-het_del_mean
small_hom_dels.loc[:,'offset']=small_hom_dels.loc[:,'log2r']-hom_del_mean
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
hom_del_fit=smf.ols('offset~log_len',small_hom_dels).fit()
het_del_fit=smf.ols('offset~log_len',small_het_dels).fit()
#print hom_del_fit.summary()
#print het_del_fit.summary()
small_hom_dels.loc[:,'log2r_adj'] = small_hom_dels.loc[:,'log2r'] - hom_del_fit.predict(small_hom_dels)
small_het_dels.loc[:,'log2r_adj'] = small_het_dels.loc[:,'log2r'] - het_del_fit.predict(small_het_dels)
small_dels=small_hom_dels.append(small_het_dels)
small_dels=small_dels[['var_id', 'sample', 'svtype', 'svlen', 'AF', 'GT', 'CN', 'log_len', 'log2r', 'q_low', 'q_high', 'log2r_adj']]
# dels of length<100 bp are excluded here
df1=df.loc[(df.svtype!="DEL") | (df.GT=="0/0") | (df.svlen>=1000), :].copy()
df1.loc[:,'log2r_adj']=df1.loc[:,'log2r']
df1=df1.append(small_dels)
params=df1.groupby(['sample', 'svtype', 'GT'])['log2r_adj'].aggregate([np.mean,np.var, len]).reset_index()
params=pd.pivot_table(params, index=['sample', 'svtype'], columns='GT', values=['mean', 'var', 'len']).reset_index()
params.columns=['sample', 'svtype', 'mean0', 'mean1', 'mean2', 'var0', 'var1', 'var2', 'len0', 'len1', 'len2']
params['std_pooled']=np.sqrt((params['var0']*params['len0']+params['var1']*params['len1']+params['var2']*params['len2'])/(params['len0']+params['len1']+params['len2']))
#params.to_csv('./params.csv')
return (params, het_del_fit, hom_del_fit)
def rd_support_nb(temp, p_cnv):
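    # tr encodes assumed mixture weights: for each called genotype (GT), the probability that
    # the underlying copy-number class is hom-ref (p0), het (p1) or hom-alt (p2).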
tr = pd.DataFrame({'p0' : [1.0, 0.1, 0.0], 'p1' : [0.0, 0.7, 0.25], 'p2' : [0.0, 0.2, 0.75], 'GT' : ["0/0", "0/1", "1/1"]})
temp = pd.merge(temp, tr, on='GT', how='left')
temp['p_mix'] = temp['lld0'] * temp['p0'] + temp['lld1'] * temp['p1'] + temp['lld2'] * temp['p2']
return np.log(p_cnv)+np.sum(np.log(temp['p_mix'])) > np.log(1-p_cnv)+np.sum(np.log(temp['lld0']))
def has_rd_support_by_nb(test_set, het_del_fit, hom_del_fit, params, p_cnv = 0.5):
svtype=test_set['svtype'][0]
svlen=test_set['svlen'][0]
log_len=test_set['log_len'][0]
if svtype == 'DEL' and svlen<1000:
params1=params[params.svtype=='DEL'].copy()
if svlen<50:
params1['log_len']=math.log(50)
else:
params1['log_len']=log_len
params1.loc[:,'mean1_adj'] = params1.loc[:,'mean1'] + het_del_fit.predict(params1)
params1.loc[:,'mean2_adj'] = params1.loc[:,'mean2'] + hom_del_fit.predict(params1)
else:
params1=params.copy()
params1.loc[:,'mean1_adj'] = params1.loc[:,'mean1']
params1.loc[:,'mean2_adj'] = params1.loc[:,'mean2']
v0=test_set.loc[test_set.GT=="0/0", 'log2r'].values
v1=test_set.loc[test_set.GT=="0/1", 'log2r'].values
v2=test_set.loc[test_set.GT=="1/1", 'log2r'].values
if len(v0)>0:
med0=np.median(v0)
else:
if len(v1)>0:
med0=med1=np.median(v1)
elif len(v2)>0:
med0=med1=med2=np.median(v2)
else:
return False
if len(v1)>0:
med1=np.median(v1)
else:
med1=med0
if len(v2)>0:
med2=np.median(v2)
else:
med2=med1
if svtype=='DEL' and ( med1>med0 or med2>med0 ):
return False
elif svtype=='DUP' and (med1<med0 or med2<med0):
return False
mm=pd.merge(test_set, params1, how='left')
mm.loc[:,'lld0'] = mm.apply(lambda row:lld(row["log2r"], row["mean0"],row["std_pooled"]), axis=1)
mm.loc[:,'lld1'] = mm.apply(lambda row:lld(row["log2r"], row["mean1_adj"],row["std_pooled"]), axis=1)
mm.loc[:,'lld2'] = mm.apply(lambda row:lld(row["log2r"], row["mean2_adj"],row["std_pooled"]), axis=1)
return rd_support_nb(mm, p_cnv)
def load_df(var, exclude, sex):
epsilon=0.1
test_set = list()
for s in var.sample_list:
if s in exclude:
continue
cn = var.genotype(s).get_format('CN')
if (var.chrom == 'X' or var.chrom == 'Y') and sex[s] == 1:
cn=str(float(cn)*2)
log2r = math.log((float(cn)+epsilon)/2, 2) # to avoid log(0)
test_set.append(CN_rec(var.var_id, s, var.info['SVTYPE'], abs(float(var.info['SVLEN'])), var.info['AF'],
var.genotype(s).get_format('GT'), cn , var.genotype(s).get_format('AB'), math.log(abs(float(var.info['SVLEN']))), log2r))
test_set = pd.DataFrame(data = test_set, columns=CN_rec._fields)
return test_set
# test for read depth support of low frequency variants
def has_low_freq_depth_support(test_set, mad_threshold=2, absolute_cn_diff=0.5):
mad_quorum = 0.5 # this fraction of the pos. genotyped results must meet the mad_threshold
hom_ref_cn=test_set[test_set.GT=="0/0"]['CN'].values.astype(float)
hom_het_alt_cn=test_set[(test_set.GT=="0/1") | (test_set.GT=="1/1")]['CN'].values.astype(float)
if len(hom_ref_cn) > 0:
cn_median = np.median(hom_ref_cn)
cn_mad = mad(hom_ref_cn)
else:
cn_median = None
cn_mad = None
# bail after writing out diagnostic info, if no ref samples or all ref samples
if (len(hom_ref_cn) == 0 or len(hom_het_alt_cn) == 0):
return False
# tally up the pos. genotyped samples meeting the mad_threshold
resid=hom_het_alt_cn-cn_median
#if test_set['svtype'][0]=='DEL':
if test_set.loc[0, 'svtype']=='DEL':
resid=-resid
resid=resid[(resid > (cn_mad * mad_threshold) ) & (resid>absolute_cn_diff)]
if float(len(resid))/len(hom_het_alt_cn)>mad_quorum:
return True
else:
return False
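# Worked example (illustrative numbers): suppose the hom-ref samples give
# cn_median=2.0 and cn_mad=0.05, and a DEL's positively genotyped samples have
# CN values [1.2, 1.4, 2.0]. The negated residuals are [0.8, 0.6, 0.0]; with
# mad_threshold=2 and absolute_cn_diff=0.5, two of the three exceed both 0.1
# (=0.05*2) and 0.5, so 2/3 > mad_quorum (0.5) and the call gets read-depth support.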
# test whether variant has read depth support by regression
def has_high_freq_depth_support(df, slope_threshold, rsquared_threshold):
rd = df[[ 'AB', 'CN']][df['AB']!='.'].values.astype(float)
    # require variation in both the AB column and the CN column before fitting the regression
    if len(np.unique(rd[:,0])) > 1 and len(np.unique(rd[:,1])) > 1:
(slope, intercept, r_value, p_value, std_err) = stats.linregress(rd)
if df['svtype'][0] == 'DEL':
slope=-slope
#sys.stderr.write(df['var_id'][0]+"\t"+str(slope)+"\t"+str(r_value)+"\n")
if (slope < slope_threshold or r_value*r_value < rsquared_threshold):
return False
return True
return False
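# Note (added for clarity): for a real DEL, copy number should drop as allele
# balance rises, so the AB-vs-CN slope is negative; negating it above lets one
# slope_threshold serve both DEL and DUP, while the r-squared check rejects calls
# where allele balance explains little of the copy-number variance.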
def has_rd_support_by_ls(df, slope_threshold, rsquared_threshold, num_pos_samps, mad_threshold=2, absolute_cn_diff=0.5):
min_pos_samps_for_regression=10
if num_pos_samps>min_pos_samps_for_regression:
return has_high_freq_depth_support(df, slope_threshold, rsquared_threshold)
else:
return has_low_freq_depth_support(df, mad_threshold, absolute_cn_diff)
def has_rd_support_hybrid(df, het_del_fit, hom_del_fit, params, p_cnv, slope_threshold, rsquared_threshold, num_pos_samps):
hybrid_support=False
nb_support=has_rd_support_by_nb(df, het_del_fit, hom_del_fit, params, p_cnv)
ls_support=has_rd_support_by_ls(df, slope_threshold, rsquared_threshold, num_pos_samps)
if nb_support and ls_support:
hybrid_support=True
elif nb_support and has_rd_support_by_ls(df, 2*slope_threshold, 2*rsquared_threshold, num_pos_samps, 2, 0.75):
hybrid_support=True
elif ls_support and has_rd_support_by_nb(df, het_del_fit, hom_del_fit, params, 0.2*p_cnv):
hybrid_support=True
return [ls_support, nb_support, hybrid_support]
# primary function
def sv_classify(vcf_in, vcf_out, gender_file, exclude_file, ae_dict, f_overlap, slope_threshold, rsquared_threshold, p_cnv, het_del_fit, hom_del_fit, params, diag_outfile, method):
vcf = Vcf()
header = []
in_header = True
sex = {}
# read sample genders
for line in gender_file:
v = line.rstrip().split('\t')
sex[v[0]] = int(v[1])
exclude = []
if exclude_file is not None:
for line in exclude_file:
exclude.append(line.rstrip())
if diag_outfile is not None:
outf=open(diag_outfile, 'w', 4096)
outf.write("varid\torig_svtype\tsvlen\tnum_pos_samps\tnb_support\tls_support\thybrid_support\thas_rd_support\n")
for line in vcf_in:
if in_header:
if line[0] == '#':
header.append(line)
continue
else:
in_header = False
vcf.add_header(header)
vcf_out.write(vcf.get_header() + '\n')
v = line.rstrip().split('\t')
info = v[7].split(';')
svtype = None
for x in info:
if x.startswith('SVTYPE='):
svtype = x.split('=')[1]
break
# bail if not DEL or DUP prior to reclassification
if svtype not in ['DEL', 'DUP']:
vcf_out.write(line)
continue
var = Variant(v, vcf)
# check intersection with mobile elements
if ae_dict is not None and var.info['SVTYPE'] in ['DEL']:
ae = annotation_intersect(var, ae_dict, f_overlap)
if ae is not None:
if ae.startswith('SINE') or ae.startswith('LINE') or ae.split('|')[2].startswith('SVA'):
ae = 'ME:' + ae
var.alt = '<DEL:%s>' % ae
var.info['SVTYPE'] = 'MEI'
vcf_out.write(var.get_var_string(True) + '\n')
continue
#count positively genotyped samples
num_pos_samps = 0
num_total_samps=len(var.sample_list)
for s in var.sample_list:
if var.genotype(s).get_format('GT') not in ["./.", "0/0"]:
num_pos_samps += 1
nb_support = False
ls_support = False
hybrid_support = False
has_rd_support = False
if num_pos_samps == 0:
vcf_out.write(line)
else:
df=load_df(var, exclude, sex)
if method=='large_sample':
ls_support = has_rd_support_by_ls(df, slope_threshold, rsquared_threshold, num_pos_samps)
has_rd_support=ls_support
elif method=='naive_bayes':
nb_support = has_rd_support_by_nb(df, het_del_fit, hom_del_fit, params, p_cnv)
has_rd_support=nb_support
elif method=='hybrid':
ls_support, nb_support, hybrid_support = has_rd_support_hybrid(df, het_del_fit, hom_del_fit, params, p_cnv, slope_threshold, rsquared_threshold, num_pos_samps)
has_rd_support=hybrid_support
if has_rd_support:
vcf_out.write(line)
else:
for m_var in to_bnd_strings(var, True):
vcf_out.write(m_var + '\n')
if diag_outfile is not None:
svlen=df['svlen'][0]
outf.write(var.var_id+"\t"+svtype+"\t"+str(svlen)+"\t"+str(num_pos_samps)+"\t"+str(nb_support)+"\t"+str(ls_support)+"\t"+str(hybrid_support)+"\t"+str(has_rd_support)+"\n")
if diag_outfile is not None:
outf.close()
vcf_in.close()
vcf_out.close()
gender_file.close()
if exclude_file is not None:
exclude_file.close()
return
def get_ae_dict(ae_path):
if ae_path.endswith('.gz'):
ae_bedfile = gzip.open(ae_path, 'rb')
else:
ae_bedfile = open(ae_path, 'r')
ae_dict = {}
for line in ae_bedfile:
v = line.rstrip().split('\t')
if len(v) < 4:
continue
v[1] = int(v[1])
v[2] = int(v[2])
if v[0] in ae_dict:
ae_dict[v[0]].append(v[1:])
else:
ae_dict[v[0]] = [v[1:]]
ae_bedfile.close()
return ae_dict
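# Illustrative example of the resulting structure (the annotation name is made up):
# a BED line such as
#   1<TAB>10000<TAB>10500<TAB>SINE|AluY|AluY
# becomes ae_dict['1'] = [[10000, 10500, 'SINE|AluY|AluY']], with further intervals
# on the same chromosome appended to that list.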
def run_reclassifier(vcf_file, vcf_out, sex_file, ae_path, f_overlap, exclude_list, slope_threshold, rsquared_threshold, training_data, method, diag_outfile):
ae_dict = None
params = None
het_del_fit = None
hom_del_fit = None
p_cnv=0.5 # prior probability that CNV is real
if ae_path is not None:
sys.stderr.write("loading annotations\n")
ae_dict=get_ae_dict(ae_path)
if(method!="large_sample"):
sys.stderr.write("calculating parameters\n")
#calculate per-sample CN profiles on training set
[params, het_del_fit, hom_del_fit]=calc_params(training_data)
sys.stderr.write("reclassifying\n")
sv_classify(vcf_file,
vcf_out,
sex_file,
exclude_list,
ae_dict,
f_overlap,
slope_threshold,
rsquared_threshold,
p_cnv,
het_del_fit,
hom_del_fit,
params,
diag_outfile,
method)
def add_arguments_to_parser(parser):
parser.add_argument('-i', '--input', metavar='<VCF>', default=None, help='VCF input')
#parser.add_argument('-i', '--input', metavar='<STRING>', dest='vcf_in', type=argparse.FileType('r'), default=None, help='VCF input [stdin]')
parser.add_argument('-o', '--output', metavar='<VCF>', dest='vcf_out', type=argparse.FileType('w'), default=sys.stdout, help='VCF output [stdout]')
parser.add_argument('-g', '--gender', metavar='<FILE>', dest='gender', type=argparse.FileType('r'), required=True, default=None, help='tab delimited file of sample genders (male=1, female=2)\nex: SAMPLE_A\t2')
parser.add_argument('-a', '--annotation', metavar='<BED>', dest='ae_path', type=str, default=None, help='BED file of annotated elements')
parser.add_argument('-f', '--fraction', metavar='<FLOAT>', dest='f_overlap', type=float, default=0.9, help='fraction of reciprocal overlap to apply annotation to variant [0.9]')
parser.add_argument('-e', '--exclude', metavar='<FILE>', dest='exclude', type=argparse.FileType('r'), required=False, default=None, help='list of samples to exclude from classification algorithms')
parser.add_argument('-s', '--slope_threshold', metavar='<FLOAT>', dest='slope_threshold', type=float, default=1.0, help='minimum slope absolute value of regression line to classify as DEL or DUP[1.0]')
parser.add_argument('-r', '--rsquared_threshold', metavar='<FLOAT>', dest='rsquared_threshold', type=float, default=0.2, help='minimum R^2 correlation value of regression line to classify as DEL or DUP [0.2], for large sample reclassification')
parser.add_argument('-t', '--tSet', metavar='<STRING>', dest='tSet', type=str, default=None, required=False, help='high quality deletions & duplications training dataset[vcf], required by naive Bayes reclassification')
parser.add_argument('-m', '--method', metavar='<STRING>', dest='method', type=str, default="large_sample", required=False, help='reclassification method, one of (large_sample, naive_bayes, hybrid)', choices=['large_sample', 'naive_bayes', 'hybrid'])
parser.add_argument('-d', '--diag_file', metavar='<STRING>', dest='diag_outfile', type=str, default=None, required=False, help='text file to output method comparisons')
parser.set_defaults(entry_point=run_from_args)
def description():
return 'reclassify DEL and DUP based on read depth information'
def command_parser():
parser = argparse.ArgumentParser(description=description())
add_arguments_to_parser(parser)
return parser
def run_from_args(args):
# sys.stderr.write(args.vcf_in)
if args.tSet is None:
if args.method!="large_sample":
sys.stderr.write("Training data required for naive Bayes or hybrid classifiers\n")
parser.print_help()
sys.exit(1)
with su.InputStream(args.input) as stream:
run_reclassifier(stream, args.vcf_out, args.gender, args.ae_path, args.f_overlap, args.exclude, args.slope_threshold, args.rsquared_threshold, args.tSet, args.method, args.diag_outfile)
if __name__ == '__main__':
parser = command_parser()
args=parser.parse_args()
sys.exit(args.entry_point(args))
| {
"content_hash": "4f068c64eb31db820d1d694ac306c85a",
"timestamp": "",
"source": "github",
"line_count": 595,
"max_line_length": 253,
"avg_line_length": 39.22016806722689,
"alnum_prop": 0.582533424751457,
"repo_name": "abelhj/svtools",
"id": "ff54efaba5c82568b53873f128d5d5ecfa9fb3a4",
"size": "23359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "svtools/sv_classifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "2266"
},
{
"name": "Python",
"bytes": "557827"
},
{
"name": "R",
"bytes": "1564"
},
{
"name": "Shell",
"bytes": "5187"
}
],
"symlink_target": ""
} |
import unittest
from django.test import TestCase
from django.utils import six
from django.db.models import (CharField, TextField,
BooleanField, ForeignKey,
SmallIntegerField)
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.utils.timezone import now, timedelta
from deck.models import Event, Proposal, Vote, Jury
from test_utils import get_all_field_names
EVENT_DATA = {
'title': 'RuPy',
'slug': 'rupy',
'description': 'A really good event.',
'author_id': 1,
'is_published': False,
'slots': 30,
'closing_date': now() + timedelta(days=7),
}
PROPOSAL_DATA = {
'title': 'Python For Zombies',
'slug': 'python-for-zombies',
'description': 'Brain...',
'author_id': 1,
'slides_url': 'jane_doe/talk'
}
ANOTHER_PROPOSAL_DATA = {
'title': 'A Python 3 Metaprogramming Tutorial',
'slug': 'python-3-metaprogramming',
'description': 'An advanced tutorial on Python 3 and Metaprogramming',
'author_id': 1
}
class EventModelIntegrityTest(TestCase):
def setUp(self):
self.fields = {
field.name: field for field in Event._meta.fields
}
def test_assert_event_should_have_a_verbose_name(self):
self.assertEquals(_('Event'), Event._meta.verbose_name)
def test_assert_event_should_have_a_verbose_name_plural(self):
self.assertEquals(_('Events'), Event._meta.verbose_name_plural)
def test_assert_event_should_have_a_title(self):
self.assertIn('title', get_all_field_names(Event))
def test_assert_event_title_should_be_a_CharField(self):
self.assertIsInstance(self.fields['title'], CharField)
def test_assert_event_title_should_be_required(self):
self.assertEquals(False, self.fields['title'].null)
self.assertEquals(False, self.fields['title'].blank)
def test_assert_event_title_should_have_at_most_200_characters(self):
self.assertEquals(200, self.fields['title'].max_length)
def test_assert_event_should_have_a_description(self):
self.assertIn('description', get_all_field_names(Event))
def test_assert_event_description_should_be_a_TextField(self):
self.assertIsInstance(self.fields['description'], TextField)
def test_assert_event_description_should_be_nullable_but_needs_to_be_blank(self):
self.assertEquals(False, self.fields['description'].null)
self.assertEquals(True, self.fields['description'].blank)
def test_assert_event_description_should_have_at_most_10000_characters(self):
self.assertEquals(10000, self.fields['description'].max_length)
def test_assert_event_should_allow_public_voting(self):
self.assertIn('allow_public_voting', get_all_field_names(Event))
def test_assert_event_allow_public_voting_should_be_a_BooleanField(self):
self.assertIsInstance(self.fields['allow_public_voting'], BooleanField)
def test_assert_event_allow_public_voting_should_be_True_as_default(self):
self.assertEquals(True, self.fields['allow_public_voting'].default)
def test_assert_event_should_have_a_author(self):
self.assertIn('author', get_all_field_names(Event))
def test_assert_event_author_should_be_an_User(self):
self.assertEquals(User, self.fields['author'].rel.to)
def test_assert_event_author_should_be_a_ForeignKey(self):
self.assertIsInstance(self.fields['author'], ForeignKey)
def test_assert_event_author_should_be_required(self):
self.assertEquals(False, self.fields['author'].null)
self.assertEquals(False, self.fields['author'].blank)
def test_assert_event_author_should_have_a_related_name(self):
self.assertEquals('events', self.fields['author'].rel.related_name)
def test_assert_event_should_have_a_publish_flag(self):
self.assertIn('is_published', get_all_field_names(Event))
def test_assert_event_is_published_should_be_a_BooleanField(self):
self.assertIsInstance(self.fields['is_published'], BooleanField)
def test_assert_event_is_published_should_be_True_as_default(self):
self.assertEquals(True, self.fields['is_published'].default)
def test_assert_event_should_have_a_jury(self):
self.assertIn('jury', get_all_field_names(Event))
def test_assert_event_jury_should_be_an_Jury(self):
self.assertEquals(Jury, self.fields['jury'].rel.to)
def test_assert_event_jury_should_be_a_ForeignKey(self):
self.assertIsInstance(self.fields['jury'], ForeignKey)
def test_assert_event_jury_should_not_be_required(self):
self.assertEquals(True, self.fields['jury'].null)
self.assertEquals(True, self.fields['jury'].blank)
def test_assert_event_jury_should_have_a_related_name(self):
self.assertEquals('event', self.fields['jury'].rel.related_name)
class EventObjectTest(TestCase):
def setUp(self):
self.event = Event(**EVENT_DATA)
    @unittest.skipIf(six.PY3, 'do not test unicode on python3')
def test_assert_event_unicode_representation(self):
self.assertEquals(u'RuPy', six.text_type(self.event))
def test_assert_event_title(self):
self.assertEquals(u'RuPy', self.event.title)
def test_assert_event_description(self):
self.assertEquals(u'A really good event.', self.event.description)
def test_assert_event_author(self):
self.assertEquals(1, self.event.author_id)
def test_assert_event_allow_public_voting(self):
self.assertEquals(True, self.event.allow_public_voting)
def test_assert_event_is_published(self):
self.assertEquals(False, self.event.is_published)
class ProposalModelIntegrityTest(TestCase):
def setUp(self):
self.fields = {
field.name: field for field in Proposal._meta.fields
}
def test_assert_proposal_should_have_a_verbose_name(self):
self.assertEquals(_('Proposal'), Proposal._meta.verbose_name)
def test_assert_proposal_should_have_a_verbose_name_plural(self):
self.assertEquals(_('Proposals'), Proposal._meta.verbose_name_plural)
def test_assert_proposal_should_have_a_title(self):
self.assertIn('title', get_all_field_names(Proposal))
def test_assert_proposal_title_should_be_a_CharField(self):
self.assertIsInstance(self.fields['title'], CharField)
def test_assert_proposal_title_should_be_required(self):
self.assertEquals(False, self.fields['title'].null)
self.assertEquals(False, self.fields['title'].blank)
def test_assert_proposal_title_should_have_at_most_200_characters(self):
self.assertEquals(200, self.fields['title'].max_length)
def test_assert_proposal_should_have_a_description(self):
self.assertIn('description', get_all_field_names(Proposal))
def test_assert_proposal_description_should_be_a_TextField(self):
self.assertIsInstance(self.fields['description'], TextField)
def test_assert_proposal_description_should_be_nullable_but_needs_to_be_blank(self):
self.assertEquals(False, self.fields['description'].null)
self.assertEquals(True, self.fields['description'].blank)
def test_assert_proposal_description_should_have_at_most_10000_characters(self):
self.assertEquals(10000, self.fields['description'].max_length)
def test_assert_proposal_should_have_a_author(self):
self.assertIn('author', get_all_field_names(Proposal))
def test_assert_proposal_author_should_be_an_User(self):
self.assertEquals(User, self.fields['author'].rel.to)
def test_assert_proposal_author_should_be_a_ForeignKey(self):
self.assertIsInstance(self.fields['author'], ForeignKey)
def test_assert_proposal_author_should_be_required(self):
self.assertEquals(False, self.fields['author'].null)
self.assertEquals(False, self.fields['author'].blank)
def test_assert_proposal_event_should_have_a_related_name(self):
self.assertEquals('proposals', self.fields['event'].rel.related_name)
def test_assert_proposal_should_have_a_event(self):
self.assertIn('event', get_all_field_names(Proposal))
def test_assert_proposal_event_should_be_an_Event(self):
self.assertEquals(Event, self.fields['event'].rel.to)
def test_assert_proposal_event_should_be_a_ForeignKey(self):
self.assertIsInstance(self.fields['event'], ForeignKey)
def test_assert_proposal_event_should_be_required(self):
self.assertEquals(False, self.fields['event'].null)
self.assertEquals(False, self.fields['event'].blank)
def test_assert_proposal_should_have_a_publish_flag(self):
self.assertIn('is_published', get_all_field_names(Proposal))
def test_assert_proposal_is_published_should_be_a_BooleanField(self):
self.assertIsInstance(self.fields['is_published'], BooleanField)
def test_assert_proposal_is_published_should_be_True_as_default(self):
self.assertEquals(True, self.fields['is_published'].default)
def test_assert_proposal_is_approved_should_be_a_BooleanField(self):
self.assertIsInstance(self.fields['is_approved'], BooleanField)
def test_assert_proposal_is_approved_should_be_False_as_default(self):
self.assertEquals(False, self.fields['is_approved'].default)
def test_assert_proposal_slides_url_should_not_be_required(self):
self.assertEquals(True, self.fields['slides_url'].null)
self.assertEquals(True, self.fields['slides_url'].blank)
def test_assert_proposal_slides_url_should_be_a_CharField(self):
self.assertIsInstance(self.fields['slides_url'], CharField)
def test_assert_proposal_slides_url_should_have_at_most_250_characters(self):
self.assertEquals(250, self.fields['slides_url'].max_length)
class ProposalObjectTest(TestCase):
fixtures = ['user.json']
def setUp(self):
self.user = User.objects.first()
self.event = Event(**EVENT_DATA)
self.proposal = Proposal(**PROPOSAL_DATA)
self.vote = Vote(user_id=self.event.author_id,
proposal=self.proposal, rate=3)
    @unittest.skipIf(six.PY3, 'do not test unicode on python3')
def test_assert_proposal_unicode_representation(self):
self.assertEquals(u'Python For Zombies', six.text_type(self.proposal))
def test_assert_proposal_title(self):
self.assertEquals(u'Python For Zombies', self.proposal.title)
def test_assert_proposal_description(self):
self.assertEquals(u'Brain...', self.proposal.description)
def test_assert_proposal_slides_url(self):
self.assertEquals(u'jane_doe/talk', self.proposal.slides_url)
def test_assert_proposal_get_full_slides_url(self):
self.assertEquals(u'http://www.speakerdeck.com/jane_doe/talk', self.proposal.get_full_slides_url())
def test_assert_proposal_author(self):
self.assertEquals(1, self.proposal.author_id)
def test_assert_proposal_rate(self):
self.assertEquals(0, self.proposal.get_rate)
def test_get_absolute_url(self):
self.proposal.event = self.event
self.assertEquals('/events/rupy/#python-for-zombies',
self.proposal.get_absolute_url())
def test_assert_user_cannot_vote_multiple_times(self):
self.event.save()
self.proposal.event = self.event
self.proposal.author = User.objects.get(id=2)
self.proposal.save()
self.vote.proposal = self.proposal
self.vote.save()
self.assertTrue(self.proposal.user_already_voted(self.user))
def test_assert_proposal_is_published(self):
self.assertEquals(True, self.proposal.is_published)
def test_assert_proposal_approve(self):
self.event.save()
self.proposal.event = self.event
self.proposal.save()
self.assertEquals(False, self.proposal.is_approved)
self.proposal.approve()
self.assertEquals(True, self.proposal.is_approved)
def test_assert_proposal_disapprove(self):
self.event.save()
self.proposal.event = self.event
self.proposal.is_approved = True
self.proposal.save()
self.assertEquals(True, self.proposal.is_approved)
self.proposal.disapprove()
self.assertEquals(False, self.proposal.is_approved)
class VoteModelIntegrityTest(TestCase):
def setUp(self):
self.fields = {
field.name: field for field in Vote._meta.fields
}
def test_assert_vote_should_have_a_verbose_name(self):
self.assertEquals(_('Vote'), Vote._meta.verbose_name)
def test_assert_vote_should_have_a_verbose_name_plural(self):
self.assertEquals(_('Votes'), Vote._meta.verbose_name_plural)
def test_assert_vote_should_have_a_unique_together_constraint(self):
self.assertEquals((('proposal', 'user'),), Vote._meta.unique_together)
def test_assert_vote_should_have_a_rate(self):
self.assertIn('rate', get_all_field_names(Vote))
def test_assert_vote_rate_should_be_a_SmallIntegerField(self):
self.assertIsInstance(self.fields['rate'], SmallIntegerField)
def test_assert_vote_rate_should_be_required(self):
self.assertEquals(True, self.fields['rate'].null)
self.assertEquals(True, self.fields['rate'].blank)
def test_assert_vote_should_have_a_proposal(self):
self.assertIn('proposal', get_all_field_names(Vote))
def test_assert_vote_proposal_should_be_an_Proposal(self):
self.assertEquals(Proposal, self.fields['proposal'].rel.to)
def test_assert_vote_proposal_should_be_a_ForeignKey(self):
self.assertIsInstance(self.fields['proposal'], ForeignKey)
def test_assert_vote_proposal_should_be_required(self):
self.assertEquals(False, self.fields['proposal'].null)
self.assertEquals(False, self.fields['proposal'].blank)
def test_assert_vote_proposal_should_have_a_related_name(self):
self.assertEquals('votes', self.fields['proposal'].rel.related_name)
def test_assert_vote_should_have_a_author(self):
self.assertIn('user', get_all_field_names(Vote))
def test_assert_vote_user_should_be_an_User(self):
self.assertEquals(User, self.fields['user'].rel.to)
def test_assert_vote_user_should_be_a_ForeignKey(self):
self.assertIsInstance(self.fields['user'], ForeignKey)
def test_assert_vote_user_should_be_required(self):
self.assertEquals(False, self.fields['user'].null)
self.assertEquals(False, self.fields['user'].blank)
def test_assert_vote_event_should_have_a_related_name(self):
self.assertEquals('votes', self.fields['user'].rel.related_name)
class VoteObjectTest(TestCase):
def setUp(self):
self.event = Event(**EVENT_DATA)
self.proposal = Proposal(event=self.event, **PROPOSAL_DATA)
self.vote = Vote(user_id=self.event.author_id,
proposal=self.proposal, rate=3)
    @unittest.skipIf(six.PY3, 'do not test unicode on python3')
def test_assert_vote_unicode_representation(self):
self.vote.user = User(username='User')
self.assertEquals(u'User: 3 in Python For Zombies', six.text_type(self.vote))
def test_assert_vote_rate(self):
self.assertEquals(3, self.vote.rate)
def test_assert_vote_proposal(self):
self.assertEquals(self.proposal, self.vote.proposal)
def test_assert_vote_author(self):
self.assertEquals(1, self.vote.user_id)
| {
"content_hash": "3d9afb8f3c06b20ad74a9ab264d8ee62",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 107,
"avg_line_length": 39.53299492385787,
"alnum_prop": 0.6893297380585516,
"repo_name": "luanfonceca/speakerfight",
"id": "ca8e5806697795db4f321f1a8b5fd246d8e308c7",
"size": "15576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deck/tests/test_unit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "64338"
},
{
"name": "Dockerfile",
"bytes": "146"
},
{
"name": "HTML",
"bytes": "114755"
},
{
"name": "JavaScript",
"bytes": "65026"
},
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "Python",
"bytes": "217175"
}
],
"symlink_target": ""
} |
"""
Utilities for handling command line-related tasks - parsing arguments for
program options, expanding args into paths etc.
@author James Skinner
@version 0.1
"""
__all__ = [
  "WindowsShlex",
  "ArgParser",
  "Args"
]
import getopt
import shlex
import sys
from collections import namedtuple
from spiralx.props import Props
class WindowsShlex(shlex.shlex):
"""
Sub-class of the shlex class in the shlex module which is initialised for
splitting program arguments in Windows, i.e. using ^ as the quote character
and treating backslashes as normal characters.
"""
def __init__(self, s):
super(WindowsShlex, self).__init__(s, posix=True)
self.escape = "^"
@classmethod
def split(cls, s):
"""
>>> WindowsShlex.split("first second third")
['first', 'second', 'third']
"""
ws = cls(s)
ws.whitespace_split = True
return list(ws)
class ArgParser(object):
def __init__(self, opts=None, long_opts=None, defaults=None):
if opts is None:
opts = ""
if long_opts is None:
long_opts = []
if defaults is None:
defaults = {}
self.opts = opts
self.long_opts = long_opts
    self.allopts = list(opts.replace(":", "")) + \
      [o.replace("=", "") for o in long_opts]
self.defaults = defaults
def parse(self, windows=True):
if windows:
args = WindowsShlex.split(" ".join(sys.argv[1:]))
else:
args = sys.argv[1:]
o, a = getopt.getopt(args, self.opts, self.long_opts)
options = {}
options.update(self.defaults)
options.update((k.lstrip("-"), v) for k, v in o)
return options, a
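# Example usage (illustrative sketch; the option letters are made up):
#   parser = ArgParser(opts="vo:", long_opts=["verbose"])
#   options, args = parser.parse(windows=False)
# For a command line like "-v -o out.txt input.txt" this returns
# ({'v': '', 'o': 'out.txt'}, ['input.txt']): option values keyed by name with
# leading dashes stripped, plus the remaining positional arguments.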
class Args(namedtuple("Args", "opts args")):
def __new__(cls, options):
    o, a = getopt.getopt(sys.argv[1:], _aslist(options))
    o = Props((k.lstrip("-"), v) for k, v in o)
return tuple.__new__(cls, (o, a))
def __str__(self):
ostr = " ".join("{0}='{1}'".format(k, v) for k, v in self[0])
astr = ", ".join("'{0}'".format(a) for a in self[1])
return "Args(opts: {0}, args: {1})".format(ostr, astr)
def _aslist(v):
"""
If v is a string then split it on whitespace using the shlex module to handle
quoted strings correctly, otherwise return it as a list.
>>> _aslist("first second third")
  ['first', 'second', 'third']
"""
return shlex.split(v) if isinstance(v, str) else list(v)
if __name__ == "__main__":
import doctest
doctest.testmod()
print("\nDoctest finished.\n")
| {
"content_hash": "33e514c0fd933cfd04f84b3004bac85e",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 23.02857142857143,
"alnum_prop": 0.6133167907361455,
"repo_name": "spiralx/mypy",
"id": "dad27f455e31c51f841b87e5822881292073852a",
"size": "2418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mypy/spiralx/cmdline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "109682"
},
{
"name": "JavaScript",
"bytes": "170251"
},
{
"name": "Python",
"bytes": "298163"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
from pyswagger import App
from ..utils import get_test_data_folder
import unittest
_json = 'application/json'
_xml = 'application/xml'
class PatchObjTestCase(unittest.TestCase):
""" test patch_obj.py """
@classmethod
def setUpClass(kls):
kls.app = App._create_(get_test_data_folder(
version='2.0',
which='patch'
))
def test_operation_produces_consumes(self):
""" test patch Operation with produces and
consumes
"""
p = self.app.s('/pc')
self.assertEqual(p.get.produces, [_json])
self.assertEqual(p.get.consumes, [_json])
self.assertEqual(p.post.produces, [_xml])
self.assertEqual(p.post.consumes, [_json])
self.assertEqual(p.put.produces, [_json])
self.assertEqual(p.put.consumes, [_xml])
self.assertEqual(p.delete.produces, [_xml])
self.assertEqual(p.delete.consumes, [_xml])
def test_operation_parameters(self):
""" test patch Operation with parameters """
p = self.app.s('/param')
pp = p.get.parameters
self.assertEqual(len(pp), 2)
self.assertEqual(pp[0].name, 'p1')
self.assertEqual(getattr(pp[0], 'in'), 'query')
self.assertEqual(getattr(pp[0], 'type'), 'string')
self.assertEqual(pp[1].name, 'p2')
self.assertEqual(getattr(pp[1], 'in'), 'query')
self.assertEqual(getattr(pp[1], 'type'), 'string')
pp = p.post.parameters
self.assertEqual(len(pp), 2)
self.assertEqual(pp[0].name, 'p1')
self.assertEqual(getattr(pp[0], 'in'), 'path')
self.assertEqual(getattr(pp[0], 'type'), 'string')
self.assertEqual(pp[1].name, 'p2')
self.assertEqual(getattr(pp[1], 'in'), 'query')
self.assertEqual(getattr(pp[1], 'type'), 'string')
def test_operation_scheme(self):
""" test patch Operation with scheme """
p = self.app.s('/s')
self.assertEqual(p.get.cached_schemes, self.app.root.schemes)
self.assertEqual(p.get.cached_schemes, ['http', 'https'])
def test_operation_security(self):
""" test patch Operation with Swagger.security """
p = self.app.s('/op_security')
# when security is something, do not overwrite
self.assertTrue(len(p.put.security) == 1)
self.assertTrue("internalApiKey" in p.put.security[0])
# when security is [], do not overwrite
self.assertEqual(p.get.security, [])
# when security is not provided, overwrite with global
self.assertTrue(len(p.post.security) == 2)
self.assertTrue("githubAccessCode" in p.post.security[0])
self.assertTrue("internalApiKey" in p.post.security[1])
def test_path_item(self):
""" test patch PathItem """
p = self.app.s('/pc')
self.assertEqual(p.get.method, 'get')
self.assertEqual(p.get.url, '//test.com/v1/pc')
self.assertEqual(p.get.path, '/pc')
self.assertEqual(p.get.base_path, '/v1')
def test_schema(self):
""" test patch Schema """
s = self.app.resolve('#/definitions/schema1')
self.assertEqual(s.name, 'schema1')
| {
"content_hash": "a62778368c828deb2c05963d7a890444",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 69,
"avg_line_length": 34.29032258064516,
"alnum_prop": 0.5989338350580119,
"repo_name": "mission-liao/pyswagger",
"id": "abf211436d3fcd2bce62f5162d3b6ed782938d32",
"size": "3189",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyswagger/tests/v2_0/test_patch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "389129"
}
],
"symlink_target": ""
} |
import numpy as np
import pandas as pd
import sklearn.preprocessing
def load_glove(vectors_file, normalize=False):
"""
Load a GloVe formatted file, which is simply of the format
<word_0><space><vec_0,0><space><vec_0,1><space>...<newline>
<word_1><space><vec_1,0><space><vec_1,1><space>...<newline>
...
See https://github.com/stanfordnlp/GloVe for more information.
That link also has information on how to download the pre-trained
word vectorizer models. If the file you download is compressed,
you will need to uncompress it before using this function.
    Note that the loading speed and memory usage are highly dependent
on what model you use. The downloadable model "glove.840B.300d.txt"
will take a few minutes to load and use 2.8 GB of memory, whereas the
model "glove.6B.50d.txt" will take a few seconds and use < 200 MB
of memory.
Sample usage:
>>> vectors = load_glove('tagnews/geoloc/glove.6B.50d.txt')
>>> text = 'This is a sentence and stuff.'
>>> # you should use an actual tokenizer for this step.
>>> vectorized_text = vectors.loc[[word.lower()
... for word in text.split()]]
>>> print(vectorized_text.shape)
(6, 300)
>>> k = 5
>>> import numpy as np
>>> def euc(word):
... return np.sum((vectors.values-vectors.loc[word].values)**2.0, 1)
...
>>> vectors.index[np.argpartition(euc('murder'), range(k))[:k]]
Inputs:
vectors_file: path to file that contains GloVe formatted word
vectors.
normalize: Should the word vectors be normalized? See
https://stats.stackexchange.com/questions/177905/ for
a good discussion on the topic.
    Returns:
vectors: NxM pandas dataframe whose rows are indexed by the word.
"""
with open(vectors_file, 'r', encoding='utf-8') as f:
for vocab_size, line in enumerate(f):
pass
vocab_size += 1
vec_size = len(line.split(' ')) - 1
vectors = np.zeros((vocab_size, vec_size), dtype=np.float32)
words = np.empty(shape=(vocab_size), dtype=np.dtype('object'))
with open(vectors_file, 'r', encoding='utf-8') as f:
for i, line in enumerate(f):
line = line.split(' ')
words[i] = line[0]
vectors[i] = [float(x) for x in line[1:]]
vectors = pd.DataFrame(vectors, index=words, copy=False)
vectors = vectors.loc[~vectors.index.duplicated()]
if normalize:
sklearn.preprocessing.normalize(vectors, copy=False)
return vectors
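# Illustrative follow-up (the GloVe path is hypothetical): a nearest-neighbour
# query by cosine similarity, computed explicitly so it works whether or not
# normalize=True was used at load time.
#
#   vectors = load_glove('glove.6B.50d.txt')
#   v = vectors.loc['king'].values
#   sims = vectors.values @ v / (np.linalg.norm(vectors.values, axis=1) * np.linalg.norm(v))
#   print(vectors.index[np.argsort(-sims)[:5]])  # top matches ('king' itself ranks first)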
| {
"content_hash": "4bce15e5fbea7f4e90e7438142078706",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 36.861111111111114,
"alnum_prop": 0.6137905048982668,
"repo_name": "chicago-justice-project/article-tagging",
"id": "ec7d7befcdd0179e2686bbbfa92f5326c960100a",
"size": "2654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/tagnews/utils/load_vectorizer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "513382"
},
{
"name": "Python",
"bytes": "65180"
},
{
"name": "R",
"bytes": "28509"
}
],
"symlink_target": ""
} |
import json
import pathlib
import proto
import re
import shutil
import tempfile
import requests
from typing import (
Any,
Dict,
List,
NamedTuple,
Optional,
Sequence,
Tuple,
TYPE_CHECKING,
Union,
)
from google.api_core import operation
from google.api_core import exceptions as api_exceptions
from google.auth import credentials as auth_credentials
from google.auth.transport import requests as google_auth_requests
from google.cloud import aiplatform
from google.cloud.aiplatform import base
from google.cloud.aiplatform import constants
from google.cloud.aiplatform import explain
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import jobs
from google.cloud.aiplatform import models
from google.cloud.aiplatform import utils
from google.cloud.aiplatform.utils import gcs_utils
from google.cloud.aiplatform import model_evaluation
from google.cloud.aiplatform.compat.services import endpoint_service_client
from google.cloud.aiplatform.compat.types import (
encryption_spec as gca_encryption_spec,
endpoint as gca_endpoint_compat,
explanation as gca_explanation_compat,
io as gca_io_compat,
machine_resources as gca_machine_resources_compat,
model as gca_model_compat,
model_service as gca_model_service_compat,
env_var as gca_env_var_compat,
)
from google.cloud.aiplatform.constants import (
prediction as prediction_constants,
)
from google.protobuf import field_mask_pb2, timestamp_pb2
from google.protobuf import json_format
if TYPE_CHECKING:
from google.cloud.aiplatform.prediction import LocalModel
_DEFAULT_MACHINE_TYPE = "n1-standard-2"
_DEPLOYING_MODEL_TRAFFIC_SPLIT_KEY = "0"
_SUCCESSFUL_HTTP_RESPONSE = 300
_RAW_PREDICT_DEPLOYED_MODEL_ID_KEY = "X-Vertex-AI-Deployed-Model-Id"
_RAW_PREDICT_MODEL_RESOURCE_KEY = "X-Vertex-AI-Model"
_RAW_PREDICT_MODEL_VERSION_ID_KEY = "X-Vertex-AI-Model-Version-Id"
_LOGGER = base.Logger(__name__)
_SUPPORTED_MODEL_FILE_NAMES = [
"model.pkl",
"model.joblib",
"model.bst",
"saved_model.pb",
"saved_model.pbtxt",
]
class VersionInfo(NamedTuple):
"""VersionInfo class envelopes returned Model version information.
Attributes:
version_id:
The version ID of the model.
        version_create_time:
            Timestamp when this Model version was uploaded into Vertex AI.
        version_update_time:
Timestamp when this Model version was most recently updated.
model_display_name:
The user-defined name of the model this version belongs to.
model_resource_name:
The fully-qualified model resource name.
e.g. projects/{project}/locations/{location}/models/{model_display_name}
version_aliases:
User provided version aliases so that a model version can be referenced via
alias (i.e. projects/{project}/locations/{location}/models/{model_display_name}@{version_alias}).
Default is None.
version_description:
The description of this version.
Default is None.
"""
version_id: str
version_create_time: timestamp_pb2.Timestamp
version_update_time: timestamp_pb2.Timestamp
model_display_name: str
model_resource_name: str
version_aliases: Optional[Sequence[str]] = None
version_description: Optional[str] = None
class Prediction(NamedTuple):
"""Prediction class envelopes returned Model predictions and the Model id.
Attributes:
predictions:
The predictions that are the output of the predictions
call. The schema of any single prediction may be specified via
Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
deployed_model_id:
ID of the Endpoint's DeployedModel that served this prediction.
model_version_id:
ID of the DeployedModel's version that served this prediction.
model_resource_name:
The fully-qualified resource name of the model that served this prediction.
explanations:
The explanations of the Model's predictions. It has the same number
of elements as instances to be explained. Default is None.
"""
predictions: List[Dict[str, Any]]
deployed_model_id: str
model_version_id: Optional[str] = None
model_resource_name: Optional[str] = None
explanations: Optional[Sequence[gca_explanation_compat.Explanation]] = None
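# Note (added for clarity): instances of this tuple are what Endpoint.predict
# typically returns, so callers usually use it along the lines of
#   prediction = endpoint.predict(instances=[instance_dict])
#   prediction.predictions[0], prediction.deployed_model_id
# where `instance_dict` is a placeholder whose schema depends on the deployed model.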
class Endpoint(base.VertexAiResourceNounWithFutureManager):
client_class = utils.EndpointClientWithOverride
_resource_noun = "endpoints"
_getter_method = "get_endpoint"
_list_method = "list_endpoints"
_delete_method = "delete_endpoint"
_parse_resource_name_method = "parse_endpoint_path"
_format_resource_name_method = "endpoint_path"
def __init__(
self,
endpoint_name: str,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
):
"""Retrieves an endpoint resource.
Args:
endpoint_name (str):
Required. A fully-qualified endpoint resource name or endpoint ID.
Example: "projects/123/locations/us-central1/endpoints/456" or
"456" when project and location are initialized or passed.
project (str):
Optional. Project to retrieve endpoint from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve endpoint from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to upload this model. Overrides
credentials set in aiplatform.init.
"""
super().__init__(
project=project,
location=location,
credentials=credentials,
resource_name=endpoint_name,
)
endpoint_name = utils.full_resource_name(
resource_name=endpoint_name,
resource_noun="endpoints",
parse_resource_name_method=self._parse_resource_name,
format_resource_name_method=self._format_resource_name,
project=project,
location=location,
)
# Lazy load the Endpoint gca_resource until needed
self._gca_resource = gca_endpoint_compat.Endpoint(name=endpoint_name)
self._prediction_client = self._instantiate_prediction_client(
location=self.location,
credentials=credentials,
)
self.authorized_session = None
self.raw_predict_request_url = None
def _skipped_getter_call(self) -> bool:
"""Check if GAPIC resource was populated by call to get/list API methods
Returns False if `_gca_resource` is None or fully populated. Returns True
if `_gca_resource` is partially populated
"""
return self._gca_resource and not self._gca_resource.create_time
def _sync_gca_resource_if_skipped(self) -> None:
"""Sync GAPIC service representation of Endpoint class resource only if
get_endpoint() was never called."""
if self._skipped_getter_call():
self._gca_resource = self._get_gca_resource(
resource_name=self._gca_resource.name
)
def _assert_gca_resource_is_available(self) -> None:
"""Ensures Endpoint getter was called at least once before
asserting on gca_resource's availability."""
super()._assert_gca_resource_is_available()
self._sync_gca_resource_if_skipped()
@property
def traffic_split(self) -> Dict[str, int]:
"""A map from a DeployedModel's ID to the percentage of this Endpoint's
traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives no traffic.
The traffic percentage values must add up to 100, or map must be empty if
the Endpoint is to not accept any traffic at a moment.
"""
self._sync_gca_resource()
return dict(self._gca_resource.traffic_split)
@property
def network(self) -> Optional[str]:
"""The full name of the Google Compute Engine
[network](https://cloud.google.com/vpc/docs/vpc#networks) to which this
Endpoint should be peered.
Takes the format `projects/{project}/global/networks/{network}`. Where
{project} is a project number, as in `12345`, and {network} is a network name.
Private services access must already be configured for the network. If left
unspecified, the Endpoint is not peered with any network.
"""
self._assert_gca_resource_is_available()
return getattr(self._gca_resource, "network", None)
@classmethod
def create(
cls,
display_name: Optional[str] = None,
description: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
encryption_spec_key_name: Optional[str] = None,
sync=True,
create_request_timeout: Optional[float] = None,
endpoint_id: Optional[str] = None,
) -> "Endpoint":
"""Creates a new endpoint.
Args:
display_name (str):
Optional. The user-defined name of the Endpoint.
The name can be up to 128 characters long and can be consist
of any UTF-8 characters.
description (str):
Optional. The description of the Endpoint.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Endpoints.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
project (str):
Required. Project to retrieve endpoint from. If not set, project
set in aiplatform.init will be used.
location (str):
Required. Location to retrieve endpoint from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to upload this model. Overrides
credentials set in aiplatform.init.
encryption_spec_key_name (str):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
endpoint_id (str):
Optional. The ID to use for endpoint, which will become
the final component of the endpoint resource name. If
not provided, Vertex AI will generate a value for this
ID.
This value should be 1-10 characters, and valid
characters are /[0-9]/. When using HTTP/JSON, this field
is populated based on a query string argument, such as
``?endpoint_id=12345``. This is the fallback for fields
that are not included in either the URI or the body.
Returns:
endpoint (aiplatform.Endpoint):
Created endpoint.
"""
api_client = cls._instantiate_client(location=location, credentials=credentials)
if not display_name:
display_name = cls._generate_display_name()
utils.validate_display_name(display_name)
if labels:
utils.validate_labels(labels)
project = project or initializer.global_config.project
location = location or initializer.global_config.location
return cls._create(
api_client=api_client,
display_name=display_name,
project=project,
location=location,
description=description,
labels=labels,
metadata=metadata,
credentials=credentials,
encryption_spec=initializer.global_config.get_encryption_spec(
encryption_spec_key_name=encryption_spec_key_name
),
sync=sync,
create_request_timeout=create_request_timeout,
endpoint_id=endpoint_id,
)
@classmethod
@base.optional_sync()
def _create(
cls,
api_client: endpoint_service_client.EndpointServiceClient,
display_name: str,
project: str,
location: str,
description: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
credentials: Optional[auth_credentials.Credentials] = None,
encryption_spec: Optional[gca_encryption_spec.EncryptionSpec] = None,
network: Optional[str] = None,
sync=True,
create_request_timeout: Optional[float] = None,
endpoint_id: Optional[str] = None,
) -> "Endpoint":
"""Creates a new endpoint by calling the API client.
Args:
api_client (EndpointServiceClient):
Required. An instance of EndpointServiceClient with the correct
api_endpoint already set based on user's preferences.
display_name (str):
Required. The user-defined name of the Endpoint.
The name can be up to 128 characters long and can be consist
of any UTF-8 characters.
project (str):
Required. Project to retrieve endpoint from. If not set, project
set in aiplatform.init will be used.
location (str):
Required. Location to retrieve endpoint from. If not set, location
set in aiplatform.init will be used.
description (str):
Optional. The description of the Endpoint.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Endpoints.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to upload this model. Overrides
credentials set in aiplatform.init.
encryption_spec (gca_encryption_spec.EncryptionSpec):
Optional. The Cloud KMS customer managed encryption key used to protect the dataset.
The key needs to be in the same region as where the compute
resource is created.
If set, this Dataset and all sub-resources of this Dataset will be secured by this key.
network (str):
Optional. The full name of the Compute Engine network to which
this Endpoint will be peered. E.g. "projects/12345/global/networks/myVPC".
Private services access must already be configured for the network.
Read more about PrivateEndpoints
[in the documentation](https://cloud.google.com/vertex-ai/docs/predictions/using-private-endpoints).
sync (bool):
Whether to create this endpoint synchronously.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
endpoint_id (str):
Optional. The ID to use for endpoint, which will become
the final component of the endpoint resource name. If
not provided, Vertex AI will generate a value for this
ID.
This value should be 1-10 characters, and valid
characters are /[0-9]/. When using HTTP/JSON, this field
is populated based on a query string argument, such as
``?endpoint_id=12345``. This is the fallback for fields
that are not included in either the URI or the body.
Returns:
endpoint (aiplatform.Endpoint):
Created endpoint.
"""
parent = initializer.global_config.common_location_path(
project=project, location=location
)
gapic_endpoint = gca_endpoint_compat.Endpoint(
display_name=display_name,
description=description,
labels=labels,
encryption_spec=encryption_spec,
network=network,
)
operation_future = api_client.create_endpoint(
parent=parent,
endpoint=gapic_endpoint,
endpoint_id=endpoint_id,
metadata=metadata,
timeout=create_request_timeout,
)
_LOGGER.log_create_with_lro(cls, operation_future)
created_endpoint = operation_future.result()
_LOGGER.log_create_complete(cls, created_endpoint, "endpoint")
return cls._construct_sdk_resource_from_gapic(
gapic_resource=created_endpoint,
project=project,
location=location,
credentials=credentials,
)
@classmethod
def _construct_sdk_resource_from_gapic(
cls,
gapic_resource: proto.Message,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> "Endpoint":
"""Given a GAPIC Endpoint object, return the SDK representation.
Args:
gapic_resource (proto.Message):
A GAPIC representation of a Endpoint resource, usually
retrieved by a get_* or in a list_* API call.
project (str):
Optional. Project to construct Endpoint object from. If not set,
project set in aiplatform.init will be used.
location (str):
Optional. Location to construct Endpoint object from. If not set,
location set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to construct Endpoint.
Overrides credentials set in aiplatform.init.
Returns:
Endpoint (aiplatform.Endpoint):
An initialized Endpoint resource.
"""
endpoint = cls._empty_constructor(
project=project, location=location, credentials=credentials
)
endpoint._gca_resource = gapic_resource
endpoint._prediction_client = cls._instantiate_prediction_client(
location=endpoint.location,
credentials=credentials,
)
return endpoint
@staticmethod
def _allocate_traffic(
traffic_split: Dict[str, int],
traffic_percentage: int,
) -> Dict[str, int]:
"""Allocates desired traffic to new deployed model and scales traffic
of older deployed models.
Args:
traffic_split (Dict[str, int]):
Required. Current traffic split of deployed models in endpoint.
traffic_percentage (int):
Required. Desired traffic to new deployed model.
Returns:
new_traffic_split (Dict[str, int]):
Traffic split to use.
"""
new_traffic_split = {}
old_models_traffic = 100 - traffic_percentage
if old_models_traffic:
unallocated_traffic = old_models_traffic
for deployed_model in traffic_split:
current_traffic = traffic_split[deployed_model]
new_traffic = int(current_traffic / 100 * old_models_traffic)
new_traffic_split[deployed_model] = new_traffic
unallocated_traffic -= new_traffic
# will likely under-allocate. make total 100.
for deployed_model in new_traffic_split:
if unallocated_traffic == 0:
break
new_traffic_split[deployed_model] += 1
unallocated_traffic -= 1
new_traffic_split[_DEPLOYING_MODEL_TRAFFIC_SPLIT_KEY] = traffic_percentage
return new_traffic_split
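        # Worked example (illustrative IDs): with an existing split of
        # {"123": 50, "456": 50} and traffic_percentage=34, the old models are
        # scaled into the remaining 66% as int(50/100*66)=33 each, nothing is
        # left to redistribute, and the new model is keyed under "0":
        # {"123": 33, "456": 33, "0": 34}.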
@staticmethod
def _unallocate_traffic(
traffic_split: Dict[str, int],
deployed_model_id: str,
) -> Dict[str, int]:
"""Sets deployed model id's traffic to 0 and scales the traffic of
other deployed models.
Args:
traffic_split (Dict[str, int]):
Required. Current traffic split of deployed models in endpoint.
deployed_model_id (str):
Required. Desired traffic to new deployed model.
Returns:
new_traffic_split (Dict[str, int]):
Traffic split to use.
"""
new_traffic_split = traffic_split.copy()
del new_traffic_split[deployed_model_id]
deployed_model_id_traffic = traffic_split[deployed_model_id]
traffic_percent_left = 100 - deployed_model_id_traffic
if traffic_percent_left:
unallocated_traffic = 100
for deployed_model in new_traffic_split:
current_traffic = traffic_split[deployed_model]
new_traffic = int(current_traffic / traffic_percent_left * 100)
new_traffic_split[deployed_model] = new_traffic
unallocated_traffic -= new_traffic
# will likely under-allocate. make total 100.
for deployed_model in new_traffic_split:
if unallocated_traffic == 0:
break
new_traffic_split[deployed_model] += 1
unallocated_traffic -= 1
new_traffic_split[deployed_model_id] = 0
return new_traffic_split
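        # Worked example (illustrative IDs): removing deployed model "123" from
        # {"123": 40, "456": 60} leaves 60% to rescale, so "456" becomes
        # int(60/60*100)=100 and the result is {"456": 100, "123": 0}.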
@staticmethod
def _validate_deploy_args(
min_replica_count: int,
max_replica_count: int,
accelerator_type: Optional[str],
deployed_model_display_name: Optional[str],
traffic_split: Optional[Dict[str, int]],
traffic_percentage: Optional[int],
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
):
"""Helper method to validate deploy arguments.
Args:
min_replica_count (int):
Required. The minimum number of machine replicas this deployed
model will be always deployed on. If traffic against it increases,
it may dynamically be deployed onto more replicas, and as traffic
decreases, some of these extra replicas may be freed.
max_replica_count (int):
Required. The maximum number of replicas this deployed model may
be deployed on when the traffic against it increases. If requested
value is too large, the deployment will error, but if deployment
succeeds then the ability to scale the model to that many replicas
is guaranteed (barring service outages). If traffic against the
deployed model increases beyond what its replicas at maximum may
handle, a portion of the traffic will be dropped. If this value
is not provided, the larger value of min_replica_count or 1 will
be used. If value provided is smaller than min_replica_count, it
will automatically be increased to be min_replica_count.
accelerator_type (str):
Required. Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED,
NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4,
NVIDIA_TESLA_T4
deployed_model_display_name (str):
Required. The display name of the DeployedModel. If not provided
upon creation, the Model's display_name is used.
traffic_split (Dict[str, int]):
Optional. A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives
no traffic. The traffic percentage values must add up to 100, or
map must be empty if the Endpoint is to not accept any traffic at
the moment. Key for model being deployed is "0". Should not be
provided if traffic_percentage is provided.
traffic_percentage (int):
Optional. Desired traffic to newly deployed model. Defaults to
0 if there are pre-existing deployed models. Defaults to 100 if
there are no pre-existing deployed models. Negative values should
not be provided. Traffic of previously deployed models at the endpoint
will be scaled down to accommodate new deployed model's traffic.
Should not be provided if traffic_split is provided.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
Raises:
ValueError: if Min or Max replica is negative. Traffic percentage > 100 or
< 0. Or if traffic_split does not sum to 100.
ValueError: if explanation_metadata is specified while explanation_parameters
is not.
"""
if min_replica_count < 0:
raise ValueError("Min replica cannot be negative.")
if max_replica_count < 0:
raise ValueError("Max replica cannot be negative.")
if deployed_model_display_name is not None:
utils.validate_display_name(deployed_model_display_name)
if traffic_split is None:
if traffic_percentage > 100:
raise ValueError("Traffic percentage cannot be greater than 100.")
if traffic_percentage < 0:
raise ValueError("Traffic percentage cannot be negative.")
elif traffic_split:
if sum(traffic_split.values()) != 100:
raise ValueError(
"Sum of all traffic within traffic split needs to be 100."
)
if bool(explanation_metadata) and not bool(explanation_parameters):
raise ValueError(
"To get model explanation, `explanation_parameters` must be specified."
)
# Raises ValueError if invalid accelerator
if accelerator_type:
utils.validate_accelerator_type(accelerator_type)
def deploy(
self,
model: "Model",
deployed_model_display_name: Optional[str] = None,
traffic_percentage: int = 0,
traffic_split: Optional[Dict[str, int]] = None,
machine_type: Optional[str] = None,
min_replica_count: int = 1,
max_replica_count: int = 1,
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
service_account: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
sync=True,
deploy_request_timeout: Optional[float] = None,
autoscaling_target_cpu_utilization: Optional[int] = None,
autoscaling_target_accelerator_duty_cycle: Optional[int] = None,
) -> None:
"""Deploys a Model to the Endpoint.
Args:
model (aiplatform.Model):
Required. Model to be deployed.
deployed_model_display_name (str):
Optional. The display name of the DeployedModel. If not provided
upon creation, the Model's display_name is used.
traffic_percentage (int):
Optional. Desired traffic to newly deployed model. Defaults to
0 if there are pre-existing deployed models. Defaults to 100 if
there are no pre-existing deployed models. Negative values should
not be provided. Traffic of previously deployed models at the endpoint
will be scaled down to accommodate new deployed model's traffic.
Should not be provided if traffic_split is provided.
traffic_split (Dict[str, int]):
Optional. A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives
no traffic. The traffic percentage values must add up to 100, or
map must be empty if the Endpoint is to not accept any traffic at
the moment. Key for model being deployed is "0". Should not be
provided if traffic_percentage is provided.
machine_type (str):
Optional. The type of machine. Not specifying machine type will
                result in the model being deployed with automatic resources.
min_replica_count (int):
Optional. The minimum number of machine replicas this deployed
                model will always be deployed on. If traffic against it increases,
it may dynamically be deployed onto more replicas, and as traffic
decreases, some of these extra replicas may be freed.
max_replica_count (int):
Optional. The maximum number of replicas this deployed model may
be deployed on when the traffic against it increases. If requested
value is too large, the deployment will error, but if deployment
succeeds then the ability to scale the model to that many replicas
is guaranteed (barring service outages). If traffic against the
deployed model increases beyond what its replicas at maximum may
handle, a portion of the traffic will be dropped. If this value
is not provided, the larger value of min_replica_count or 1 will
be used. If value provided is smaller than min_replica_count, it
will automatically be increased to be min_replica_count.
accelerator_type (str):
Optional. Hardware accelerator type. Must also set accelerator_count if used.
One of ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4
accelerator_count (int):
Optional. The number of accelerators to attach to a worker replica.
service_account (str):
The service account that the DeployedModel's container runs as. Specify the
email address of the service account. If this service account is not
specified, the container runs as a service account that doesn't have access
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
deploy_request_timeout (float):
Optional. The timeout for the deploy request in seconds.
autoscaling_target_cpu_utilization (int):
Target CPU Utilization to use for Autoscaling Replicas.
A default value of 60 will be used if not specified.
autoscaling_target_accelerator_duty_cycle (int):
Target Accelerator Duty Cycle.
Must also set accelerator_type and accelerator_count if specified.
A default value of 60 will be used if not specified.
"""
self._sync_gca_resource_if_skipped()
self._validate_deploy_args(
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
accelerator_type=accelerator_type,
deployed_model_display_name=deployed_model_display_name,
traffic_split=traffic_split,
traffic_percentage=traffic_percentage,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
)
self._deploy(
model=model,
deployed_model_display_name=deployed_model_display_name,
traffic_percentage=traffic_percentage,
traffic_split=traffic_split,
machine_type=machine_type,
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
metadata=metadata,
sync=sync,
deploy_request_timeout=deploy_request_timeout,
autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization,
autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle,
)
@base.optional_sync()
def _deploy(
self,
model: "Model",
deployed_model_display_name: Optional[str] = None,
traffic_percentage: Optional[int] = 0,
traffic_split: Optional[Dict[str, int]] = None,
machine_type: Optional[str] = None,
min_replica_count: int = 1,
max_replica_count: int = 1,
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
service_account: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
sync=True,
deploy_request_timeout: Optional[float] = None,
autoscaling_target_cpu_utilization: Optional[int] = None,
autoscaling_target_accelerator_duty_cycle: Optional[int] = None,
) -> None:
"""Deploys a Model to the Endpoint.
Args:
model (aiplatform.Model):
Required. Model to be deployed.
deployed_model_display_name (str):
Optional. The display name of the DeployedModel. If not provided
upon creation, the Model's display_name is used.
traffic_percentage (int):
Optional. Desired traffic to newly deployed model. Defaults to
0 if there are pre-existing deployed models. Defaults to 100 if
there are no pre-existing deployed models. Negative values should
not be provided. Traffic of previously deployed models at the endpoint
will be scaled down to accommodate new deployed model's traffic.
Should not be provided if traffic_split is provided.
traffic_split (Dict[str, int]):
Optional. A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives
no traffic. The traffic percentage values must add up to 100, or
map must be empty if the Endpoint is to not accept any traffic at
the moment. Key for model being deployed is "0". Should not be
provided if traffic_percentage is provided.
machine_type (str):
Optional. The type of machine. Not specifying machine type will
                result in the model being deployed with automatic resources.
min_replica_count (int):
Optional. The minimum number of machine replicas this deployed
                model will always be deployed on. If traffic against it increases,
it may dynamically be deployed onto more replicas, and as traffic
decreases, some of these extra replicas may be freed.
max_replica_count (int):
Optional. The maximum number of replicas this deployed model may
be deployed on when the traffic against it increases. If requested
value is too large, the deployment will error, but if deployment
succeeds then the ability to scale the model to that many replicas
is guaranteed (barring service outages). If traffic against the
deployed model increases beyond what its replicas at maximum may
handle, a portion of the traffic will be dropped. If this value
is not provided, the larger value of min_replica_count or 1 will
be used. If value provided is smaller than min_replica_count, it
will automatically be increased to be min_replica_count.
accelerator_type (str):
Optional. Hardware accelerator type. Must also set accelerator_count if used.
One of ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4
accelerator_count (int):
Optional. The number of accelerators to attach to a worker replica.
service_account (str):
The service account that the DeployedModel's container runs as. Specify the
email address of the service account. If this service account is not
specified, the container runs as a service account that doesn't have access
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
deploy_request_timeout (float):
Optional. The timeout for the deploy request in seconds.
autoscaling_target_cpu_utilization (int):
Target CPU Utilization to use for Autoscaling Replicas.
A default value of 60 will be used if not specified.
autoscaling_target_accelerator_duty_cycle (int):
Target Accelerator Duty Cycle.
Must also set accelerator_type and accelerator_count if specified.
A default value of 60 will be used if not specified.
"""
_LOGGER.log_action_start_against_resource(
f"Deploying Model {model.resource_name} to", "", self
)
self._deploy_call(
api_client=self.api_client,
endpoint_resource_name=self.resource_name,
model=model,
endpoint_resource_traffic_split=self._gca_resource.traffic_split,
network=self.network,
deployed_model_display_name=deployed_model_display_name,
traffic_percentage=traffic_percentage,
traffic_split=traffic_split,
machine_type=machine_type,
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
metadata=metadata,
deploy_request_timeout=deploy_request_timeout,
autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization,
autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle,
)
_LOGGER.log_action_completed_against_resource("model", "deployed", self)
self._sync_gca_resource()
@classmethod
def _deploy_call(
cls,
api_client: endpoint_service_client.EndpointServiceClient,
endpoint_resource_name: str,
model: "Model",
endpoint_resource_traffic_split: Optional[proto.MapField] = None,
network: Optional[str] = None,
deployed_model_display_name: Optional[str] = None,
traffic_percentage: Optional[int] = 0,
traffic_split: Optional[Dict[str, int]] = None,
machine_type: Optional[str] = None,
min_replica_count: int = 1,
max_replica_count: int = 1,
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
service_account: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
deploy_request_timeout: Optional[float] = None,
autoscaling_target_cpu_utilization: Optional[int] = None,
autoscaling_target_accelerator_duty_cycle: Optional[int] = None,
):
"""Helper method to deploy model to endpoint.
Args:
api_client (endpoint_service_client.EndpointServiceClient):
Required. endpoint_service_client.EndpointServiceClient to make call.
endpoint_resource_name (str):
Required. Endpoint resource name to deploy model to.
model (aiplatform.Model):
Required. Model to be deployed.
endpoint_resource_traffic_split (proto.MapField):
Optional. Endpoint current resource traffic split.
network (str):
Optional. The full name of the Compute Engine network to which
this Endpoint will be peered. E.g. "projects/123/global/networks/my_vpc".
Private services access must already be configured for the network.
deployed_model_display_name (str):
Optional. The display name of the DeployedModel. If not provided
upon creation, the Model's display_name is used.
traffic_percentage (int):
Optional. Desired traffic to newly deployed model. Defaults to
0 if there are pre-existing deployed models. Defaults to 100 if
there are no pre-existing deployed models. Negative values should
not be provided. Traffic of previously deployed models at the endpoint
will be scaled down to accommodate new deployed model's traffic.
Should not be provided if traffic_split is provided.
traffic_split (Dict[str, int]):
Optional. A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives
no traffic. The traffic percentage values must add up to 100, or
map must be empty if the Endpoint is to not accept any traffic at
the moment. Key for model being deployed is "0". Should not be
provided if traffic_percentage is provided.
machine_type (str):
Optional. The type of machine. Not specifying machine type will
                result in the model being deployed with automatic resources.
min_replica_count (int):
Optional. The minimum number of machine replicas this deployed
                model will always be deployed on. If traffic against it increases,
it may dynamically be deployed onto more replicas, and as traffic
decreases, some of these extra replicas may be freed.
max_replica_count (int):
Optional. The maximum number of replicas this deployed model may
be deployed on when the traffic against it increases. If requested
value is too large, the deployment will error, but if deployment
succeeds then the ability to scale the model to that many replicas
is guaranteed (barring service outages). If traffic against the
deployed model increases beyond what its replicas at maximum may
handle, a portion of the traffic will be dropped. If this value
is not provided, the larger value of min_replica_count or 1 will
be used. If value provided is smaller than min_replica_count, it
will automatically be increased to be min_replica_count.
accelerator_type (str):
Optional. Hardware accelerator type. Must also set accelerator_count if used.
One of ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4
accelerator_count (int):
Optional. The number of accelerators to attach to a worker replica.
service_account (str):
The service account that the DeployedModel's container runs as. Specify the
email address of the service account. If this service account is not
specified, the container runs as a service account that doesn't have access
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
deploy_request_timeout (float):
Optional. The timeout for the deploy request in seconds.
autoscaling_target_cpu_utilization (int):
Optional. Target CPU Utilization to use for Autoscaling Replicas.
A default value of 60 will be used if not specified.
autoscaling_target_accelerator_duty_cycle (int):
Optional. Target Accelerator Duty Cycle.
Must also set accelerator_type and accelerator_count if specified.
A default value of 60 will be used if not specified.
Raises:
ValueError: If only `accelerator_type` or `accelerator_count` is specified.
ValueError: If model does not support deployment.
            ValueError: If there is no current traffic split and the traffic percentage
                is not 0 or 100.
"""
max_replica_count = max(min_replica_count, max_replica_count)
if bool(accelerator_type) != bool(accelerator_count):
raise ValueError(
"Both `accelerator_type` and `accelerator_count` should be specified or None."
)
if autoscaling_target_accelerator_duty_cycle is not None and (
not accelerator_type or not accelerator_count
):
raise ValueError(
"Both `accelerator_type` and `accelerator_count` should be set "
"when specifying autoscaling_target_accelerator_duty_cycle`"
)
deployed_model = gca_endpoint_compat.DeployedModel(
model=model.versioned_resource_name,
display_name=deployed_model_display_name,
service_account=service_account,
)
supports_automatic_resources = (
gca_model_compat.Model.DeploymentResourcesType.AUTOMATIC_RESOURCES
in model.supported_deployment_resources_types
)
supports_dedicated_resources = (
gca_model_compat.Model.DeploymentResourcesType.DEDICATED_RESOURCES
in model.supported_deployment_resources_types
)
provided_custom_machine_spec = (
machine_type
or accelerator_type
or accelerator_count
or autoscaling_target_accelerator_duty_cycle
or autoscaling_target_cpu_utilization
)
# If the model supports both automatic and dedicated deployment resources,
# decide based on the presence of machine spec customizations
use_dedicated_resources = supports_dedicated_resources and (
not supports_automatic_resources or provided_custom_machine_spec
)
if provided_custom_machine_spec and not use_dedicated_resources:
_LOGGER.info(
"Model does not support dedicated deployment resources. "
"The machine_type, accelerator_type and accelerator_count,"
"autoscaling_target_accelerator_duty_cycle,"
"autoscaling_target_cpu_utilization parameters are ignored."
)
if use_dedicated_resources and not machine_type:
machine_type = _DEFAULT_MACHINE_TYPE
_LOGGER.info(f"Using default machine_type: {machine_type}")
if use_dedicated_resources:
dedicated_resources = gca_machine_resources_compat.DedicatedResources(
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
)
machine_spec = gca_machine_resources_compat.MachineSpec(
machine_type=machine_type
)
if autoscaling_target_cpu_utilization:
autoscaling_metric_spec = gca_machine_resources_compat.AutoscalingMetricSpec(
metric_name="aiplatform.googleapis.com/prediction/online/cpu/utilization",
target=autoscaling_target_cpu_utilization,
)
dedicated_resources.autoscaling_metric_specs.extend(
[autoscaling_metric_spec]
)
if accelerator_type and accelerator_count:
utils.validate_accelerator_type(accelerator_type)
machine_spec.accelerator_type = accelerator_type
machine_spec.accelerator_count = accelerator_count
if autoscaling_target_accelerator_duty_cycle:
autoscaling_metric_spec = gca_machine_resources_compat.AutoscalingMetricSpec(
metric_name="aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle",
target=autoscaling_target_accelerator_duty_cycle,
)
dedicated_resources.autoscaling_metric_specs.extend(
[autoscaling_metric_spec]
)
dedicated_resources.machine_spec = machine_spec
deployed_model.dedicated_resources = dedicated_resources
elif supports_automatic_resources:
deployed_model.automatic_resources = (
gca_machine_resources_compat.AutomaticResources(
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
)
)
else:
raise ValueError(
"Model does not support deployment. "
"See https://cloud.google.com/vertex-ai/docs/reference/rpc/google.cloud.aiplatform.v1#google.cloud.aiplatform.v1.Model.FIELDS.repeated.google.cloud.aiplatform.v1.Model.DeploymentResourcesType.google.cloud.aiplatform.v1.Model.supported_deployment_resources_types"
)
# Service will throw error if explanation_parameters is not provided
if explanation_parameters:
explanation_spec = gca_endpoint_compat.explanation.ExplanationSpec()
explanation_spec.parameters = explanation_parameters
if explanation_metadata:
explanation_spec.metadata = explanation_metadata
deployed_model.explanation_spec = explanation_spec
# Checking if traffic percentage is valid
# TODO(b/221059294) PrivateEndpoint should support traffic split
if traffic_split is None and not network:
# new model traffic needs to be 100 if no pre-existing models
if not endpoint_resource_traffic_split:
# default scenario
if traffic_percentage == 0:
traffic_percentage = 100
# verify user specified 100
elif traffic_percentage < 100:
raise ValueError(
"""There are currently no deployed models so the traffic
percentage for this deployed model needs to be 100."""
)
traffic_split = cls._allocate_traffic(
traffic_split=dict(endpoint_resource_traffic_split),
traffic_percentage=traffic_percentage,
)
operation_future = api_client.deploy_model(
endpoint=endpoint_resource_name,
deployed_model=deployed_model,
traffic_split=traffic_split,
metadata=metadata,
timeout=deploy_request_timeout,
)
_LOGGER.log_action_started_against_resource_with_lro(
"Deploy", "model", cls, operation_future
)
operation_future.result()
def undeploy(
self,
deployed_model_id: str,
traffic_split: Optional[Dict[str, int]] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
sync=True,
) -> None:
"""Undeploys a deployed model.
        The model to be undeployed should have no traffic, or the user must provide
a new traffic_split with the remaining deployed models. Refer
to `Endpoint.traffic_split` for the current traffic split mapping.
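        Example usage (illustrative; the deployed model ID is a placeholder and
        can be obtained from Endpoint.list_models()):
            my_endpoint.undeploy(deployed_model_id="1234567891234567890")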
Args:
deployed_model_id (str):
Required. The ID of the DeployedModel to be undeployed from the
Endpoint.
traffic_split (Dict[str, int]):
Optional. A map of DeployedModel IDs to the percentage of
this Endpoint's traffic that should be forwarded to that DeployedModel.
Required if undeploying a model with non-zero traffic from an Endpoint
with multiple deployed models. The traffic percentage values must add
up to 100, or map must be empty if the Endpoint is to not accept any traffic
at the moment. If a DeployedModel's ID is not listed in this map, then it
receives no traffic.
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
"""
self._sync_gca_resource_if_skipped()
if traffic_split is not None:
if deployed_model_id in traffic_split and traffic_split[deployed_model_id]:
raise ValueError("Model being undeployed should have 0 traffic.")
if sum(traffic_split.values()) != 100:
raise ValueError(
"Sum of all traffic within traffic split needs to be 100."
)
# Two or more models deployed to Endpoint and remaining traffic will be zero
elif (
len(self.traffic_split) > 1
and deployed_model_id in self._gca_resource.traffic_split
and self._gca_resource.traffic_split[deployed_model_id] == 100
):
raise ValueError(
f"Undeploying deployed model '{deployed_model_id}' would leave the remaining "
"traffic split at 0%. Traffic split must add up to 100% when models are "
"deployed. Please undeploy the other models first or provide an updated "
"traffic_split."
)
self._undeploy(
deployed_model_id=deployed_model_id,
traffic_split=traffic_split,
metadata=metadata,
sync=sync,
)
@base.optional_sync()
def _undeploy(
self,
deployed_model_id: str,
traffic_split: Optional[Dict[str, int]] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
sync=True,
) -> None:
"""Undeploys a deployed model.
Proportionally adjusts the traffic_split among the remaining deployed
models of the endpoint.
Args:
deployed_model_id (str):
Required. The ID of the DeployedModel to be undeployed from the
Endpoint.
traffic_split (Dict[str, int]):
Optional. A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives
no traffic. The traffic percentage values must add up to 100, or
map must be empty if the Endpoint is to not accept any traffic at
the moment. Key for model being deployed is "0". Should not be
provided if traffic_percentage is provided.
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
"""
self._sync_gca_resource_if_skipped()
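        # If the caller did not supply a traffic_split, start from the Endpoint's
        # current split and proportionally redistribute the undeployed model's
        # share across the remaining deployed models before removing its entry.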
current_traffic_split = traffic_split or dict(self._gca_resource.traffic_split)
if deployed_model_id in current_traffic_split:
current_traffic_split = self._unallocate_traffic(
traffic_split=current_traffic_split,
deployed_model_id=deployed_model_id,
)
current_traffic_split.pop(deployed_model_id)
_LOGGER.log_action_start_against_resource("Undeploying", "model", self)
operation_future = self.api_client.undeploy_model(
endpoint=self.resource_name,
deployed_model_id=deployed_model_id,
traffic_split=current_traffic_split,
metadata=metadata,
)
_LOGGER.log_action_started_against_resource_with_lro(
"Undeploy", "model", self.__class__, operation_future
)
# block before returning
operation_future.result()
_LOGGER.log_action_completed_against_resource("model", "undeployed", self)
# update local resource
self._sync_gca_resource()
@staticmethod
def _instantiate_prediction_client(
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> utils.PredictionClientWithOverride:
"""Helper method to instantiates prediction client with optional
overrides for this endpoint.
Args:
location (str): The location of this endpoint.
credentials (google.auth.credentials.Credentials):
                Optional. Custom credentials to use when interacting with
                the prediction client.
Returns:
prediction_client (prediction_service_client.PredictionServiceClient):
Initialized prediction client with optional overrides.
"""
return initializer.global_config.create_client(
client_class=utils.PredictionClientWithOverride,
credentials=credentials,
location_override=location,
prediction_client=True,
)
def update(
self,
display_name: Optional[str] = None,
description: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
traffic_split: Optional[Dict[str, int]] = None,
request_metadata: Optional[Sequence[Tuple[str, str]]] = (),
update_request_timeout: Optional[float] = None,
) -> "Endpoint":
"""Updates an endpoint.
Example usage:
my_endpoint = my_endpoint.update(
display_name='my-updated-endpoint',
description='my updated description',
labels={'key': 'value'},
traffic_split={
'123456': 20,
'234567': 80,
},
)
Args:
display_name (str):
Optional. The display name of the Endpoint.
                The name can be up to 128 characters long and can consist of any UTF-8
characters.
description (str):
Optional. The description of the Endpoint.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to organize your Endpoints.
Label keys and values can be no longer than 64 characters
(Unicode codepoints), can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
See https://goo.gl/xmQnxf for more information and examples of labels.
traffic_split (Dict[str, int]):
Optional. A map from a DeployedModel's ID to the percentage of this Endpoint's
traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives no traffic.
The traffic percentage values must add up to 100, or map must be empty if
                the Endpoint is to not accept any traffic at the moment.
request_metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as metadata.
update_request_timeout (float):
Optional. The timeout for the update request in seconds.
Returns:
            Endpoint (aiplatform.Endpoint):
Updated endpoint resource.
Raises:
ValueError: If `labels` is not the correct format.
"""
self.wait()
current_endpoint_proto = self.gca_resource
copied_endpoint_proto = current_endpoint_proto.__class__(current_endpoint_proto)
update_mask: List[str] = []
if display_name:
utils.validate_display_name(display_name)
copied_endpoint_proto.display_name = display_name
update_mask.append("display_name")
if description:
copied_endpoint_proto.description = description
update_mask.append("description")
if labels:
utils.validate_labels(labels)
copied_endpoint_proto.labels = labels
update_mask.append("labels")
if traffic_split:
update_mask.append("traffic_split")
copied_endpoint_proto.traffic_split = traffic_split
update_mask = field_mask_pb2.FieldMask(paths=update_mask)
_LOGGER.log_action_start_against_resource(
"Updating",
"endpoint",
self,
)
self._gca_resource = self.api_client.update_endpoint(
endpoint=copied_endpoint_proto,
update_mask=update_mask,
metadata=request_metadata,
timeout=update_request_timeout,
)
_LOGGER.log_action_completed_against_resource("endpoint", "updated", self)
return self
def predict(
self,
instances: List,
parameters: Optional[Dict] = None,
timeout: Optional[float] = None,
use_raw_predict: Optional[bool] = False,
) -> Prediction:
"""Make a prediction against this Endpoint.
Args:
instances (List):
Required. The instances that are the input to the
prediction call. A DeployedModel may have an upper limit
on the number of instances it supports per request, and
when it is exceeded the prediction call errors in case
of AutoML Models, or, in case of customer created
Models, the behaviour is as documented by that Model.
The schema of any single instance may be specified via
Endpoint's DeployedModels'
[Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
``instance_schema_uri``.
parameters (Dict):
The parameters that govern the prediction. The schema of
the parameters may be specified via Endpoint's
DeployedModels' [Model's
][google.cloud.aiplatform.v1beta1.DeployedModel.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
``parameters_schema_uri``.
timeout (float): Optional. The timeout for this request in seconds.
use_raw_predict (bool):
Optional. Default value is False. If set to True, the underlying prediction call will be made
against Endpoint.raw_predict().
Returns:
prediction (aiplatform.Prediction):
Prediction with returned predictions and Model ID.
"""
self.wait()
if use_raw_predict:
raw_predict_response = self.raw_predict(
body=json.dumps({"instances": instances, "parameters": parameters}),
headers={"Content-Type": "application/json"},
)
json_response = raw_predict_response.json()
return Prediction(
predictions=json_response["predictions"],
deployed_model_id=raw_predict_response.headers[
_RAW_PREDICT_DEPLOYED_MODEL_ID_KEY
],
model_resource_name=raw_predict_response.headers[
_RAW_PREDICT_MODEL_RESOURCE_KEY
],
model_version_id=raw_predict_response.headers.get(
_RAW_PREDICT_MODEL_VERSION_ID_KEY, None
),
)
else:
prediction_response = self._prediction_client.predict(
endpoint=self._gca_resource.name,
instances=instances,
parameters=parameters,
timeout=timeout,
)
return Prediction(
predictions=[
json_format.MessageToDict(item)
for item in prediction_response.predictions.pb
],
deployed_model_id=prediction_response.deployed_model_id,
model_version_id=prediction_response.model_version_id,
model_resource_name=prediction_response.model,
)
def raw_predict(
self, body: bytes, headers: Dict[str, str]
) -> requests.models.Response:
"""Makes a prediction request using arbitrary headers.
Example usage:
my_endpoint = aiplatform.Endpoint(ENDPOINT_ID)
response = my_endpoint.raw_predict(
                body = b'{"instances":[{"feat_1":val_1, "feat_2":val_2}]}',
headers = {'Content-Type':'application/json'}
)
status_code = response.status_code
            results = json.loads(response.text)
Args:
body (bytes):
                The body of the prediction request in bytes. This must not exceed 1.5 MB per request.
headers (Dict[str, str]):
The header of the request as a dictionary. There are no restrictions on the header.
Returns:
A requests.models.Response object containing the status code and prediction results.
"""
if not self.authorized_session:
self.credentials._scopes = constants.base.DEFAULT_AUTHED_SCOPES
self.authorized_session = google_auth_requests.AuthorizedSession(
self.credentials
)
self.raw_predict_request_url = f"https://{self.location}-{constants.base.API_BASE_PATH}/v1/projects/{self.project}/locations/{self.location}/endpoints/{self.name}:rawPredict"
return self.authorized_session.post(self.raw_predict_request_url, body, headers)
def explain(
self,
instances: List[Dict],
parameters: Optional[Dict] = None,
deployed_model_id: Optional[str] = None,
timeout: Optional[float] = None,
) -> Prediction:
"""Make a prediction with explanations against this Endpoint.
Example usage:
response = my_endpoint.explain(instances=[...])
my_explanations = response.explanations
Args:
instances (List):
Required. The instances that are the input to the
prediction call. A DeployedModel may have an upper limit
on the number of instances it supports per request, and
when it is exceeded the prediction call errors in case
of AutoML Models, or, in case of customer created
Models, the behaviour is as documented by that Model.
The schema of any single instance may be specified via
Endpoint's DeployedModels'
[Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
``instance_schema_uri``.
parameters (Dict):
The parameters that govern the prediction. The schema of
the parameters may be specified via Endpoint's
DeployedModels' [Model's
][google.cloud.aiplatform.v1beta1.DeployedModel.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
``parameters_schema_uri``.
deployed_model_id (str):
Optional. If specified, this ExplainRequest will be served by the
chosen DeployedModel, overriding this Endpoint's traffic split.
timeout (float): Optional. The timeout for this request in seconds.
Returns:
prediction (aiplatform.Prediction):
Prediction with returned predictions, explanations, and Model ID.
"""
self.wait()
explain_response = self._prediction_client.explain(
endpoint=self.resource_name,
instances=instances,
parameters=parameters,
deployed_model_id=deployed_model_id,
timeout=timeout,
)
return Prediction(
predictions=[
json_format.MessageToDict(item)
for item in explain_response.predictions.pb
],
deployed_model_id=explain_response.deployed_model_id,
explanations=explain_response.explanations,
)
@classmethod
def list(
cls,
filter: Optional[str] = None,
order_by: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> List["models.Endpoint"]:
"""List all Endpoint resource instances.
Example Usage:
aiplatform.Endpoint.list(
            filter='labels.my_label="my_label_value" OR display_name!="old_endpoint"',
)
Args:
filter (str):
Optional. An expression for filtering the results of the request.
For field names both snake_case and camelCase are supported.
order_by (str):
Optional. A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for descending.
Supported fields: `display_name`, `create_time`, `update_time`
project (str):
Optional. Project to retrieve list from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve list from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to retrieve list. Overrides
credentials set in aiplatform.init.
Returns:
List[models.Endpoint]:
A list of Endpoint resource objects
"""
return cls._list_with_local_order(
cls_filter=lambda ep: not bool(
ep.network
), # `network` is empty for public Endpoints
filter=filter,
order_by=order_by,
project=project,
location=location,
credentials=credentials,
)
def list_models(self) -> List[gca_endpoint_compat.DeployedModel]:
"""Returns a list of the models deployed to this Endpoint.
Returns:
deployed_models (List[aiplatform.gapic.DeployedModel]):
A list of the models deployed in this Endpoint.
"""
self._sync_gca_resource()
return list(self._gca_resource.deployed_models)
def undeploy_all(self, sync: bool = True) -> "Endpoint":
"""Undeploys every model deployed to this Endpoint.
Args:
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
"""
self._sync_gca_resource()
models_to_undeploy = sorted( # Undeploy zero traffic models first
self._gca_resource.traffic_split.keys(),
key=lambda id: self._gca_resource.traffic_split[id],
)
for deployed_model in models_to_undeploy:
self._undeploy(deployed_model_id=deployed_model, sync=sync)
return self
def delete(self, force: bool = False, sync: bool = True) -> None:
"""Deletes this Vertex AI Endpoint resource. If force is set to True,
all models on this Endpoint will be undeployed prior to deletion.
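        Example usage (illustrative; force=True undeploys all models before
        deleting the Endpoint):
            my_endpoint.delete(force=True)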
Args:
force (bool):
                Optional. If force is set to True, all deployed models on this
Endpoint will be undeployed first. Default is False.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Raises:
FailedPrecondition: If models are deployed on this Endpoint and force = False.
"""
if force:
self.undeploy_all(sync=sync)
super().delete(sync=sync)
class PrivateEndpoint(Endpoint):
"""
Represents a Vertex AI PrivateEndpoint resource.
Read more [about private endpoints in the documentation.](https://cloud.google.com/vertex-ai/docs/predictions/using-private-endpoints)
"""
def __init__(
self,
endpoint_name: str,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
):
"""Retrieves a PrivateEndpoint resource.
Example usage:
my_private_endpoint = aiplatform.PrivateEndpoint(
endpoint_name="projects/123/locations/us-central1/endpoints/1234567891234567890"
)
or (when project and location are initialized)
my_private_endpoint = aiplatform.PrivateEndpoint(
endpoint_name="1234567891234567890"
)
Args:
endpoint_name (str):
Required. A fully-qualified endpoint resource name or endpoint ID.
Example: "projects/123/locations/us-central1/endpoints/my_endpoint_id" or
"my_endpoint_id" when project and location are initialized or passed.
project (str):
Optional. Project to retrieve endpoint from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve endpoint from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to upload this model. Overrides
credentials set in aiplatform.init.
Raises:
ValueError: If the Endpoint being retrieved is not a PrivateEndpoint.
ImportError: If there is an issue importing the `urllib3` package.
"""
try:
import urllib3
except ImportError:
raise ImportError(
"Cannot import the urllib3 HTTP client. Please install google-cloud-aiplatform[private_endpoints]."
)
super().__init__(
endpoint_name=endpoint_name,
project=project,
location=location,
credentials=credentials,
)
if not self.network:
raise ValueError(
"Please ensure the Endpoint being retrieved is a PrivateEndpoint."
)
self._http_client = urllib3.PoolManager()
@property
def predict_http_uri(self) -> Optional[str]:
"""HTTP path to send prediction requests to, used when calling `PrivateEndpoint.predict()`"""
if not self._gca_resource.deployed_models:
return None
return self._gca_resource.deployed_models[0].private_endpoints.predict_http_uri
@property
def explain_http_uri(self) -> Optional[str]:
"""HTTP path to send explain requests to, used when calling `PrivateEndpoint.explain()`"""
if not self._gca_resource.deployed_models:
return None
return self._gca_resource.deployed_models[0].private_endpoints.explain_http_uri
@property
def health_http_uri(self) -> Optional[str]:
"""HTTP path to send health check requests to, used when calling `PrivateEndpoint.health_check()`"""
if not self._gca_resource.deployed_models:
return None
return self._gca_resource.deployed_models[0].private_endpoints.health_http_uri
@classmethod
def create(
cls,
display_name: str,
project: Optional[str] = None,
location: Optional[str] = None,
network: Optional[str] = None,
description: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
credentials: Optional[auth_credentials.Credentials] = None,
encryption_spec_key_name: Optional[str] = None,
sync=True,
) -> "PrivateEndpoint":
"""Creates a new PrivateEndpoint.
Example usage:
my_private_endpoint = aiplatform.PrivateEndpoint.create(
display_name="my_endpoint_name",
project="my_project_id",
location="us-central1",
network="projects/123456789123/global/networks/my_vpc"
)
or (when project and location are initialized)
my_private_endpoint = aiplatform.PrivateEndpoint.create(
display_name="my_endpoint_name",
network="projects/123456789123/global/networks/my_vpc"
)
Args:
display_name (str):
Required. The user-defined name of the Endpoint.
                The name can be up to 128 characters long and can consist
of any UTF-8 characters.
project (str):
Optional. Project to retrieve endpoint from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve endpoint from. If not set, location
set in aiplatform.init will be used.
network (str):
Optional. The full name of the Compute Engine network to which
this Endpoint will be peered. E.g. "projects/123456789123/global/networks/my_vpc".
Private services access must already be configured for the network.
If left unspecified, the network set with aiplatform.init will be used.
description (str):
Optional. The description of the Endpoint.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Endpoints.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to upload this model. Overrides
credentials set in aiplatform.init.
encryption_spec_key_name (str):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Model and all sub-resources of this Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
endpoint (aiplatform.PrivateEndpoint):
Created endpoint.
Raises:
ValueError: A network must be instantiated when creating a PrivateEndpoint.
"""
api_client = cls._instantiate_client(location=location, credentials=credentials)
utils.validate_display_name(display_name)
if labels:
utils.validate_labels(labels)
project = project or initializer.global_config.project
location = location or initializer.global_config.location
network = network or initializer.global_config.network
if not network:
raise ValueError(
"Please provide required argument `network` or set network"
"using aiplatform.init(network=...)"
)
return cls._create(
api_client=api_client,
display_name=display_name,
project=project,
location=location,
description=description,
labels=labels,
credentials=credentials,
encryption_spec=initializer.global_config.get_encryption_spec(
encryption_spec_key_name=encryption_spec_key_name
),
network=network,
sync=sync,
)
@classmethod
def _construct_sdk_resource_from_gapic(
cls,
gapic_resource: proto.Message,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> "PrivateEndpoint":
"""Given a GAPIC PrivateEndpoint object, return the SDK representation.
Args:
gapic_resource (proto.Message):
A GAPIC representation of a PrivateEndpoint resource, usually
retrieved by a get_* or in a list_* API call.
project (str):
Optional. Project to construct Endpoint object from. If not set,
project set in aiplatform.init will be used.
location (str):
Optional. Location to construct Endpoint object from. If not set,
location set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to construct Endpoint.
Overrides credentials set in aiplatform.init.
Returns:
endpoint (aiplatform.PrivateEndpoint):
An initialized PrivateEndpoint resource.
Raises:
ImportError: If there is an issue importing the `urllib3` package.
"""
try:
import urllib3
except ImportError:
raise ImportError(
"Cannot import the urllib3 HTTP client. Please install google-cloud-aiplatform[private_endpoints]."
)
endpoint = cls._empty_constructor(
project=project, location=location, credentials=credentials
)
endpoint._gca_resource = gapic_resource
endpoint._http_client = urllib3.PoolManager()
return endpoint
def _http_request(
self,
method: str,
url: str,
body: Optional[Dict[Any, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> "urllib3.response.HTTPResponse": # type: ignore # noqa: F821
"""Helper function used to perform HTTP requests for PrivateEndpoint.
Args:
method (str):
Required. The HTTP request method to use. Example: "POST" or "GET"
url (str):
Required. The url used to send requests and get responses from.
body (Dict[Any, Any]):
Optional. Data sent to the url in the HTTP request. For a PrivateEndpoint,
an instance is sent and a prediction response is expected.
headers (Dict[str, str]):
Optional. Header in the HTTP request.
Returns:
urllib3.response.HTTPResponse:
A HTTP Response container.
Raises:
ImportError: If there is an issue importing the `urllib3` package.
            RuntimeError: If the HTTP request returns an error status code.
            RuntimeError: If a connection could not be established with the
                PrivateEndpoint and the HTTP request could not be made.
"""
try:
import urllib3
except ImportError:
raise ImportError(
"Cannot import the urllib3 HTTP client. Please install google-cloud-aiplatform[private_endpoints]."
)
try:
response = self._http_client.request(
method=method, url=url, body=body, headers=headers
)
if response.status < _SUCCESSFUL_HTTP_RESPONSE:
return response
else:
raise RuntimeError(
f"{response.status} - Failed to make request, see response: "
+ response.data.decode("utf-8")
)
except urllib3.exceptions.MaxRetryError as exc:
raise RuntimeError(
f"Failed to make a {method} request to this URI, make sure: "
" this call is being made inside the network this PrivateEndpoint is peered to "
f"({self._gca_resource.network}), calling health_check() returns True, "
f"and that {url} is a valid URL."
) from exc
def predict(self, instances: List, parameters: Optional[Dict] = None) -> Prediction:
"""Make a prediction against this PrivateEndpoint using a HTTP request.
This method must be called within the network the PrivateEndpoint is peered to.
Otherwise, the predict() call will fail with error code 404. To check, use `PrivateEndpoint.network`.
Example usage:
response = my_private_endpoint.predict(instances=[...])
my_predictions = response.predictions
Args:
instances (List):
Required. The instances that are the input to the
                prediction call. Instance types must be JSON serializable.
A DeployedModel may have an upper limit
on the number of instances it supports per request, and
when it is exceeded the prediction call errors in case
of AutoML Models, or, in case of customer created
Models, the behaviour is as documented by that Model.
The schema of any single instance may be specified via
Endpoint's DeployedModels'
[Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
``instance_schema_uri``.
parameters (Dict):
The parameters that govern the prediction. The schema of
the parameters may be specified via Endpoint's
DeployedModels' [Model's
][google.cloud.aiplatform.v1beta1.DeployedModel.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
``parameters_schema_uri``.
Returns:
prediction (aiplatform.Prediction):
Prediction object with returned predictions and Model ID.
Raises:
RuntimeError: If a model has not been deployed a request cannot be made.
"""
self.wait()
self._sync_gca_resource_if_skipped()
if not self._gca_resource.deployed_models:
raise RuntimeError(
"Cannot make a predict request because a model has not been deployed on this Private"
"Endpoint. Please ensure a model has been deployed."
)
response = self._http_request(
method="POST",
url=self.predict_http_uri,
body=json.dumps({"instances": instances}),
headers={"Content-Type": "application/json"},
)
prediction_response = json.loads(response.data)
return Prediction(
predictions=prediction_response.get("predictions"),
deployed_model_id=self._gca_resource.deployed_models[0].id,
)
def raw_predict(
self, body: bytes, headers: Dict[str, str]
) -> requests.models.Response:
"""Make a prediction request using arbitrary headers.
This method must be called within the network the PrivateEndpoint is peered to.
        Otherwise, the request will fail with error code 404. To check, use `PrivateEndpoint.network`.
Example usage:
my_endpoint = aiplatform.PrivateEndpoint(ENDPOINT_ID)
response = my_endpoint.raw_predict(
                body = b'{"instances":[{"feat_1":val_1, "feat_2":val_2}]}',
headers = {'Content-Type':'application/json'}
)
status_code = response.status_code
            results = json.loads(response.text)
Args:
body (bytes):
                The body of the prediction request in bytes. This must not exceed 1.5 MB per request.
headers (Dict[str, str]):
The header of the request as a dictionary. There are no restrictions on the header.
Returns:
A requests.models.Response object containing the status code and prediction results.
"""
self.wait()
return self._http_request(
method="POST",
url=self.predict_http_uri,
body=body,
headers=headers,
)
def explain(self):
raise NotImplementedError(
f"{self.__class__.__name__} class does not support 'explain' as of now."
)
def health_check(self) -> bool:
"""
        Makes a request to this PrivateEndpoint's health check URI. Must be called
        within the network that this PrivateEndpoint is peered to.
Example Usage:
if my_private_endpoint.health_check():
print("PrivateEndpoint is healthy!")
Returns:
bool:
                True if the health check succeeds and calls can be made to this PrivateEndpoint.
Raises:
RuntimeError: If a model has not been deployed a request cannot be made.
"""
self.wait()
self._sync_gca_resource_if_skipped()
if not self._gca_resource.deployed_models:
raise RuntimeError(
"Cannot make a health check request because a model has not been deployed on this Private"
"Endpoint. Please ensure a model has been deployed."
)
response = self._http_request(
method="GET",
url=self.health_http_uri,
)
return response.status < _SUCCESSFUL_HTTP_RESPONSE
@classmethod
def list(
cls,
filter: Optional[str] = None,
order_by: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> List["models.PrivateEndpoint"]:
"""List all PrivateEndpoint resource instances.
Example Usage:
my_private_endpoints = aiplatform.PrivateEndpoint.list()
or
my_private_endpoints = aiplatform.PrivateEndpoint.list(
            filter='labels.my_label="my_label_value" OR display_name!="old_endpoint"',
)
Args:
filter (str):
Optional. An expression for filtering the results of the request.
For field names both snake_case and camelCase are supported.
order_by (str):
Optional. A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for descending.
Supported fields: `display_name`, `create_time`, `update_time`
project (str):
Optional. Project to retrieve list from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve list from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to retrieve list. Overrides
credentials set in aiplatform.init.
Returns:
List[models.PrivateEndpoint]:
A list of PrivateEndpoint resource objects.
"""
return cls._list_with_local_order(
cls_filter=lambda ep: bool(
ep.network
), # Only PrivateEndpoints have a network set
filter=filter,
order_by=order_by,
project=project,
location=location,
credentials=credentials,
)
def deploy(
self,
model: "Model",
deployed_model_display_name: Optional[str] = None,
machine_type: Optional[str] = None,
min_replica_count: int = 1,
max_replica_count: int = 1,
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
service_account: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
sync=True,
) -> None:
"""Deploys a Model to the PrivateEndpoint.
Example Usage:
my_private_endpoint.deploy(
model=my_model
)
Args:
model (aiplatform.Model):
Required. Model to be deployed.
deployed_model_display_name (str):
Optional. The display name of the DeployedModel. If not provided
upon creation, the Model's display_name is used.
machine_type (str):
Optional. The type of machine. Not specifying machine type will
                result in the model being deployed with automatic resources.
min_replica_count (int):
Optional. The minimum number of machine replicas this deployed
                model will always be deployed on. If traffic against it increases,
it may dynamically be deployed onto more replicas, and as traffic
decreases, some of these extra replicas may be freed.
max_replica_count (int):
Optional. The maximum number of replicas this deployed model may
be deployed on when the traffic against it increases. If requested
value is too large, the deployment will error, but if deployment
succeeds then the ability to scale the model to that many replicas
is guaranteed (barring service outages). If traffic against the
deployed model increases beyond what its replicas at maximum may
handle, a portion of the traffic will be dropped. If this value
is not provided, the larger value of min_replica_count or 1 will
be used. If value provided is smaller than min_replica_count, it
will automatically be increased to be min_replica_count.
accelerator_type (str):
Optional. Hardware accelerator type. Must also set accelerator_count if used.
One of ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4
accelerator_count (int):
Optional. The number of accelerators to attach to a worker replica.
service_account (str):
The service account that the DeployedModel's container runs as. Specify the
email address of the service account. If this service account is not
specified, the container runs as a service account that doesn't have access
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
sync (bool):
Whether to execute this method synchronously. If False, this method
                will be executed in a concurrent Future, and any downstream object will
be immediately returned and synced when the Future has completed.
"""
self._validate_deploy_args(
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
accelerator_type=accelerator_type,
deployed_model_display_name=deployed_model_display_name,
traffic_split=None,
traffic_percentage=100,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
)
self._deploy(
model=model,
deployed_model_display_name=deployed_model_display_name,
traffic_percentage=100,
traffic_split=None,
machine_type=machine_type,
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
metadata=metadata,
sync=sync,
)
def undeploy(
self,
deployed_model_id: str,
sync=True,
) -> None:
"""Undeploys a deployed model from the PrivateEndpoint.
Example Usage:
my_private_endpoint.undeploy(
deployed_model_id="1234567891232567891"
)
or
my_deployed_model_id = my_private_endpoint.list_models()[0].id
my_private_endpoint.undeploy(
deployed_model_id=my_deployed_model_id
)
Args:
deployed_model_id (str):
Required. The ID of the DeployedModel to be undeployed from the
PrivateEndpoint. Use PrivateEndpoint.list_models() to get the
deployed model ID.
sync (bool):
Whether to execute this method synchronously. If False, this method
                will be executed in a concurrent Future, and any downstream object will
be immediately returned and synced when the Future has completed.
"""
self._sync_gca_resource_if_skipped()
# TODO(b/211351292): Add traffic splitting for PrivateEndpoint
self._undeploy(
deployed_model_id=deployed_model_id,
traffic_split=None,
sync=sync,
)
def delete(self, force: bool = False, sync: bool = True) -> None:
"""Deletes this Vertex AI PrivateEndpoint resource. If force is set to True,
all models on this PrivateEndpoint will be undeployed prior to deletion.
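        Example Usage (a minimal sketch; ``my_private_endpoint`` is assumed to be an
        existing PrivateEndpoint instance):
            my_private_endpoint.delete(force=True)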
Args:
force (bool):
                Optional. If force is set to True, all deployed models on this
Endpoint will be undeployed first. Default is False.
sync (bool):
Whether to execute this method synchronously. If False, this method
                will be executed in a concurrent Future, and any downstream object will
be immediately returned and synced when the Future has completed.
Raises:
FailedPrecondition: If models are deployed on this Endpoint and force = False.
"""
if force and self._gca_resource.deployed_models:
self.undeploy(
deployed_model_id=self._gca_resource.deployed_models[0].id,
sync=sync,
)
super().delete(force=False, sync=sync)
class Model(base.VertexAiResourceNounWithFutureManager):
client_class = utils.ModelClientWithOverride
_resource_noun = "models"
_getter_method = "get_model"
_list_method = "list_models"
_delete_method = "delete_model"
_parse_resource_name_method = "parse_model_path"
_format_resource_name_method = "model_path"
@property
def uri(self) -> Optional[str]:
"""Path to the directory containing the Model artifact and any of its
supporting files. Not present for AutoML Models."""
self._assert_gca_resource_is_available()
return self._gca_resource.artifact_uri or None
@property
def description(self) -> str:
"""Description of the model."""
self._assert_gca_resource_is_available()
return self._gca_resource.description
@property
def supported_export_formats(
self,
) -> Dict[str, List[gca_model_compat.Model.ExportFormat.ExportableContent]]:
"""The formats and content types in which this Model may be exported.
If empty, this Model is not available for export.
For example, if this model can be exported as a Tensorflow SavedModel and
have the artifacts written to Cloud Storage, the expected value would be:
{'tf-saved-model': [<ExportableContent.ARTIFACT: 1>]}
"""
self._assert_gca_resource_is_available()
return {
export_format.id: [
gca_model_compat.Model.ExportFormat.ExportableContent(content)
for content in export_format.exportable_contents
]
for export_format in self._gca_resource.supported_export_formats
}
@property
def supported_deployment_resources_types(
self,
) -> List[aiplatform.gapic.Model.DeploymentResourcesType]:
"""List of deployment resource types accepted for this Model.
When this Model is deployed, its prediction resources are described by
the `prediction_resources` field of the objects returned by
`Endpoint.list_models()`. Because not all Models support all resource
configuration types, the configuration types this Model supports are
listed here.
If no configuration types are listed, the Model cannot be
deployed to an `Endpoint` and does not support online predictions
(`Endpoint.predict()` or `Endpoint.explain()`). Such a Model can serve
predictions by using a `BatchPredictionJob`, if it has at least one entry
each in `Model.supported_input_storage_formats` and
`Model.supported_output_storage_formats`."""
self._assert_gca_resource_is_available()
return list(self._gca_resource.supported_deployment_resources_types)
@property
def supported_input_storage_formats(self) -> List[str]:
"""The formats this Model supports in the `input_config` field of a
`BatchPredictionJob`. If `Model.predict_schemata.instance_schema_uri`
exists, the instances should be given as per that schema.
[Read the docs for more on batch prediction formats](https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions#batch_request_input)
If this Model doesn't support any of these formats it means it cannot be
used with a `BatchPredictionJob`. However, if it has
`supported_deployment_resources_types`, it could serve online predictions
by using `Endpoint.predict()` or `Endpoint.explain()`.
"""
self._assert_gca_resource_is_available()
return list(self._gca_resource.supported_input_storage_formats)
@property
def supported_output_storage_formats(self) -> List[str]:
"""The formats this Model supports in the `output_config` field of a
`BatchPredictionJob`.
If both `Model.predict_schemata.instance_schema_uri` and
`Model.predict_schemata.prediction_schema_uri` exist, the predictions
are returned together with their instances. In other words, the
prediction has the original instance data first, followed by the actual
prediction content (as per the schema).
[Read the docs for more on batch prediction formats](https://cloud.google.com/vertex-ai/docs/predictions/batch-predictions)
If this Model doesn't support any of these formats it means it cannot be
used with a `BatchPredictionJob`. However, if it has
`supported_deployment_resources_types`, it could serve online predictions
by using `Endpoint.predict()` or `Endpoint.explain()`.
"""
self._assert_gca_resource_is_available()
return list(self._gca_resource.supported_output_storage_formats)
@property
def predict_schemata(self) -> Optional[aiplatform.gapic.PredictSchemata]:
"""The schemata that describe formats of the Model's predictions and
explanations, if available."""
self._assert_gca_resource_is_available()
return getattr(self._gca_resource, "predict_schemata")
@property
def training_job(self) -> Optional["aiplatform.training_jobs._TrainingJob"]:
"""The TrainingJob that uploaded this Model, if any.
Raises:
api_core.exceptions.NotFound: If the Model's training job resource
cannot be found on the Vertex service.
"""
self._assert_gca_resource_is_available()
job_name = getattr(self._gca_resource, "training_pipeline")
if not job_name:
return None
try:
return aiplatform.training_jobs._TrainingJob._get_and_return_subclass(
resource_name=job_name,
project=self.project,
location=self.location,
credentials=self.credentials,
)
except api_exceptions.NotFound:
raise api_exceptions.NotFound(
f"The training job used to create this model could not be found: {job_name}"
)
@property
def container_spec(self) -> Optional[aiplatform.gapic.ModelContainerSpec]:
"""The specification of the container that is to be used when deploying
this Model. Not present for AutoML Models."""
self._assert_gca_resource_is_available()
return getattr(self._gca_resource, "container_spec")
@property
def version_id(self) -> str:
"""The version ID of the model.
A new version is committed when a new model version is uploaded or
trained under an existing model id. It is an auto-incrementing decimal
number in string representation."""
self._assert_gca_resource_is_available()
return getattr(self._gca_resource, "version_id")
@property
def version_aliases(self) -> Sequence[str]:
"""User provided version aliases so that a model version can be referenced via
        alias (i.e. projects/{project}/locations/{location}/models/{model_id}@{version_alias})
        instead of the auto-generated version ID (i.e.
        projects/{project}/locations/{location}/models/{model_id}@{version_id}).
The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9] to distinguish from
version_id. A default version alias will be created for the first version
of the model, and there must be exactly one default version alias for a model."""
self._assert_gca_resource_is_available()
return getattr(self._gca_resource, "version_aliases")
@property
def version_create_time(self) -> timestamp_pb2.Timestamp:
"""Timestamp when this version was created."""
self._assert_gca_resource_is_available()
return getattr(self._gca_resource, "version_create_time")
@property
def version_update_time(self) -> timestamp_pb2.Timestamp:
"""Timestamp when this version was updated."""
self._assert_gca_resource_is_available()
return getattr(self._gca_resource, "version_update_time")
@property
def version_description(self) -> str:
"""The description of this version."""
self._assert_gca_resource_is_available()
return getattr(self._gca_resource, "version_description")
@property
def resource_name(self) -> str:
"""Full qualified resource name, without any version ID."""
self._assert_gca_resource_is_available()
return ModelRegistry._parse_versioned_name(self._gca_resource.name)[0]
@property
def name(self) -> str:
"""Name of this resource."""
self._assert_gca_resource_is_available()
return ModelRegistry._parse_versioned_name(super().name)[0]
@property
def versioned_resource_name(self) -> str:
"""The fully-qualified resource name, including the version ID. For example,
projects/{project}/locations/{location}/models/{model_id}@{version_id}
"""
self._assert_gca_resource_is_available()
return ModelRegistry._get_versioned_name(
self.resource_name,
self.version_id,
)
@property
def versioning_registry(self) -> "ModelRegistry":
"""The registry of model versions associated with this
Model instance."""
return self._registry
def __init__(
self,
model_name: str,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
version: Optional[str] = None,
):
"""Retrieves the model resource and instantiates its representation.
Args:
model_name (str):
Required. A fully-qualified model resource name or model ID.
Example: "projects/123/locations/us-central1/models/456" or
"456" when project and location are initialized or passed.
May optionally contain a version ID or version alias in
{model_name}@{version} form. See version arg.
project (str):
Optional project to retrieve model from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional location to retrieve model from. If not set, location
set in aiplatform.init will be used.
            credentials (auth_credentials.Credentials):
                Optional. Custom credentials to use to retrieve this model. If not set,
credentials set in aiplatform.init will be used.
version (str):
Optional. Version ID or version alias.
When set, the specified model version will be targeted
unless overridden in method calls.
When not set, the model with the "default" alias will
be targeted unless overridden in method calls.
No behavior change if only one version of a model exists.
Raises:
ValueError: If `version` is passed alongside a model_name referencing a different version.
"""
# If the version was passed in model_name, parse it
model_name, parsed_version = ModelRegistry._parse_versioned_name(model_name)
if parsed_version:
if version and version != parsed_version:
raise ValueError(
f"A version of {version} was passed that conflicts with the version of {parsed_version} in the model_name."
)
version = parsed_version
super().__init__(
project=project,
location=location,
credentials=credentials,
resource_name=model_name,
)
# Model versions can include @{version} in the resource name.
self._resource_id_validator = super()._revisioned_resource_id_validator
# Create a versioned model_name, if it exists, for getting the GCA model
versioned_model_name = ModelRegistry._get_versioned_name(model_name, version)
self._gca_resource = self._get_gca_resource(resource_name=versioned_model_name)
# Create ModelRegistry with the unversioned resource name
self._registry = ModelRegistry(
self.resource_name,
location=location,
project=project,
credentials=credentials,
)
def update(
self,
display_name: Optional[str] = None,
description: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
) -> "Model":
"""Updates a model.
Example usage:
my_model = my_model.update(
display_name="my-model",
description="my description",
labels={'key': 'value'},
)
Args:
display_name (str):
The display name of the Model. The name can be up to 128
                characters long and can consist of any UTF-8 characters.
description (str):
The description of the model.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
Returns:
model (aiplatform.Model):
Updated model resource.
Raises:
ValueError: If `labels` is not the correct format.
"""
self.wait()
current_model_proto = self.gca_resource
copied_model_proto = current_model_proto.__class__(current_model_proto)
update_mask: List[str] = []
# Updates to base model properties cannot occur if a versioned model is passed.
# Use the unversioned model resource name.
copied_model_proto.name = self.resource_name
if display_name:
utils.validate_display_name(display_name)
copied_model_proto.display_name = display_name
update_mask.append("display_name")
if description:
copied_model_proto.description = description
update_mask.append("description")
if labels:
utils.validate_labels(labels)
copied_model_proto.labels = labels
update_mask.append("labels")
update_mask = field_mask_pb2.FieldMask(paths=update_mask)
self.api_client.update_model(model=copied_model_proto, update_mask=update_mask)
self._sync_gca_resource()
return self
# TODO(b/170979552) Add support for predict schemata
# TODO(b/170979926) Add support for metadata and metadata schema
@classmethod
@base.optional_sync()
def upload(
cls,
serving_container_image_uri: Optional[str] = None,
*,
artifact_uri: Optional[str] = None,
model_id: Optional[str] = None,
parent_model: Optional[str] = None,
is_default_version: bool = True,
version_aliases: Optional[Sequence[str]] = None,
version_description: Optional[str] = None,
serving_container_predict_route: Optional[str] = None,
serving_container_health_route: Optional[str] = None,
description: Optional[str] = None,
serving_container_command: Optional[Sequence[str]] = None,
serving_container_args: Optional[Sequence[str]] = None,
serving_container_environment_variables: Optional[Dict[str, str]] = None,
serving_container_ports: Optional[Sequence[int]] = None,
local_model: Optional["LocalModel"] = None,
instance_schema_uri: Optional[str] = None,
parameters_schema_uri: Optional[str] = None,
prediction_schema_uri: Optional[str] = None,
explanation_metadata: Optional[explain.ExplanationMetadata] = None,
explanation_parameters: Optional[explain.ExplanationParameters] = None,
display_name: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
sync=True,
upload_request_timeout: Optional[float] = None,
) -> "Model":
"""Uploads a model and returns a Model representing the uploaded Model
resource.
Example usage:
my_model = Model.upload(
display_name="my-model",
artifact_uri="gs://my-model/saved-model",
serving_container_image_uri="tensorflow/serving"
)
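        A sketch of uploading a new version of an existing model (the parent model ID
        and version alias below are placeholders):
            my_model_v2 = Model.upload(
                display_name="my-model",
                artifact_uri="gs://my-model/saved-model-v2",
                serving_container_image_uri="tensorflow/serving",
                parent_model="my-model-id",
                version_aliases=["experimental"],
            )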
Args:
serving_container_image_uri (str):
Optional. The URI of the Model serving container. This parameter is required
if the parameter `local_model` is not specified.
artifact_uri (str):
Optional. The path to the directory containing the Model artifact and
any of its supporting files. Leave blank for custom container prediction.
Not present for AutoML Models.
model_id (str):
Optional. The ID to use for the uploaded Model, which will
become the final component of the model resource name.
This value may be up to 63 characters, and valid characters
are `[a-z0-9_-]`. The first character cannot be a number or hyphen.
parent_model (str):
Optional. The resource name or model ID of an existing model that the
newly-uploaded model will be a version of.
Only set this field when uploading a new version of an existing model.
is_default_version (bool):
Optional. When set to True, the newly uploaded model version will
automatically have alias "default" included. Subsequent uses of
this model without a version specified will use this "default" version.
When set to False, the "default" alias will not be moved.
Actions targeting the newly-uploaded model version will need
to specifically reference this version by ID or alias.
New model uploads, i.e. version 1, will always be "default" aliased.
version_aliases (Sequence[str]):
Optional. User provided version aliases so that a model version
can be referenced via alias instead of auto-generated version ID.
A default version alias will be created for the first version of the model.
The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]
version_description (str):
Optional. The description of the model version being uploaded.
serving_container_predict_route (str):
Optional. An HTTP path to send prediction requests to the container, and
which must be supported by it. If not specified a default HTTP path will
be used by Vertex AI.
serving_container_health_route (str):
Optional. An HTTP path to send health check requests to the container, and which
must be supported by it. If not specified a standard HTTP path will be
used by Vertex AI.
description (str):
The description of the model.
serving_container_command: Optional[Sequence[str]]=None,
The command with which the container is run. Not executed within a
shell. The Docker image's ENTRYPOINT is used if this is not provided.
Variable references $(VAR_NAME) are expanded using the container's
environment. If a variable cannot be resolved, the reference in the
input string will be unchanged. The $(VAR_NAME) syntax can be escaped
                with a double $$, i.e. $$(VAR_NAME). Escaped references will never be
expanded, regardless of whether the variable exists or not.
serving_container_args: Optional[Sequence[str]]=None,
The arguments to the command. The Docker image's CMD is used if this is
not provided. Variable references $(VAR_NAME) are expanded using the
container's environment. If a variable cannot be resolved, the reference
in the input string will be unchanged. The $(VAR_NAME) syntax can be
                escaped with a double $$, i.e. $$(VAR_NAME). Escaped references will
never be expanded, regardless of whether the variable exists or not.
serving_container_environment_variables: Optional[Dict[str, str]]=None,
The environment variables that are to be present in the container.
Should be a dictionary where keys are environment variable names
and values are environment variable values for those names.
serving_container_ports: Optional[Sequence[int]]=None,
Declaration of ports that are exposed by the container. This field is
                primarily informational; it gives Vertex AI information about the
                network connections the container uses. Whether or not a port is listed here has
                no impact on whether the port is actually exposed; any port listening on
the default "0.0.0.0" address inside a container will be accessible from
the network.
local_model (Optional[LocalModel]):
Optional. A LocalModel instance that includes a `serving_container_spec`.
If provided, the `serving_container_spec` of the LocalModel instance
will overwrite the values of all other serving container parameters.
instance_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
                is used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
                where the user only has read access.
parameters_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
                location where the user only has read access.
prediction_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
                produced by this Model, which is returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
                where the user only has read access.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
display_name (str):
Optional. The display name of the Model. The name can be up to 128
                characters long and can consist of any UTF-8 characters.
project: Optional[str]=None,
Project to upload this model to. Overrides project set in
aiplatform.init.
location: Optional[str]=None,
Location to upload this model to. Overrides location set in
aiplatform.init.
credentials: Optional[auth_credentials.Credentials]=None,
Custom credentials to use to upload this model. Overrides credentials
set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Model and all sub-resources of this Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
staging_bucket (str):
Optional. Bucket to stage local model artifacts. Overrides
staging_bucket set in aiplatform.init.
upload_request_timeout (float):
Optional. The timeout for the upload request in seconds.
Returns:
model (aiplatform.Model):
Instantiated representation of the uploaded model resource.
Raises:
ValueError: If explanation_metadata is specified while explanation_parameters
is not.
Also if model directory does not contain a supported model file.
If `local_model` is specified but `serving_container_spec.image_uri`
in the `local_model` is None.
If `local_model` is not specified and `serving_container_image_uri`
is None.
"""
if not display_name:
display_name = cls._generate_display_name()
utils.validate_display_name(display_name)
if labels:
utils.validate_labels(labels)
if bool(explanation_metadata) and not bool(explanation_parameters):
raise ValueError(
"To get model explanation, `explanation_parameters` must be specified."
)
appended_user_agent = None
if local_model:
container_spec = local_model.get_serving_container_spec()
appended_user_agent = [prediction_constants.CUSTOM_PREDICTION_ROUTINES]
else:
if not serving_container_image_uri:
raise ValueError(
"The parameter `serving_container_image_uri` is required "
"if no `local_model` is provided."
)
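            # No LocalModel was provided, so assemble a ModelContainerSpec from the
            # individual serving_container_* arguments below.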
env = None
ports = None
if serving_container_environment_variables:
env = [
gca_env_var_compat.EnvVar(name=str(key), value=str(value))
for key, value in serving_container_environment_variables.items()
]
if serving_container_ports:
ports = [
gca_model_compat.Port(container_port=port)
for port in serving_container_ports
]
container_spec = gca_model_compat.ModelContainerSpec(
image_uri=serving_container_image_uri,
command=serving_container_command,
args=serving_container_args,
env=env,
ports=ports,
predict_route=serving_container_predict_route,
health_route=serving_container_health_route,
)
model_predict_schemata = None
if any([instance_schema_uri, parameters_schema_uri, prediction_schema_uri]):
model_predict_schemata = gca_model_compat.PredictSchemata(
instance_schema_uri=instance_schema_uri,
parameters_schema_uri=parameters_schema_uri,
prediction_schema_uri=prediction_schema_uri,
)
# TODO(b/182388545) initializer.global_config.get_encryption_spec from a sync function
encryption_spec = initializer.global_config.get_encryption_spec(
encryption_spec_key_name=encryption_spec_key_name,
)
parent_model = ModelRegistry._get_true_version_parent(
location=location, project=project, parent_model=parent_model
)
version_aliases = ModelRegistry._get_true_alias_list(
version_aliases=version_aliases, is_default_version=is_default_version
)
managed_model = gca_model_compat.Model(
display_name=display_name,
description=description,
version_aliases=version_aliases,
version_description=version_description,
container_spec=container_spec,
predict_schemata=model_predict_schemata,
labels=labels,
encryption_spec=encryption_spec,
)
if artifact_uri and not artifact_uri.startswith("gs://"):
model_dir = pathlib.Path(artifact_uri)
# Validating the model directory
if not model_dir.exists():
raise ValueError(f"artifact_uri path does not exist: '{artifact_uri}'")
PREBUILT_IMAGE_RE = "(us|europe|asia)-docker.pkg.dev/vertex-ai/prediction/"
if re.match(PREBUILT_IMAGE_RE, serving_container_image_uri):
if not model_dir.is_dir():
raise ValueError(
f"artifact_uri path must be a directory: '{artifact_uri}' when using prebuilt image '{serving_container_image_uri}'"
)
if not any(
(model_dir / file_name).exists()
for file_name in _SUPPORTED_MODEL_FILE_NAMES
):
raise ValueError(
"artifact_uri directory does not contain any supported model files. "
f"When using a prebuilt serving image, the upload method only supports the following model files: '{_SUPPORTED_MODEL_FILE_NAMES}'"
)
# Uploading the model
staged_data_uri = gcs_utils.stage_local_data_in_gcs(
data_path=str(model_dir),
staging_gcs_dir=staging_bucket,
project=project,
location=location,
credentials=credentials,
)
artifact_uri = staged_data_uri
if artifact_uri:
managed_model.artifact_uri = artifact_uri
# Override explanation_spec if required field is provided
if explanation_parameters:
explanation_spec = gca_endpoint_compat.explanation.ExplanationSpec()
explanation_spec.parameters = explanation_parameters
if explanation_metadata:
explanation_spec.metadata = explanation_metadata
managed_model.explanation_spec = explanation_spec
request = gca_model_service_compat.UploadModelRequest(
parent=initializer.global_config.common_location_path(project, location),
model=managed_model,
parent_model=parent_model,
model_id=model_id,
)
api_client = cls._instantiate_client(
location, credentials, appended_user_agent=appended_user_agent
)
lro = api_client.upload_model(
request=request,
timeout=upload_request_timeout,
)
_LOGGER.log_create_with_lro(cls, lro)
model_upload_response = lro.result()
this_model = cls(
model_upload_response.model, version=model_upload_response.model_version_id
)
_LOGGER.log_create_complete(cls, this_model._gca_resource, "model")
return this_model
def deploy(
self,
endpoint: Optional[Union["Endpoint", "PrivateEndpoint"]] = None,
deployed_model_display_name: Optional[str] = None,
traffic_percentage: Optional[int] = 0,
traffic_split: Optional[Dict[str, int]] = None,
machine_type: Optional[str] = None,
min_replica_count: int = 1,
max_replica_count: int = 1,
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
service_account: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
encryption_spec_key_name: Optional[str] = None,
network: Optional[str] = None,
sync=True,
deploy_request_timeout: Optional[float] = None,
autoscaling_target_cpu_utilization: Optional[int] = None,
autoscaling_target_accelerator_duty_cycle: Optional[int] = None,
) -> Union[Endpoint, PrivateEndpoint]:
"""Deploys model to endpoint. Endpoint will be created if unspecified.
Args:
endpoint (Union[Endpoint, PrivateEndpoint]):
Optional. Public or private Endpoint to deploy model to. If not specified,
endpoint display name will be model display name+'_endpoint'.
deployed_model_display_name (str):
Optional. The display name of the DeployedModel. If not provided
upon creation, the Model's display_name is used.
traffic_percentage (int):
Optional. Desired traffic to newly deployed model. Defaults to
0 if there are pre-existing deployed models. Defaults to 100 if
there are no pre-existing deployed models. Negative values should
not be provided. Traffic of previously deployed models at the endpoint
will be scaled down to accommodate new deployed model's traffic.
Should not be provided if traffic_split is provided.
traffic_split (Dict[str, int]):
Optional. A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives
no traffic. The traffic percentage values must add up to 100, or
                the map must be empty if the Endpoint is not to accept any traffic at
                the moment. The key for the model being deployed is "0". Should not be
provided if traffic_percentage is provided.
machine_type (str):
Optional. The type of machine. Not specifying machine type will
                result in the model being deployed with automatic resources.
min_replica_count (int):
Optional. The minimum number of machine replicas this deployed
model will be always deployed on. If traffic against it increases,
it may dynamically be deployed onto more replicas, and as traffic
decreases, some of these extra replicas may be freed.
max_replica_count (int):
Optional. The maximum number of replicas this deployed model may
be deployed on when the traffic against it increases. If requested
value is too large, the deployment will error, but if deployment
succeeds then the ability to scale the model to that many replicas
is guaranteed (barring service outages). If traffic against the
deployed model increases beyond what its replicas at maximum may
handle, a portion of the traffic will be dropped. If this value
                is not provided, the larger value of min_replica_count or 1 will
be used.
accelerator_type (str):
Optional. Hardware accelerator type. Must also set accelerator_count if used.
One of ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4
accelerator_count (int):
Optional. The number of accelerators to attach to a worker replica.
service_account (str):
The service account that the DeployedModel's container runs as. Specify the
email address of the service account. If this service account is not
specified, the container runs as a service account that doesn't have access
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
network (str):
Optional. The full name of the Compute Engine network to which
the Endpoint, if created, will be peered to. E.g. "projects/12345/global/networks/myVPC".
Private services access must already be configured for the network.
If set or aiplatform.init(network=...) has been set, a PrivateEndpoint will be created.
If left unspecified, an Endpoint will be created. Read more about PrivateEndpoints
[in the documentation](https://cloud.google.com/vertex-ai/docs/predictions/using-private-endpoints).
sync (bool):
Whether to execute this method synchronously. If False, this method
                will be executed in a concurrent Future, and any downstream object will
be immediately returned and synced when the Future has completed.
deploy_request_timeout (float):
Optional. The timeout for the deploy request in seconds.
autoscaling_target_cpu_utilization (int):
Optional. Target CPU Utilization to use for Autoscaling Replicas.
A default value of 60 will be used if not specified.
autoscaling_target_accelerator_duty_cycle (int):
Optional. Target Accelerator Duty Cycle.
Must also set accelerator_type and accelerator_count if specified.
A default value of 60 will be used if not specified.
Returns:
endpoint (Union[Endpoint, PrivateEndpoint]):
Endpoint with the deployed model.
Raises:
ValueError: If `traffic_split` is set for PrivateEndpoint.
"""
network = network or initializer.global_config.network
Endpoint._validate_deploy_args(
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
accelerator_type=accelerator_type,
deployed_model_display_name=deployed_model_display_name,
traffic_split=traffic_split,
traffic_percentage=traffic_percentage,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
)
if isinstance(endpoint, PrivateEndpoint):
if traffic_split:
raise ValueError(
"Traffic splitting is not yet supported for PrivateEndpoint. "
"Try calling deploy() without providing `traffic_split`. "
"A maximum of one model can be deployed to each private Endpoint."
)
return self._deploy(
endpoint=endpoint,
deployed_model_display_name=deployed_model_display_name,
traffic_percentage=traffic_percentage,
traffic_split=traffic_split,
machine_type=machine_type,
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
metadata=metadata,
encryption_spec_key_name=encryption_spec_key_name
or initializer.global_config.encryption_spec_key_name,
network=network,
sync=sync,
deploy_request_timeout=deploy_request_timeout,
autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization,
autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle,
)
@base.optional_sync(return_input_arg="endpoint", bind_future_to_self=False)
def _deploy(
self,
endpoint: Optional[Union["Endpoint", "PrivateEndpoint"]] = None,
deployed_model_display_name: Optional[str] = None,
traffic_percentage: Optional[int] = 0,
traffic_split: Optional[Dict[str, int]] = None,
machine_type: Optional[str] = None,
min_replica_count: int = 1,
max_replica_count: int = 1,
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
service_account: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = (),
encryption_spec_key_name: Optional[str] = None,
network: Optional[str] = None,
sync: bool = True,
deploy_request_timeout: Optional[float] = None,
autoscaling_target_cpu_utilization: Optional[int] = None,
autoscaling_target_accelerator_duty_cycle: Optional[int] = None,
) -> Union[Endpoint, PrivateEndpoint]:
"""Deploys model to endpoint. Endpoint will be created if unspecified.
Args:
endpoint (Union[Endpoint, PrivateEndpoint]):
Optional. Public or private Endpoint to deploy model to. If not specified,
endpoint display name will be model display name+'_endpoint'.
deployed_model_display_name (str):
Optional. The display name of the DeployedModel. If not provided
upon creation, the Model's display_name is used.
traffic_percentage (int):
Optional. Desired traffic to newly deployed model. Defaults to
0 if there are pre-existing deployed models. Defaults to 100 if
there are no pre-existing deployed models. Negative values should
not be provided. Traffic of previously deployed models at the endpoint
will be scaled down to accommodate new deployed model's traffic.
Should not be provided if traffic_split is provided.
traffic_split (Dict[str, int]):
Optional. A map from a DeployedModel's ID to the percentage of
this Endpoint's traffic that should be forwarded to that DeployedModel.
If a DeployedModel's ID is not listed in this map, then it receives
no traffic. The traffic percentage values must add up to 100, or
                the map must be empty if the Endpoint is not to accept any traffic at
                the moment. The key for the model being deployed is "0". Should not be
provided if traffic_percentage is provided.
machine_type (str):
Optional. The type of machine. Not specifying machine type will
                result in the model being deployed with automatic resources.
min_replica_count (int):
Optional. The minimum number of machine replicas this deployed
model will be always deployed on. If traffic against it increases,
it may dynamically be deployed onto more replicas, and as traffic
decreases, some of these extra replicas may be freed.
max_replica_count (int):
Optional. The maximum number of replicas this deployed model may
be deployed on when the traffic against it increases. If requested
value is too large, the deployment will error, but if deployment
succeeds then the ability to scale the model to that many replicas
is guaranteed (barring service outages). If traffic against the
deployed model increases beyond what its replicas at maximum may
handle, a portion of the traffic will be dropped. If this value
                is not provided, the larger value of min_replica_count or 1 will
be used.
accelerator_type (str):
Optional. Hardware accelerator type. Must also set accelerator_count if used.
One of ACCELERATOR_TYPE_UNSPECIFIED, NVIDIA_TESLA_K80, NVIDIA_TESLA_P100,
NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, NVIDIA_TESLA_T4
accelerator_count (int):
Optional. The number of accelerators to attach to a worker replica.
service_account (str):
The service account that the DeployedModel's container runs as. Specify the
email address of the service account. If this service account is not
specified, the container runs as a service account that doesn't have access
to the resource project.
Users deploying the Model must have the `iam.serviceAccounts.actAs`
permission on this service account.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
metadata (Sequence[Tuple[str, str]]):
Optional. Strings which should be sent along with the request as
metadata.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Model and all sub-resources of this Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init
network (str):
Optional. The full name of the Compute Engine network to which
the Endpoint, if created, will be peered to. E.g. "projects/12345/global/networks/myVPC".
Private services access must already be configured for the network.
Read more about PrivateEndpoints
[in the documentation](https://cloud.google.com/vertex-ai/docs/predictions/using-private-endpoints).
sync (bool):
Whether to execute this method synchronously. If False, this method
                will be executed in a concurrent Future, and any downstream object will
be immediately returned and synced when the Future has completed.
deploy_request_timeout (float):
Optional. The timeout for the deploy request in seconds.
autoscaling_target_cpu_utilization (int):
Optional. Target CPU Utilization to use for Autoscaling Replicas.
A default value of 60 will be used if not specified.
autoscaling_target_accelerator_duty_cycle (int):
Optional. Target Accelerator Duty Cycle.
Must also set accelerator_type and accelerator_count if specified.
A default value of 60 will be used if not specified.
Returns:
endpoint (Union[Endpoint, PrivateEndpoint]):
Endpoint with the deployed model.
"""
if endpoint is None:
display_name = self.display_name[:118] + "_endpoint"
if not network:
endpoint = Endpoint.create(
display_name=display_name,
project=self.project,
location=self.location,
credentials=self.credentials,
encryption_spec_key_name=encryption_spec_key_name,
)
else:
endpoint = PrivateEndpoint.create(
display_name=display_name,
network=network,
project=self.project,
location=self.location,
credentials=self.credentials,
encryption_spec_key_name=encryption_spec_key_name,
)
_LOGGER.log_action_start_against_resource("Deploying model to", "", endpoint)
endpoint._deploy_call(
endpoint.api_client,
endpoint.resource_name,
self,
endpoint._gca_resource.traffic_split,
network=network,
deployed_model_display_name=deployed_model_display_name,
traffic_percentage=traffic_percentage,
traffic_split=traffic_split,
machine_type=machine_type,
min_replica_count=min_replica_count,
max_replica_count=max_replica_count,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
service_account=service_account,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
metadata=metadata,
deploy_request_timeout=deploy_request_timeout,
autoscaling_target_cpu_utilization=autoscaling_target_cpu_utilization,
autoscaling_target_accelerator_duty_cycle=autoscaling_target_accelerator_duty_cycle,
)
_LOGGER.log_action_completed_against_resource("model", "deployed", endpoint)
endpoint._sync_gca_resource()
return endpoint
def batch_predict(
self,
job_display_name: Optional[str] = None,
gcs_source: Optional[Union[str, Sequence[str]]] = None,
bigquery_source: Optional[str] = None,
instances_format: str = "jsonl",
gcs_destination_prefix: Optional[str] = None,
bigquery_destination_prefix: Optional[str] = None,
predictions_format: str = "jsonl",
model_parameters: Optional[Dict] = None,
machine_type: Optional[str] = None,
accelerator_type: Optional[str] = None,
accelerator_count: Optional[int] = None,
starting_replica_count: Optional[int] = None,
max_replica_count: Optional[int] = None,
generate_explanation: Optional[bool] = False,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
labels: Optional[Dict[str, str]] = None,
credentials: Optional[auth_credentials.Credentials] = None,
encryption_spec_key_name: Optional[str] = None,
sync: bool = True,
create_request_timeout: Optional[float] = None,
batch_size: Optional[int] = None,
) -> jobs.BatchPredictionJob:
"""Creates a batch prediction job using this Model and outputs
prediction results to the provided destination prefix in the specified
`predictions_format`. One source and one destination prefix are
required.
Example usage:
my_model.batch_predict(
job_display_name="prediction-123",
gcs_source="gs://example-bucket/instances.csv",
instances_format="csv",
bigquery_destination_prefix="projectId.bqDatasetId.bqTableId"
)
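        A sketch of a JSONL job writing to Cloud Storage (the bucket paths are
        placeholders, and explanations assume the Model was uploaded with an
        explanation spec):
            my_model.batch_predict(
                job_display_name="prediction-456",
                gcs_source="gs://example-bucket/instances.jsonl",
                gcs_destination_prefix="gs://example-bucket/output/",
                machine_type="n1-standard-4",
                generate_explanation=True,
            )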
Args:
job_display_name (str):
Optional. The user-defined name of the BatchPredictionJob.
                The name can be up to 128 characters long and can consist
of any UTF-8 characters.
gcs_source: Optional[Sequence[str]] = None
Google Cloud Storage URI(-s) to your instances to run
batch prediction on. They must match `instances_format`.
bigquery_source: Optional[str] = None
BigQuery URI to a table, up to 2000 characters long. For example:
`bq://projectId.bqDatasetId.bqTableId`
instances_format: str = "jsonl"
The format in which instances are provided. Must be one
of the formats listed in `Model.supported_input_storage_formats`.
Default is "jsonl" when using `gcs_source`. If a `bigquery_source`
is provided, this is overridden to "bigquery".
gcs_destination_prefix: Optional[str] = None
The Google Cloud Storage location of the directory where the
output is to be written to. In the given directory a new
directory is created. Its name is
``prediction-<model-display-name>-<job-create-time>``, where
timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
Inside of it files ``predictions_0001.<extension>``,
``predictions_0002.<extension>``, ...,
``predictions_N.<extension>`` are created where
``<extension>`` depends on chosen ``predictions_format``,
and N may equal 0001 and depends on the total number of
successfully predicted instances. If the Model has both
``instance`` and ``prediction`` schemata defined then each such
file contains predictions as per the ``predictions_format``.
If prediction for any instance failed (partially or
completely), then an additional ``errors_0001.<extension>``,
``errors_0002.<extension>``,..., ``errors_N.<extension>``
files are created (N depends on total number of failed
predictions). These files contain the failed instances, as
per their schema, followed by an additional ``error`` field
which as value has ```google.rpc.Status`` <Status>`__
containing only ``code`` and ``message`` fields.
bigquery_destination_prefix: Optional[str] = None
The BigQuery URI to a project or table, up to 2000 characters long.
                When only the project is specified, the Dataset and Table are created.
When the full table reference is specified, the Dataset must exist and
table must not exist. Accepted forms: ``bq://projectId`` or
``bq://projectId.bqDatasetId`` or
``bq://projectId.bqDatasetId.bqTableId``. If no Dataset is specified,
a new one is created with the name
``prediction_<model-display-name>_<job-create-time>``
where the table name is made BigQuery-dataset-name compatible
(for example, most special characters become underscores), and
timestamp is in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601"
format. In the dataset two tables will be created, ``predictions``,
and ``errors``. If the Model has both ``instance`` and
``prediction`` schemata defined then the tables have columns as
follows: The ``predictions`` table contains instances for which
the prediction succeeded, it has columns as per a concatenation
of the Model's instance and prediction schemata. The ``errors``
table contains rows for which the prediction has failed, it has
instance columns, as per the instance schema, followed by a single
"errors" column, which as values has ```google.rpc.Status`` <Status>`__
represented as a STRUCT, and containing only ``code`` and ``message``.
predictions_format: str = "jsonl"
Required. The format in which Vertex AI outputs the
predictions, must be one of the formats specified in
`Model.supported_output_storage_formats`.
Default is "jsonl" when using `gcs_destination_prefix`. If a
`bigquery_destination_prefix` is provided, this is overridden to
"bigquery".
model_parameters: Optional[Dict] = None
Optional. The parameters that govern the predictions. The schema of
the parameters may be specified via the Model's `parameters_schema_uri`.
machine_type: Optional[str] = None
Optional. The type of machine for running batch prediction on
dedicated resources. Not specifying machine type will result in
                the batch prediction job being run with automatic resources.
accelerator_type: Optional[str] = None
Optional. The type of accelerator(s) that may be attached
to the machine as per `accelerator_count`. Only used if
`machine_type` is set.
accelerator_count: Optional[int] = None
Optional. The number of accelerators to attach to the
`machine_type`. Only used if `machine_type` is set.
starting_replica_count: Optional[int] = None
The number of machine replicas used at the start of the batch
                operation. If not set, Vertex AI decides the starting number, not
greater than `max_replica_count`. Only used if `machine_type` is
set.
max_replica_count: Optional[int] = None
The maximum number of machine replicas the batch operation may
be scaled to. Only used if `machine_type` is set.
Default is 10.
generate_explanation (bool):
Optional. Generate explanation along with the batch prediction
results. This will cause the batch prediction output to include
explanations based on the `prediction_format`:
- `bigquery`: output includes a column named `explanation`. The value
is a struct that conforms to the [aiplatform.gapic.Explanation] object.
- `jsonl`: The JSON objects on each line include an additional entry
keyed `explanation`. The value of the entry is a JSON object that
conforms to the [aiplatform.gapic.Explanation] object.
- `csv`: Generating explanations for CSV format is not supported.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Explanation metadata configuration for this BatchPredictionJob.
Can be specified only if `generate_explanation` is set to `True`.
This value overrides the value of `Model.explanation_metadata`.
All fields of `explanation_metadata` are optional in the request. If
a field of the `explanation_metadata` object is not populated, the
corresponding field of the `Model.explanation_metadata` object is inherited.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
Can be specified only if `generate_explanation` is set to `True`.
This value overrides the value of `Model.explanation_parameters`.
All fields of `explanation_parameters` are optional in the request. If
a field of the `explanation_parameters` object is not populated, the
corresponding field of the `Model.explanation_parameters` object is inherited.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
labels: Optional[Dict[str, str]] = None
Optional. The labels with user-defined metadata to organize your
BatchPredictionJobs. Label keys and values can be no longer than
64 characters (Unicode codepoints), can only contain lowercase
letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf
for more information and examples of labels.
credentials: Optional[auth_credentials.Credentials] = None
Optional. Custom credentials to use to create this batch prediction
job. Overrides credentials set in aiplatform.init.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Model and all sub-resources of this Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
create_request_timeout (float):
Optional. The timeout for the create request in seconds.
batch_size (int):
                Optional. The number of records (e.g. instances) of the operation given in each batch
                to a machine replica. The machine type and the size of a single record should be
                considered when setting this parameter: a higher value speeds up the batch
                operation's execution, but too high a value will result in a whole batch not
                fitting in a machine's memory, and the whole operation will fail.
The default value is 64.
Returns:
job (jobs.BatchPredictionJob):
Instantiated representation of the created batch prediction job.
"""
return jobs.BatchPredictionJob.create(
job_display_name=job_display_name,
model_name=self,
instances_format=instances_format,
predictions_format=predictions_format,
gcs_source=gcs_source,
bigquery_source=bigquery_source,
gcs_destination_prefix=gcs_destination_prefix,
bigquery_destination_prefix=bigquery_destination_prefix,
model_parameters=model_parameters,
machine_type=machine_type,
accelerator_type=accelerator_type,
accelerator_count=accelerator_count,
starting_replica_count=starting_replica_count,
max_replica_count=max_replica_count,
batch_size=batch_size,
generate_explanation=generate_explanation,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
labels=labels,
project=self.project,
location=self.location,
credentials=credentials or self.credentials,
encryption_spec_key_name=encryption_spec_key_name,
sync=sync,
create_request_timeout=create_request_timeout,
)
@classmethod
def list(
cls,
filter: Optional[str] = None,
order_by: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> List["models.Model"]:
"""List all Model resource instances.
Example Usage:
aiplatform.Model.list(
filter='labels.my_label="my_label_value" AND display_name="my_model"',
)
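        A sketch of listing with server-side ordering (newest models first):
            aiplatform.Model.list(
                order_by="create_time desc",
            )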
Args:
filter (str):
Optional. An expression for filtering the results of the request.
For field names both snake_case and camelCase are supported.
order_by (str):
Optional. A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for descending.
Supported fields: `display_name`, `create_time`, `update_time`
project (str):
Optional. Project to retrieve list from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve list from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to retrieve list. Overrides
credentials set in aiplatform.init.
Returns:
List[models.Model]:
A list of Model resource objects
"""
return cls._list(
filter=filter,
order_by=order_by,
project=project,
location=location,
credentials=credentials,
)
@classmethod
def _construct_sdk_resource_from_gapic(
cls,
gapic_resource: gca_model_compat.Model,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> "Model":
"""Override base._construct_sdk_resource_from_gapic to allow for setting
a ModelRegistry and resource_id_validator.
Args:
gapic_resource (gca_model_compat.Model):
A GAPIC representation of a Model resource.
project (str):
Optional. Project to construct SDK object from. If not set,
project set in aiplatform.init will be used.
location (str):
Optional. Location to construct SDK object from. If not set,
location set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to construct SDK object.
Overrides credentials set in aiplatform.init.
Returns:
Model:
An initialized SDK Model object that represents the Model GAPIC type.
"""
sdk_resource = super()._construct_sdk_resource_from_gapic(
gapic_resource=gapic_resource,
project=project,
location=location,
credentials=credentials,
)
sdk_resource._resource_id_validator = super()._revisioned_resource_id_validator
sdk_resource._registry = ModelRegistry(
sdk_resource.resource_name,
location=location,
project=project,
credentials=credentials,
)
return sdk_resource
@base.optional_sync()
def _wait_on_export(self, operation_future: operation.Operation, sync=True) -> None:
operation_future.result()
def export_model(
self,
export_format_id: str,
artifact_destination: Optional[str] = None,
image_destination: Optional[str] = None,
sync: bool = True,
) -> Dict[str, str]:
"""Exports a trained, exportable Model to a location specified by the user.
A Model is considered to be exportable if it has at least one `supported_export_formats`.
Either `artifact_destination` or `image_destination` must be provided.
Example Usage:
my_model.export(
export_format_id="tf-saved-model",
artifact_destination="gs://my-bucket/models/"
)
or
my_model.export(
export_format_id="custom-model",
image_destination="us-central1-docker.pkg.dev/projectId/repo/image"
)
Args:
export_format_id (str):
Required. The ID of the format in which the Model must be exported.
The list of export formats that this Model supports can be found
by calling `Model.supported_export_formats`.
artifact_destination (str):
The Cloud Storage location where the Model artifact is to be
written to. Under the directory given as the destination a
new one with name
"``model-export-<model-display-name>-<timestamp-of-export-call>``",
where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601
format, will be created. Inside, the Model and any of its
supporting files will be written.
This field should only be set when, in [Model.supported_export_formats],
the value for the key given in `export_format_id` contains ``ARTIFACT``.
image_destination (str):
The Google Container Registry or Artifact Registry URI where
the Model container image will be copied to. Accepted forms:
- Google Container Registry path. For example:
``gcr.io/projectId/imageName:tag``.
- Artifact Registry path. For example:
``us-central1-docker.pkg.dev/projectId/repoName/imageName:tag``.
This field should only be set when, in [Model.supported_export_formats],
the value for the key given in `export_format_id` contains ``IMAGE``.
sync (bool):
Whether to execute this export synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
Returns:
output_info (Dict[str, str]):
Details of the completed export with output destination paths to
the artifacts or container image.
Raises:
ValueError: If model does not support exporting.
ValueError: If invalid arguments or export formats are provided.
"""
self.wait()
# Model does not support exporting
if not self.supported_export_formats:
raise ValueError(f"The model `{self.resource_name}` is not exportable.")
# No destination provided
if not any((artifact_destination, image_destination)):
raise ValueError(
"Please provide an `artifact_destination` or `image_destination`."
)
export_format_id = export_format_id.lower()
# Unsupported export type
if export_format_id not in self.supported_export_formats:
raise ValueError(
f"'{export_format_id}' is not a supported export format for this model. "
f"Choose one of the following: {self.supported_export_formats}"
)
content_types = gca_model_compat.Model.ExportFormat.ExportableContent
supported_content_types = self.supported_export_formats[export_format_id]
if (
artifact_destination
and content_types.ARTIFACT not in supported_content_types
):
raise ValueError(
f"This model cannot be exported as an artifact in '{export_format_id}' format. "
"Try exporting as a container image by passing the `image_destination` argument."
)
if image_destination and content_types.IMAGE not in supported_content_types:
raise ValueError(
f"This model cannot be exported as a container image in '{export_format_id}' format. "
"Try exporting the model artifacts by passing an `artifact_destination` argument."
)
# Construct request payload
output_config = gca_model_service_compat.ExportModelRequest.OutputConfig(
export_format_id=export_format_id
)
if artifact_destination:
output_config.artifact_destination = gca_io_compat.GcsDestination(
output_uri_prefix=artifact_destination
)
if image_destination:
output_config.image_destination = (
gca_io_compat.ContainerRegistryDestination(output_uri=image_destination)
)
_LOGGER.log_action_start_against_resource("Exporting", "model", self)
model_name = self.versioned_resource_name
operation_future = self.api_client.export_model(
name=model_name, output_config=output_config
)
_LOGGER.log_action_started_against_resource_with_lro(
"Export", "model", self.__class__, operation_future
)
# Block before returning
self._wait_on_export(operation_future=operation_future, sync=sync)
_LOGGER.log_action_completed_against_resource("model", "exported", self)
return json_format.MessageToDict(operation_future.metadata.output_info._pb)
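# A minimal, commented-out usage sketch for `export_model`; the format ID and
# bucket path are placeholders and must match one of the model's
# `supported_export_formats`:
#
#   output_info = my_model.export_model(
#       export_format_id="tf-saved-model",
#       artifact_destination="gs://my-bucket/exports/",
#   )
#   print(output_info)  # destination paths of the exported artifacts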
@classmethod
@base.optional_sync()
def upload_xgboost_model_file(
cls,
model_file_path: str,
xgboost_version: Optional[str] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
model_id: Optional[str] = None,
parent_model: Optional[str] = None,
is_default_version: Optional[bool] = True,
version_aliases: Optional[Sequence[str]] = None,
version_description: Optional[str] = None,
instance_schema_uri: Optional[str] = None,
parameters_schema_uri: Optional[str] = None,
prediction_schema_uri: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
sync=True,
upload_request_timeout: Optional[float] = None,
) -> "Model":
"""Uploads a model and returns a Model representing the uploaded Model
resource.
Note: This function is *experimental* and can be changed in the future.
Example usage:
my_model = Model.upload_xgboost_model_file(
model_file_path="iris.xgboost_model.bst"
)
Args:
model_file_path (str): Required. Local file path of the model.
xgboost_version (str): Optional. The version of the XGBoost serving container.
Supported versions: ["0.82", "0.90", "1.1", "1.2", "1.3", "1.4"].
If the version is not specified, the latest version is used.
display_name (str):
Optional. The display name of the Model. The name can be up to 128
characters long and can consist of any UTF-8 characters.
description (str):
The description of the model.
model_id (str):
Optional. The ID to use for the uploaded Model, which will
become the final component of the model resource name.
This value may be up to 63 characters, and valid characters
are `[a-z0-9_-]`. The first character cannot be a number or hyphen.
parent_model (str):
Optional. The resource name or model ID of an existing model that the
newly-uploaded model will be a version of.
Only set this field when uploading a new version of an existing model.
is_default_version (bool):
Optional. When set to True, the newly uploaded model version will
automatically have alias "default" included. Subsequent uses of
this model without a version specified will use this "default" version.
When set to False, the "default" alias will not be moved.
Actions targeting the newly-uploaded model version will need
to specifically reference this version by ID or alias.
New model uploads, i.e. version 1, will always be "default" aliased.
version_aliases (Sequence[str]):
Optional. User provided version aliases so that a model version
can be referenced via alias instead of auto-generated version ID.
A default version alias will be created for the first version of the model.
The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]
version_description (str):
Optional. The description of the model version being uploaded.
instance_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
parameters_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has read access.
prediction_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
project: Optional[str]=None,
Project to upload this model to. Overrides project set in
aiplatform.init.
location: Optional[str]=None,
Location to upload this model to. Overrides location set in
aiplatform.init.
credentials: Optional[auth_credentials.Credentials]=None,
Custom credentials to use to upload this model. Overrides credentials
set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Model and all sub-resources of this Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
staging_bucket (str):
Optional. Bucket to stage local model artifacts. Overrides
staging_bucket set in aiplatform.init.
upload_request_timeout (float):
Optional. The timeout for the upload request in seconds.
Returns:
model (aiplatform.Model):
Instantiated representation of the uploaded model resource.
Raises:
ValueError: If model directory does not contain a supported model file.
"""
if not display_name:
display_name = cls._generate_display_name("XGBoost model")
XGBOOST_SUPPORTED_MODEL_FILE_EXTENSIONS = [
".pkl",
".joblib",
".bst",
]
container_image_uri = aiplatform.helpers.get_prebuilt_prediction_container_uri(
region=location,
framework="xgboost",
framework_version=xgboost_version or "1.4",
accelerator="cpu",
)
model_file_path_obj = pathlib.Path(model_file_path)
if not model_file_path_obj.is_file():
raise ValueError(
f"model_file_path path must point to a file: '{model_file_path}'"
)
model_file_extension = model_file_path_obj.suffix
if model_file_extension not in XGBOOST_SUPPORTED_MODEL_FILE_EXTENSIONS:
_LOGGER.warning(
f"Only the following XGBoost model file extensions are currently supported: '{XGBOOST_SUPPORTED_MODEL_FILE_EXTENSIONS}'"
)
_LOGGER.warning(
"Treating the model file as a binary serialized XGBoost Booster."
)
model_file_extension = ".bst"
# Preparing model directory
# We cannot clean up the directory immediately after calling Model.upload since
# that call may be asynchronous and return before the model file has been read.
# To work around this, we make this method asynchronous (decorate with @base.optional_sync)
# but call Model.upload with sync=True.
with tempfile.TemporaryDirectory() as prepared_model_dir:
prepared_model_file_path = pathlib.Path(prepared_model_dir) / (
"model" + model_file_extension
)
shutil.copy(model_file_path_obj, prepared_model_file_path)
return cls.upload(
serving_container_image_uri=container_image_uri,
artifact_uri=prepared_model_dir,
display_name=display_name,
description=description,
model_id=model_id,
parent_model=parent_model,
is_default_version=is_default_version,
version_aliases=version_aliases,
version_description=version_description,
instance_schema_uri=instance_schema_uri,
parameters_schema_uri=parameters_schema_uri,
prediction_schema_uri=prediction_schema_uri,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
project=project,
location=location,
credentials=credentials,
labels=labels,
encryption_spec_key_name=encryption_spec_key_name,
staging_bucket=staging_bucket,
sync=True,
upload_request_timeout=upload_request_timeout,
)
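# A minimal, commented-out sketch of producing and uploading a file accepted by
# `upload_xgboost_model_file`; the training data (X_train, y_train) and names
# are placeholders:
#
#   import xgboost as xgb
#   booster = xgb.XGBClassifier().fit(X_train, y_train).get_booster()
#   booster.save_model("my_model.bst")
#   model = aiplatform.Model.upload_xgboost_model_file(
#       model_file_path="my_model.bst",
#       display_name="my-xgboost-model",
#   )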
@classmethod
@base.optional_sync()
def upload_scikit_learn_model_file(
cls,
model_file_path: str,
sklearn_version: Optional[str] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
model_id: Optional[str] = None,
parent_model: Optional[str] = None,
is_default_version: Optional[bool] = True,
version_aliases: Optional[Sequence[str]] = None,
version_description: Optional[str] = None,
instance_schema_uri: Optional[str] = None,
parameters_schema_uri: Optional[str] = None,
prediction_schema_uri: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
sync=True,
upload_request_timeout: Optional[float] = None,
) -> "Model":
"""Uploads a model and returns a Model representing the uploaded Model
resource.
Note: This function is *experimental* and can be changed in the future.
Example usage:
my_model = Model.upload_scikit_learn_model_file(
model_file_path="iris.sklearn_model.joblib"
)
Args:
model_file_path (str): Required. Local file path of the model.
sklearn_version (str):
Optional. The version of the Scikit-learn serving container.
Supported versions: ["0.20", "0.22", "0.23", "0.24", "1.0"].
If the version is not specified, the latest version is used.
display_name (str):
Optional. The display name of the Model. The name can be up to 128
characters long and can consist of any UTF-8 characters.
description (str):
The description of the model.
model_id (str):
Optional. The ID to use for the uploaded Model, which will
become the final component of the model resource name.
This value may be up to 63 characters, and valid characters
are `[a-z0-9_-]`. The first character cannot be a number or hyphen.
parent_model (str):
Optional. The resource name or model ID of an existing model that the
newly-uploaded model will be a version of.
Only set this field when uploading a new version of an existing model.
is_default_version (bool):
Optional. When set to True, the newly uploaded model version will
automatically have alias "default" included. Subsequent uses of
this model without a version specified will use this "default" version.
When set to False, the "default" alias will not be moved.
Actions targeting the newly-uploaded model version will need
to specifically reference this version by ID or alias.
New model uploads, i.e. version 1, will always be "default" aliased.
version_aliases (Sequence[str]):
Optional. User provided version aliases so that a model version
can be referenced via alias instead of auto-generated version ID.
A default version alias will be created for the first version of the model.
The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]
version_description (str):
Optional. The description of the model version being uploaded.
instance_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
parameters_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has read access.
prediction_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
project: Optional[str]=None,
Project to upload this model to. Overrides project set in
aiplatform.init.
location: Optional[str]=None,
Location to upload this model to. Overrides location set in
aiplatform.init.
credentials: Optional[auth_credentials.Credentials]=None,
Custom credentials to use to upload this model. Overrides credentials
set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Model and all sub-resources of this Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
staging_bucket (str):
Optional. Bucket to stage local model artifacts. Overrides
staging_bucket set in aiplatform.init.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
upload_request_timeout (float):
Optional. The timeout for the upload request in seconds.
Returns:
model (aiplatform.Model):
Instantiated representation of the uploaded model resource.
Raises:
ValueError: If explanation_metadata is specified while explanation_parameters
is not. Also if model directory does not contain a supported model file.
"""
if not display_name:
display_name = cls._generate_display_name("Scikit-Learn model")
SKLEARN_SUPPORTED_MODEL_FILE_EXTENSIONS = [
".pkl",
".joblib",
]
container_image_uri = aiplatform.helpers.get_prebuilt_prediction_container_uri(
region=location,
framework="sklearn",
framework_version=sklearn_version or "1.0",
accelerator="cpu",
)
model_file_path_obj = pathlib.Path(model_file_path)
if not model_file_path_obj.is_file():
raise ValueError(
f"model_file_path path must point to a file: '{model_file_path}'"
)
model_file_extension = model_file_path_obj.suffix
if model_file_extension not in SKLEARN_SUPPORTED_MODEL_FILE_EXTENSIONS:
_LOGGER.warning(
f"Only the following Scikit-learn model file extensions are currently supported: '{SKLEARN_SUPPORTED_MODEL_FILE_EXTENSIONS}'"
)
_LOGGER.warning(
"Treating the model file as a pickle serialized Scikit-learn model."
)
model_file_extension = ".pkl"
# Preparing model directory
# We cannot clean up the directory immediately after calling Model.upload since
# that call may be asynchronous and return before the model file has been read.
# To work around this, we make this method asynchronous (decorate with @base.optional_sync)
# but call Model.upload with sync=True.
with tempfile.TemporaryDirectory() as prepared_model_dir:
prepared_model_file_path = pathlib.Path(prepared_model_dir) / (
"model" + model_file_extension
)
shutil.copy(model_file_path_obj, prepared_model_file_path)
return cls.upload(
serving_container_image_uri=container_image_uri,
artifact_uri=prepared_model_dir,
display_name=display_name,
description=description,
model_id=model_id,
parent_model=parent_model,
is_default_version=is_default_version,
version_aliases=version_aliases,
version_description=version_description,
instance_schema_uri=instance_schema_uri,
parameters_schema_uri=parameters_schema_uri,
prediction_schema_uri=prediction_schema_uri,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
project=project,
location=location,
credentials=credentials,
labels=labels,
encryption_spec_key_name=encryption_spec_key_name,
staging_bucket=staging_bucket,
sync=True,
upload_request_timeout=upload_request_timeout,
)
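# A minimal, commented-out sketch for `upload_scikit_learn_model_file`; the
# estimator, training data (X_train, y_train) and names are placeholders:
#
#   import joblib
#   from sklearn.linear_model import LogisticRegression
#   estimator = LogisticRegression().fit(X_train, y_train)
#   joblib.dump(estimator, "my_model.joblib")
#   model = aiplatform.Model.upload_scikit_learn_model_file(
#       model_file_path="my_model.joblib",
#       display_name="my-sklearn-model",
#   )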
@classmethod
def upload_tensorflow_saved_model(
cls,
saved_model_dir: str,
tensorflow_version: Optional[str] = None,
use_gpu: bool = False,
display_name: Optional[str] = None,
description: Optional[str] = None,
model_id: Optional[str] = None,
parent_model: Optional[str] = None,
is_default_version: Optional[bool] = True,
version_aliases: Optional[Sequence[str]] = None,
version_description: Optional[str] = None,
instance_schema_uri: Optional[str] = None,
parameters_schema_uri: Optional[str] = None,
prediction_schema_uri: Optional[str] = None,
explanation_metadata: Optional[aiplatform.explain.ExplanationMetadata] = None,
explanation_parameters: Optional[
aiplatform.explain.ExplanationParameters
] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
labels: Optional[Dict[str, str]] = None,
encryption_spec_key_name: Optional[str] = None,
staging_bucket: Optional[str] = None,
sync=True,
upload_request_timeout: Optional[float] = None,
) -> "Model":
"""Uploads a model and returns a Model representing the uploaded Model
resource.
Note: This function is *experimental* and can be changed in the future.
Example usage:
my_model = Model.upload_tensorflow_saved_model(
saved_model_dir="iris.tensorflow_model.SavedModel"
)
Args:
saved_model_dir (str): Required.
Local directory of the Tensorflow SavedModel.
tensorflow_version (str):
Optional. The version of the Tensorflow serving container.
Supported versions: ["0.15", "2.1", "2.2", "2.3", "2.4", "2.5", "2.6", "2.7"].
If the version is not specified, the latest version is used.
use_gpu (bool): Whether to use GPU for model serving.
display_name (str):
Optional. The display name of the Model. The name can be up to 128
characters long and can consist of any UTF-8 characters.
description (str):
The description of the model.
model_id (str):
Optional. The ID to use for the uploaded Model, which will
become the final component of the model resource name.
This value may be up to 63 characters, and valid characters
are `[a-z0-9_-]`. The first character cannot be a number or hyphen.
parent_model (str):
Optional. The resource name or model ID of an existing model that the
newly-uploaded model will be a version of.
Only set this field when uploading a new version of an existing model.
is_default_version (bool):
Optional. When set to True, the newly uploaded model version will
automatically have alias "default" included. Subsequent uses of
this model without a version specified will use this "default" version.
When set to False, the "default" alias will not be moved.
Actions targeting the newly-uploaded model version will need
to specifically reference this version by ID or alias.
New model uploads, i.e. version 1, will always be "default" aliased.
version_aliases (Sequence[str]):
Optional. User provided version aliases so that a model version
can be referenced via alias instead of auto-generated version ID.
A default version alias will be created for the first version of the model.
The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9]
version_description (str):
Optional. The description of the model version being uploaded.
instance_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single instance, which
are used in
``PredictRequest.instances``,
``ExplainRequest.instances``
and
``BatchPredictionJob.input_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
parameters_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the parameters of prediction and
explanation via
``PredictRequest.parameters``,
``ExplainRequest.parameters``
and
``BatchPredictionJob.model_parameters``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform, if no parameters are supported it is set to an
empty string. Note: The URI given on output will be
immutable and probably different, including the URI scheme,
than the one given on input. The output URI will point to a
location where the user only has read access.
prediction_schema_uri (str):
Optional. Points to a YAML file stored on Google Cloud
Storage describing the format of a single prediction
produced by this Model, which are returned via
``PredictResponse.predictions``,
``ExplainResponse.explanations``,
and
``BatchPredictionJob.output_config``.
The schema is defined as an OpenAPI 3.0.2 `Schema
Object <https://tinyurl.com/y538mdwt#schema-object>`__.
AutoML Models always have this field populated by AI
Platform. Note: The URI given on output will be immutable
and probably different, including the URI scheme, than the
one given on input. The output URI will point to a location
where the user only has read access.
explanation_metadata (aiplatform.explain.ExplanationMetadata):
Optional. Metadata describing the Model's input and output for explanation.
`explanation_metadata` is optional while `explanation_parameters` must be
specified when used.
For more details, see `Ref docs <http://tinyurl.com/1igh60kt>`
explanation_parameters (aiplatform.explain.ExplanationParameters):
Optional. Parameters to configure explaining for Model's predictions.
For more details, see `Ref docs <http://tinyurl.com/1an4zake>`
project: Optional[str]=None,
Project to upload this model to. Overrides project set in
aiplatform.init.
location: Optional[str]=None,
Location to upload this model to. Overrides location set in
aiplatform.init.
credentials: Optional[auth_credentials.Credentials]=None,
Custom credentials to use to upload this model. Overrides credentials
set in aiplatform.init.
labels (Dict[str, str]):
Optional. The labels with user-defined metadata to
organize your Models.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
encryption_spec_key_name (Optional[str]):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the model. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If set, this Model and all sub-resources of this Model will be secured by this key.
Overrides encryption_spec_key_name set in aiplatform.init.
staging_bucket (str):
Optional. Bucket to stage local model artifacts. Overrides
staging_bucket set in aiplatform.init.
sync (bool):
Whether to execute this method synchronously. If False, this method
will be executed in concurrent Future and any downstream object will
be immediately returned and synced when the Future has completed.
upload_request_timeout (float):
Optional. The timeout for the upload request in seconds.
Returns:
model (aiplatform.Model):
Instantiated representation of the uploaded model resource.
Raises:
ValueError: If explanation_metadata is specified while explanation_parameters
is not. Also if model directory does not contain a supported model file.
"""
if not display_name:
display_name = cls._generate_display_name("Tensorflow model")
container_image_uri = aiplatform.helpers.get_prebuilt_prediction_container_uri(
region=location,
framework="tensorflow",
framework_version=tensorflow_version or "2.7",
accelerator="gpu" if use_gpu else "cpu",
)
return cls.upload(
serving_container_image_uri=container_image_uri,
artifact_uri=saved_model_dir,
display_name=display_name,
description=description,
model_id=model_id,
parent_model=parent_model,
is_default_version=is_default_version,
version_aliases=version_aliases,
version_description=version_description,
instance_schema_uri=instance_schema_uri,
parameters_schema_uri=parameters_schema_uri,
prediction_schema_uri=prediction_schema_uri,
explanation_metadata=explanation_metadata,
explanation_parameters=explanation_parameters,
project=project,
location=location,
credentials=credentials,
labels=labels,
encryption_spec_key_name=encryption_spec_key_name,
staging_bucket=staging_bucket,
sync=sync,
upload_request_timeout=upload_request_timeout,
)
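# A minimal, commented-out sketch for `upload_tensorflow_saved_model`; the Keras
# model and directory name are placeholders:
#
#   import tensorflow as tf
#   keras_model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
#   tf.saved_model.save(keras_model, "my_saved_model_dir")
#   model = aiplatform.Model.upload_tensorflow_saved_model(
#       saved_model_dir="my_saved_model_dir",
#       tensorflow_version="2.7",
#       use_gpu=False,
#   )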
def list_model_evaluations(
self,
) -> List["model_evaluation.ModelEvaluation"]:
"""List all Model Evaluation resources associated with this model.
If this Model resource was instantiated with a version, the Model
Evaluation resources for that version will be returned. If no version
was provided when the Model resource was instantiated, Model Evaluation
resources will be returned for the default version.
Example Usage:
my_model = Model(
model_name="projects/123/locations/us-central1/models/456@1"
)
my_evaluations = my_model.list_model_evaluations()
Returns:
List[model_evaluation.ModelEvaluation]:
List of ModelEvaluation resources for the model.
"""
return model_evaluation.ModelEvaluation._list(
parent=self.versioned_resource_name,
credentials=self.credentials,
)
def get_model_evaluation(
self,
evaluation_id: Optional[str] = None,
) -> Optional[model_evaluation.ModelEvaluation]:
"""Returns a ModelEvaluation resource and instantiates its representation.
If no evaluation_id is passed, it will return the first evaluation associated
with this model. If the aiplatform.Model resource was instantiated with a
version, this will return a Model Evaluation from that version. If no version
was specified when instantiating the Model resource, this will return an
Evaluation from the default version.
Example usage:
my_model = Model(
model_name="projects/123/locations/us-central1/models/456"
)
my_evaluation = my_model.get_model_evaluation(
evaluation_id="789"
)
# If no arguments are passed, this method returns the first evaluation for the model
my_evaluation = my_model.get_model_evaluation()
Args:
evaluation_id (str):
Optional. The ID of the model evaluation to retrieve.
Returns:
model_evaluation.ModelEvaluation:
Instantiated representation of the ModelEvaluation resource.
"""
evaluations = self.list_model_evaluations()
if not evaluation_id:
if len(evaluations) > 1:
_LOGGER.warning(
f"Your model has more than one model evaluation, this is returning only one evaluation resource: {evaluations[0].resource_name}"
)
return evaluations[0] if evaluations else None
else:
resource_uri_parts = self._parse_resource_name(self.resource_name)
evaluation_resource_name = (
model_evaluation.ModelEvaluation._format_resource_name(
**resource_uri_parts,
evaluation=evaluation_id,
)
)
return model_evaluation.ModelEvaluation(
evaluation_name=evaluation_resource_name,
credentials=self.credentials,
)
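# A minimal, commented-out sketch of retrieving evaluations; the model resource
# name is a placeholder:
#
#   my_model = aiplatform.Model("projects/123/locations/us-central1/models/456")
#   for evaluation in my_model.list_model_evaluations():
#       print(evaluation.resource_name)
#   first_evaluation = my_model.get_model_evaluation()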
# TODO (b/232546878): Async support
class ModelRegistry:
def __init__(
self,
model: Union[Model, str],
location: Optional[str] = None,
project: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
):
"""Creates a ModelRegistry instance for version management of a registered model.
Args:
model (Union[Model, str]):
Required. One of the following:
1. A Model instance
2. A fully-qualified model resource name
3. A model ID. A location and project must be provided.
location (str):
Optional. The model location. Used when passing a model name as model.
If not set, location set in aiplatform.init will be used.
project (str):
Optional. The model project. Used when passing a model name as model.
If not set, project set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use with model access. If not set,
credentials set in aiplatform.init will be used.
"""
if isinstance(model, Model):
self.model_resource_name = model.resource_name
else:
self.model_resource_name = utils.full_resource_name(
resource_name=model,
resource_noun="models",
parse_resource_name_method=Model._parse_resource_name,
format_resource_name_method=Model._format_resource_name,
project=project,
location=location,
resource_id_validator=base.VertexAiResourceNoun._revisioned_resource_id_validator,
)
self.credentials = credentials or (
model.credentials
if isinstance(model, Model)
else initializer.global_config.credentials
)
self.client = Model._instantiate_client(location, self.credentials)
def get_model(
self,
version: Optional[str] = None,
) -> Model:
"""Gets a registered model with optional version.
Args:
version (str):
Optional. A model version ID or alias to target.
Defaults to the model with the "default" alias.
Returns:
Model: An instance of a Model from this ModelRegistry.
"""
return Model(
self.model_resource_name, version=version, credentials=self.credentials
)
def list_versions(
self,
) -> List[VersionInfo]:
"""Lists the versions and version info of a model.
Returns:
List[VersionInfo]:
A list of VersionInfo, each containing
info about specific model versions.
"""
_LOGGER.info(f"Getting versions for {self.model_resource_name}")
page_result = self.client.list_model_versions(
name=self.model_resource_name,
)
versions = [
VersionInfo(
version_id=model.version_id,
version_create_time=model.version_create_time,
version_update_time=model.version_update_time,
model_display_name=model.display_name,
model_resource_name=self._parse_versioned_name(model.name)[0],
version_aliases=model.version_aliases,
version_description=model.version_description,
)
for model in page_result
]
return versions
def get_version_info(
self,
version: str,
) -> VersionInfo:
"""Gets information about a specific model version.
Args:
version (str): Required. The model version to obtain info for.
Returns:
VersionInfo: Contains info about the model version.
"""
_LOGGER.info(f"Getting version {version} info for {self.model_resource_name}")
model = self.client.get_model(
name=self._get_versioned_name(self.model_resource_name, version),
)
return VersionInfo(
version_id=model.version_id,
version_create_time=model.version_create_time,
version_update_time=model.version_update_time,
model_display_name=model.display_name,
model_resource_name=self._parse_versioned_name(model.name)[0],
version_aliases=model.version_aliases,
version_description=model.version_description,
)
def delete_version(
self,
version: str,
) -> None:
"""Deletes a model version from the registry.
Cannot delete a version if it is the last remaining version.
Use Model.delete() in that case.
Args:
version (str): Required. The model version ID or alias to delete.
"""
lro = self.client.delete_model_version(
name=self._get_versioned_name(self.model_resource_name, version),
)
_LOGGER.info(f"Deleting version {version} for {self.model_resource_name}")
lro.result()
_LOGGER.info(f"Deleted version {version} for {self.model_resource_name}")
def add_version_aliases(
self,
new_aliases: List[str],
version: str,
) -> None:
"""Adds version alias(es) to a model version.
Args:
new_aliases (List[str]): Required. The alias(es) to add to a model version.
version (str): Required. The version ID to receive the new alias(es).
"""
self._merge_version_aliases(
version_aliases=new_aliases,
version=version,
)
def remove_version_aliases(
self,
target_aliases: List[str],
version: str,
) -> None:
"""Removes version alias(es) from a model version.
Args:
target_aliases (List[str]): Required. The alias(es) to remove from a model version.
version (str): Required. The version ID to be stripped of the target alias(es).
"""
self._merge_version_aliases(
version_aliases=[f"-{alias}" for alias in target_aliases],
version=version,
)
def _merge_version_aliases(
self,
version_aliases: List[str],
version: str,
) -> None:
"""Merges a list of version aliases with a model's existing alias list.
Args:
version_aliases (List[str]): Required. The version alias change list.
version (str): Required. The version ID to have its alias list changed.
"""
_LOGGER.info(f"Merging version aliases for {self.model_resource_name}")
self.client.merge_version_aliases(
name=self._get_versioned_name(self.model_resource_name, version),
version_aliases=version_aliases,
)
_LOGGER.info(
f"Completed merging version aliases for {self.model_resource_name}"
)
@staticmethod
def _get_versioned_name(
resource_name: str,
version: Optional[str] = None,
) -> str:
"""Creates a versioned form of a model resource name.
Args:
resource_name (str): Required. A fully-qualified resource name or resource ID.
version (str): Optional. The version or alias of the resource.
Returns:
versioned_name (str): The versioned resource name in revisioned format.
"""
if version:
return f"{resource_name}@{version}"
return resource_name
@staticmethod
def _parse_versioned_name(
model_name: str,
) -> Tuple[str, Optional[str]]:
"""Return a model name and, if included in the model name, a model version.
Args:
model_name (str): Required. A fully-qualified model name or model ID,
optionally with an included version.
Returns:
parsed_version_name (Tuple[str, Optional[str]]):
A tuple containing the model name or ID as the first element,
and the model version as the second element, if present in `model_name`.
Raises:
ValueError: If the `model_name` is invalid and contains too many '@' symbols.
"""
if "@" not in model_name:
return model_name, None
elif model_name.count("@") > 1:
raise ValueError(
f"Received an invalid model_name with too many `@`s: {model_name}"
)
else:
return model_name.split("@")
@staticmethod
def _get_true_version_parent(
parent_model: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
) -> Optional[str]:
"""Gets the true `parent_model` with full resource name.
Args:
parent_model (str): Optional. A fully-qualified resource name or resource ID
of the model that would be the parent of another model.
project (str): Optional. The project of `parent_model`, if not included in `parent_model`.
location (str): Optional. The location of `parent_model`, if not included in `parent_model`.
Returns:
true_parent_model (str):
Optional. The true resource name of the parent model, if one should exist.
"""
if parent_model:
existing_resource = utils.full_resource_name(
resource_name=parent_model,
resource_noun="models",
parse_resource_name_method=Model._parse_resource_name,
format_resource_name_method=Model._format_resource_name,
project=project,
location=location,
)
parent_model = existing_resource
return parent_model
@staticmethod
def _get_true_alias_list(
version_aliases: Optional[Sequence[str]] = None,
is_default_version: bool = True,
) -> Optional[Sequence[str]]:
"""Gets the true `version_aliases` list based on `is_default_version`.
Args:
version_aliases (Sequence[str]): Optional. The user-provided list of model aliases.
is_default_version (bool):
Optional. When set, includes the "default" alias in `version_aliases`.
Defaults to True.
Returns:
true_alias_list (Sequence[str]):
Optional: The true alias list, should one exist,
containing "default" if specified.
"""
if is_default_version:
if version_aliases and "default" not in version_aliases:
version_aliases.append("default")
elif not version_aliases:
version_aliases = ["default"]
return version_aliases
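# A minimal, commented-out sketch of the ModelRegistry version-management
# workflow defined above; the model ID, project, location, alias and version
# IDs are placeholders:
#
#   registry = ModelRegistry(
#       "my-model-id", project="my-project", location="us-central1"
#   )
#   for info in registry.list_versions():
#       print(info.version_id, info.version_aliases)
#   registry.add_version_aliases(["challenger"], version="2")
#   challenger = registry.get_model(version="challenger")
#   registry.remove_version_aliases(["challenger"], version="2")
#   registry.delete_version(version="2")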
| {
"content_hash": "ac1cfb06bf5a12420de482f17804c8b8",
"timestamp": "",
"source": "github",
"line_count": 5034,
"max_line_length": 278,
"avg_line_length": 46.72367898291617,
"alnum_prop": 0.608417266492919,
"repo_name": "googleapis/python-aiplatform",
"id": "222f7df1a8c765fd00854b169eb1991b9efc2912",
"size": "235808",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/aiplatform/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import time
import WatchmanEdenTestCase
class TestEdenJournal(WatchmanEdenTestCase.WatchmanEdenTestCase):
def test_eden_journal(self):
def populate(repo):
repo.write_file("hello", "hola\n")
repo.commit("initial commit.")
root = self.makeEdenMount(populate)
repo = self.repoForPath(root)
initial_commit = repo.get_head_hash()
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
clock = self.watchmanCommand("clock", root)
self.touchRelative(root, "newfile")
res = self.watchmanCommand("query", root, {"fields": ["name"], "since": clock})
clock = res["clock"]
self.assertFileListsEqual(res["files"], ["newfile"])
repo.add_file("newfile")
repo.commit(message="add newfile")
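# Query for changes since the saved clock, excluding the .hg metadata directory
# and hg's transient check files. In watchman's expression language, "dirname"
# matches files under a directory, "match" is a glob on the file name, "anyof"
# is a logical OR, and "not" negates the whole sub-expression.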
res = self.watchmanCommand(
"query",
root,
{
"expression": [
"not",
[
"anyof",
["dirname", ".hg"],
["match", "checklink*"],
["match", "hg-check*"],
],
],
"fields": ["name"],
"since": clock,
},
)
clock = res["clock"]
self.assertFileListsEqual(
res["files"],
["newfile"],
message="We expect to report the files changed in the commit",
)
# Test that the journal has the correct contents across a "reset"-like
# operation where the parents are poked directly. This is using
# debugsetparents rather than reset because the latter isn't enabled
# by default for hg in the watchman test machinery.
self.touchRelative(root, "unclean")
repo.hg("debugsetparents", initial_commit)
res = self.watchmanCommand(
"query",
root,
{
"expression": ["not", ["dirname", ".hg"]],
"fields": ["name"],
"since": clock,
},
)
self.assertFileListsEqual(
res["files"],
["newfile", "unclean"],
message=(
"We expect to report the file changed in the commit "
"as well as the unclean file"
),
)
# make sure that we detect eden getting unmounted. This sleep is unfortunate
# and ugly. Without it, the unmount will fail because something is accessing
# the filesystem. I haven't been able to find out what it is because fuser
# takes too long to run and by the time it has run, whatever that blocker
# was is no longer there. Ordinarily I'd prefer to poll on some condition
# in a loop rather than just sleeping an arbitrary amount, but I just don't
# know what the offending thing is and running the unmount in a loop is prone
# to false negatives.
time.sleep(1)
self.eden.remove(root)
watches = self.watchmanCommand("watch-list")
self.assertNotIn(root, watches["roots"])
| {
"content_hash": "907fbbf70cae675b97325c98600f764d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 87,
"avg_line_length": 35.747252747252745,
"alnum_prop": 0.5410390408853366,
"repo_name": "wez/watchman",
"id": "1adb2474d81f307cc3ac1fd4886b8494eb160144",
"size": "3385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_eden_journal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "68354"
},
{
"name": "C++",
"bytes": "1017051"
},
{
"name": "CMake",
"bytes": "33772"
},
{
"name": "CSS",
"bytes": "42513"
},
{
"name": "HTML",
"bytes": "36593"
},
{
"name": "Java",
"bytes": "165025"
},
{
"name": "JavaScript",
"bytes": "35291"
},
{
"name": "Python",
"bytes": "677902"
},
{
"name": "Ruby",
"bytes": "21741"
},
{
"name": "Rust",
"bytes": "69015"
},
{
"name": "Shell",
"bytes": "13265"
},
{
"name": "Thrift",
"bytes": "32316"
}
],
"symlink_target": ""
} |
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.shortcuts import render
from django.views.generic import DetailView, TemplateView
from django.http import HttpResponse
from ojoalplato.cards.forms import RestaurantSearchForm
from ojoalplato.cards.models import Restaurant, Wine
from haystack.query import SearchQuerySet
import json
# class MapView(SearchView):
# model = Restaurant
# template_name = "blog/wpfamily/map_list.html"
# form = HighlightedModelSearchForm
class MapView(TemplateView):
template_name = "blog/wpfamily/map_list.html"
class RestaurantDetailView(DetailView):
model = Restaurant
template_name = "cards/restaurant_detail.html"
context_object_name = "card"
class WineDetailView(DetailView):
model = Wine
template_name = "cards/wine_detail.html"
context_object_name = "card"
def restaurant_search(request):
query = request.GET.get('q')
page = request.GET.get('page')
form = RestaurantSearchForm(request.GET)
restaurants = form.search()
paginator = Paginator(restaurants, 10) # Show 10 results per page
try:
restaurants = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
restaurants = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
restaurants = paginator.page(paginator.num_pages)
return render(request, 'blog/wpfamily/map_list.html',
{
'query': query,
'page': restaurants
})
def autocomplete(request):
sqs = SearchQuerySet().autocomplete(content_auto=request.GET.get('q', ''))
suggestions = [{"name": r.object.name,
"chef": r.object.chef,
"address": r.object.address,
"url": r.object.get_absolute_url(),
"img": r.object.img_src,
"stars": r.object.stars,
"suns": r.object.suns} for r in sqs]
# Make sure you return a JSON object, not a bare list.
# Otherwise, you could be vulnerable to an XSS attack.
the_data = json.dumps(suggestions)
return HttpResponse(the_data, content_type='application/json')
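# A minimal sketch of how these views might be wired into an app-level urls.py;
# that module is not shown here, so the import path, patterns and names below
# are hypothetical:
#
#   from django.urls import path
#   from ojoalplato.cards import views
#
#   urlpatterns = [
#       path("map/", views.MapView.as_view(), name="map"),
#       path("restaurant/<int:pk>/", views.RestaurantDetailView.as_view(),
#            name="restaurant-detail"),
#       path("wine/<int:pk>/", views.WineDetailView.as_view(), name="wine-detail"),
#       path("search/", views.restaurant_search, name="restaurant-search"),
#       path("autocomplete/", views.autocomplete, name="autocomplete"),
#   ]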
| {
"content_hash": "2cf3daabd858f9dc2c8caf2a4a2657a2",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 33.23943661971831,
"alnum_prop": 0.6411016949152543,
"repo_name": "javipalanca/ojoalplato",
"id": "1138ab1874daa616e4701a0cc9a4b3bbce495cc2",
"size": "2360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ojoalplato/cards/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "185428"
},
{
"name": "Dockerfile",
"bytes": "1516"
},
{
"name": "HTML",
"bytes": "429279"
},
{
"name": "JavaScript",
"bytes": "123727"
},
{
"name": "Python",
"bytes": "471786"
},
{
"name": "SCSS",
"bytes": "1360"
},
{
"name": "Shell",
"bytes": "10755"
}
],
"symlink_target": ""
} |
import time
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, live_only)
from knack.util import CLIError
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
class SBNSMigrationCRUDScenarioTest(ScenarioTest):
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
# Test playback fails and the live-only flag will be removed once it is addressed
@live_only()
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_test_sb_migration')
def test_sb_migration(self, resource_group):
from azure.mgmt.servicebus.models import ProvisioningStateDR
self.kwargs.update({
'loc_south': 'SouthCentralUS',
'loc_north': 'NorthCentralUS',
'namespacenamestandard': self.create_random_name(prefix='sb-std-nscli', length=20),
'namespacenamepremium': self.create_random_name(prefix='sb-pre-nscli', length=20),
'tags': {'tag1: value1', 'tag2: value2'},
'sku': 'Premium',
'sku_std': 'Standard',
'authoname': self.create_random_name(prefix='cliAutho', length=20),
'defaultauthorizationrule': 'RootManageSharedAccessKey',
'accessrights': 'Send',
'primary': 'PrimaryKey',
'secondary': 'SecondaryKey',
'postmigrationname': self.create_random_name(prefix='clipostmigration', length=20),
'alternatename': self.create_random_name(prefix='cliAlter', length=20),
'id': '',
'test': '',
'queuename': '',
'topicname': '',
'partnernamespaceid': ''
})
self.cmd('servicebus namespace exists --name {namespacenamestandard}',
checks=[self.check('nameAvailable', True)])
# Create Namespace - Standard
self.cmd(
'servicebus namespace create --resource-group {rg} --name {namespacenamestandard} --location {loc_south} --tags {tags} --sku {sku_std}',
checks=[self.check('sku.name', '{sku_std}')])
# Get Created Namespace - Standard
self.cmd('servicebus namespace show --resource-group {rg} --name {namespacenamestandard}',
checks=[self.check('sku.name', '{sku_std}')])
# Create Namespace - Premium
self.cmd(
'servicebus namespace create --resource-group {rg} --name {namespacenamepremium} --location {loc_north} --tags {tags} --sku {sku}',
checks=[self.check('sku.name', '{sku}')])
# Get Created Namespace - Premium
getnamespace2result = self.cmd(
'servicebus namespace show --resource-group {rg} --name {namespacenamepremium}',
checks=[self.check('sku.name', '{sku}')]).get_output_in_json()
# Create Authorization Rule
self.cmd(
'servicebus namespace authorization-rule create --resource-group {rg} --namespace-name {namespacenamestandard} --name {authoname} --rights {accessrights}',
checks=[self.check('name', '{authoname}')])
partnernamespaceid = getnamespace2result['id']
self.kwargs.update({'id': partnernamespaceid})
# Get Created Authorization Rule
self.cmd(
'servicebus namespace authorization-rule show --resource-group {rg} --namespace-name {namespacenamestandard} --name {authoname}',
checks=[self.check('name', '{authoname}')])
# Create Queues under Standard namespace
for x in range(0, 10):
queuenamestr = 'queue' + repr(x)
self.kwargs.update({'queuename': queuenamestr})
self.cmd(
'servicebus queue create --resource-group {rg} --namespace-name {namespacenamestandard} --name {queuename}',
checks=[self.check('name', '{queuename}')])
# Create Topics under Standard namespace
for x in range(0, 10):
topicnamestr = 'topic' + repr(x)
self.kwargs.update({'topicname': topicnamestr})
self.cmd(
'servicebus topic create --resource-group {rg} --namespace-name {namespacenamestandard} --name {topicname}',
checks=[self.check('name', '{topicname}')])
time.sleep(10)
# Create Migration
self.cmd(
'servicebus migration start --resource-group {rg} --name {namespacenamestandard} --target-namespace {id} --post-migration-name {postmigrationname}')
# get Migration
getmigration = self.cmd(
'servicebus migration show --resource-group {rg} --name {namespacenamestandard}').get_output_in_json()
# Complete Migration
self.cmd(
'servicebus migration complete --resource-group {rg} --name {namespacenamestandard}')
# get Migration
getmigration = self.cmd(
'servicebus migration show --resource-group {rg} --name {namespacenamestandard}').get_output_in_json()
# wait until the migration provisioning has succeeded
while getmigration['provisioningState'] != ProvisioningStateDR.succeeded.value:
time.sleep(30)
getmigration = self.cmd(
'servicebus migration show --resource-group {rg} --name {namespacenamestandard}').get_output_in_json()
# wait until the migration PendingReplicationOperationsCount is 0 or null
while getmigration['migrationState'] != 'Active':
time.sleep(30)
getmigration = self.cmd(
'servicebus migration show --resource-group {rg} --name {namespacenamestandard}').get_output_in_json()
# Get Authorization Rule - Premium
self.cmd(
'servicebus namespace authorization-rule show --resource-group {rg} --namespace-name {namespacenamepremium} --name {authoname}',
checks=[self.check('name', '{authoname}')])
# Get all queues from Premium namespace
listqueues1 = self.cmd(
'servicebus queue list --resource-group {rg} --namespace-name {namespacenamepremium}').get_output_in_json()
self.assertIsNotNone(listqueues1)
self.assertGreaterEqual(len(listqueues1), 10, 'Premium - get all queues count not 10')
# Get all topics from Premium namespace
listtopics = self.cmd(
'servicebus topic list --resource-group {rg} --namespace-name {namespacenamepremium}').get_output_in_json()
self.assertIsNotNone(listtopics)
self.assertGreaterEqual(len(listtopics), 10, 'Premium - get all topics count not 10')
time.sleep(30)
# get namespace
getnamespace = self.cmd(
'servicebus namespace show --resource-group {rg} --name {namespacenamestandard}').get_output_in_json()
        # Wait until the namespace provisioning has succeeded
while getnamespace['provisioningState'] != ProvisioningStateDR.succeeded.value:
time.sleep(30)
getnamespace = self.cmd(
'servicebus namespace show --resource-group {rg} --name {namespacenamestandard}').get_output_in_json()
# Delete Namespace - Standard
self.cmd('servicebus namespace delete --resource-group {rg} --name {namespacenamestandard}')
# get namespace
getnamespace = self.cmd(
'servicebus namespace show --resource-group {rg} --name {namespacenamepremium}').get_output_in_json()
        # Wait until the namespace provisioning has succeeded
while getnamespace['provisioningState'] != ProvisioningStateDR.succeeded.value:
time.sleep(30)
getnamespace = self.cmd(
'servicebus namespace show --resource-group {rg} --name {namespacenamepremium}').get_output_in_json()
| {
"content_hash": "cc418564fabaa40d422b62045e72326d",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 167,
"avg_line_length": 47.382716049382715,
"alnum_prop": 0.6293642522146952,
"repo_name": "yugangw-msft/azure-cli",
"id": "a3ddc2b106dd9400ea4faa5d0d87e49d4a3ab4d8",
"size": "8070",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/servicebus/tests/latest/test_servicebus_migration_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
"""
kindlepush
~~~~~~~~~~
    Kindlepush saves you from manually clicking the deliver button to send
    documents from your Kindle library to your Kindle. It is intended for 3G
    devices such as the Kindle DX.
It was created by @blahgeek, now maintained by @lord63.
:copyright: (c) 2014 BlahGeek.
:copyright: (c) 2014 lord63.
:license: MIT, see LICENSE for more details.
"""
__version__ = "0.3.3"
__title__ = "kindlepush"
__author__ = "BlahGeek"
__maintainer__ = "lord63"
__license__ = "MIT"
__copyright__ = "Copyright 2014 BlahGeek 2014 lord63"
| {
"content_hash": "8b671216a420d3f2db22926131082711",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 73,
"avg_line_length": 28.8,
"alnum_prop": 0.6493055555555556,
"repo_name": "lord63/kindledxpush",
"id": "a80b26cd6abc18d5a7d50a23dffd9f3cbb13bebd",
"size": "622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kindlepush/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9897"
}
],
"symlink_target": ""
} |
"""The base classes for RDFValue tests."""
import time
from grr.client.components.rekall_support import rekall_types as rdf_rekall_types
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib import type_info
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import structs as rdf_structs
# pylint:mode=test
class RDFValueBaseTest(test_lib.GRRBaseTest):
pass
class GenericRDFProtoTest(RDFValueBaseTest):
def testNestedProtobufAssignment(self):
"""Check that we can assign a nested protobuf."""
container = rdf_rekall_types.RekallRequest()
pathspec = rdf_paths.PathSpec(path=r"\\.\pmem", pathtype=1)
# Should raise - incompatible RDFType.
self.assertRaises(ValueError, setattr, container, "device",
rdfvalue.RDFString("hello"))
# Should raise - incompatible RDFProto type.
self.assertRaises(
ValueError,
setattr,
container,
"device",
rdf_client.StatEntry(st_size=5))
# Assign directly.
container.device = pathspec
self.assertEqual(container.device.path, r"\\.\pmem")
# Clear the field.
container.device = None
# Check the protobuf does not have the field set at all.
self.assertFalse(container.HasField("device"))
def testSimpleTypeAssignment(self):
sample = rdf_client.StatEntry()
sample.AddDescriptor(
rdf_structs.ProtoRDFValue(
name="test",
field_number=45,
default=rdfvalue.RDFInteger(0),
rdf_type=rdfvalue.RDFInteger))
self.assertIsInstance(sample.test, rdfvalue.RDFInteger)
# Can we assign an RDFValue instance?
sample.test = rdfvalue.RDFInteger(5)
self.assertEqual(sample.test, 5)
# Check that bare values can be coerced.
sample.test = 6
self.assertIsInstance(sample.test, rdfvalue.RDFInteger)
self.assertEqual(sample.test, 6)
# Assign an enum.
sample.registry_type = sample.RegistryType.REG_DWORD
self.assertEqual(sample.registry_type, sample.RegistryType.REG_DWORD)
sample.registry_type = rdf_client.StatEntry.RegistryType.REG_SZ
self.assertEqual(sample.registry_type, sample.RegistryType.REG_SZ)
# We can also assign the string value.
sample.registry_type = "REG_QWORD"
self.assertEqual(sample.registry_type, sample.RegistryType.REG_QWORD)
# Check that coercing works.
sample.test = "10"
self.assertEqual(sample.test, 10)
    # Assign an RDFValue which cannot be coerced.
self.assertRaises(type_info.TypeValueError, setattr, sample, "test",
rdfvalue.RDFString("hello"))
def testComplexConstruction(self):
"""Test that we can construct RDFProtos with nested fields."""
pathspec = rdf_paths.PathSpec(
path="/foobar", pathtype=rdf_paths.PathSpec.PathType.TSK)
sample = rdf_client.StatEntry(pathspec=pathspec, st_size=5)
self.assertEqual(sample.pathspec.path, "/foobar")
self.assertEqual(sample.st_size, 5)
self.assertRaises(AttributeError, rdf_client.StatEntry, foobar=1)
def testUnicodeSupport(self):
pathspec = rdf_paths.PathSpec(
path="/foobar", pathtype=rdf_paths.PathSpec.PathType.TSK)
pathspec.path = u"Grüezi"
self.assertEqual(pathspec.path, u"Grüezi")
def testRDFTypes(self):
"""Test that types are properly serialized."""
# Create an object to carry attributes
obj = aff4.FACTORY.Create("foobar", aff4.AFF4Object, token=self.token)
# Make a url object
str_url = "aff4:/users"
url = rdfvalue.RDFURN(str_url, age=1)
# Store it
# We must use a proper Attribute() instance
self.assertRaises(AttributeError, obj.Set, "aff4:stored", url)
self.assertRaises(ValueError, obj.Set, obj.Schema.STORED, str_url)
old_time = time.time
try:
time.time = lambda: 100
obj.Set(obj.Schema.STORED, url)
obj.Close()
      # Check that it's OK
obj = aff4.FACTORY.Open("foobar", token=self.token)
url = obj.Get(obj.Schema.STORED)
# It must be a real RDFURN and be the same as the original string
self.assertEqual(url.__class__, rdfvalue.RDFURN)
self.assertEqual(str(url), str_url)
# The time of the stored property reflects the time of the Set() call.
self.assertEqual(url.age, 100 * 1e6)
finally:
time.time = old_time
def testRepeatedFields(self):
"""Test handling of protobuf repeated fields."""
sample = rdf_client.Interface()
# Add a simple string.
sample.ip4_addresses.Append("127.0.0.1")
self.assertEqual(sample.ip4_addresses[0], "127.0.0.1")
# Add an invalid type.
self.assertRaises(type_info.TypeValueError, sample.addresses.Append, 2)
# Add a protobuf
sample.addresses.Append(human_readable="127.0.0.1")
self.assertEqual(sample.addresses[0].human_readable, "127.0.0.1")
self.assertEqual(len(sample.addresses), 1)
def testEnums(self):
"""Check that enums are wrapped in a descriptor class."""
sample = rdf_flows.GrrStatus()
self.assertEqual(str(sample.status), "OK")
class RDFValueTestCase(RDFValueBaseTest):
"""The base class for testing RDFValue implementations."""
# This should be overridden by the RDFValue class we want to test.
rdfvalue_class = lambda *args, **kw: None
__abstract = True # Do not register this class so pylint: disable=g-bad-name
def GenerateSample(self, number=0):
"""Create a pre-populated instance of the RDFValue.
Args:
number: A sample number. Derived classes should return a different sample
for each number.
"""
_ = number
return self.rdfvalue_class()
def CheckRDFValue(self, value, sample):
"""Check that the rdfproto is the same as the sample."""
self.assertIsInstance(sample, self.rdfvalue_class)
self.assertIsInstance(value, self.rdfvalue_class)
self.assertRDFValuesEqual(value, sample)
def testComparisons(self):
"""Checks that object comparisons work."""
sample1 = self.GenerateSample(1)
self.assertTrue(sample1 == self.GenerateSample(1))
self.assertFalse(sample1 == self.GenerateSample(2))
self.assertTrue(sample1 != self.GenerateSample(2))
self.assertFalse(sample1 != self.GenerateSample(1))
def testHashability(self):
"""RDFValue instances need to act as keys in a dict."""
sample1 = self.GenerateSample(1)
# Different instances with the same value need to hash to the same.
self.assertTrue(hash(sample1) == hash(self.GenerateSample(1)))
self.assertTrue(hash(sample1) != hash(self.GenerateSample(2)))
def testInitialization(self):
"""Check that we can use an empty initializer.
RDFValues are created in many different ways, sometimes in stages by
gradually populating fields. The only time you can be sure the user has
finished creating a proto is when it is serialized. This means strong
validation that requires all fields populated can't be done in init, but
should be done in SerializeToString.
"""
self.rdfvalue_class()
# Initialize from another instance.
sample = self.GenerateSample()
self.CheckRDFValue(self.rdfvalue_class(sample), sample)
def testSerialization(self, sample=None):
"""Make sure the RDFValue instance can be serialized."""
if sample is None:
sample = self.GenerateSample()
# Serializing to a string must produce a string.
serialized = sample.SerializeToString()
self.assertIsInstance(serialized, str)
# Ensure we can parse it again.
rdfvalue_object = self.rdfvalue_class.FromSerializedString(serialized)
self.CheckRDFValue(rdfvalue_object, sample)
# Serializing to data store must produce something the data store can
# handle.
serialized = sample.SerializeToDataStore()
if self.rdfvalue_class.data_store_type == "bytes":
self.assertIsInstance(serialized, str)
elif self.rdfvalue_class.data_store_type == "string":
self.assertIsInstance(serialized, unicode)
elif self.rdfvalue_class.data_store_type in ["unsigned_integer", "integer"]:
self.assertIsInstance(serialized, (int, long))
else:
self.fail("%s has no valid data_store_type" % self.rdfvalue_class)
# Ensure we can parse it again.
rdfvalue_object = self.rdfvalue_class.FromDatastoreValue(serialized)
self.CheckRDFValue(rdfvalue_object, sample)
class RDFProtoTestCase(RDFValueTestCase):
"""A harness for testing RDFProto implementations."""
__abstract = True # Do not register this class so pylint: disable=g-bad-name
def testInitializationEx(self):
"""Check we can initialize from additional parts."""
sample = self.GenerateSample()
# RDFProto can be initialized from a serialized protobuf.
serialized = sample.SerializeToString()
rdfvalue_sample = self.rdfvalue_class.FromSerializedString(serialized)
self.CheckRDFValue(rdfvalue_sample, sample)
# RDFProto can be initialized from another RDFProto.
new_rdfvalue_sample = self.rdfvalue_class(rdfvalue_sample)
self.CheckRDFValue(new_rdfvalue_sample, rdfvalue_sample)
# In this case the ages should be identical
self.assertEqual(int(new_rdfvalue_sample.age), int(rdfvalue_sample.age))
| {
"content_hash": "5956bbd92510d4b17be407b9675f5f90",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 81,
"avg_line_length": 33.90181818181818,
"alnum_prop": 0.7054596160034323,
"repo_name": "pidydx/grr",
"id": "7b173ac1b08273c448c045f16b27364752733752",
"size": "9371",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/lib/rdfvalues/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3409"
},
{
"name": "C",
"bytes": "10658"
},
{
"name": "C++",
"bytes": "304935"
},
{
"name": "CMake",
"bytes": "3228"
},
{
"name": "CSS",
"bytes": "26531"
},
{
"name": "HTML",
"bytes": "175613"
},
{
"name": "JavaScript",
"bytes": "25418"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Protocol Buffer",
"bytes": "308592"
},
{
"name": "Python",
"bytes": "6428769"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Ruby",
"bytes": "5604"
},
{
"name": "Shell",
"bytes": "40128"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
} |
from asynctest import TestCase
from parameterized import parameterized
from async_hvac import AsyncClient
from async_hvac.tests.util import RequestsMocker
class TestApproleRoutes(TestCase):
"""Unit tests providing coverage for approle auth backend-related methods/routes."""
@parameterized.expand([
("default mount point", None, "application1"),
("custom mount point", "my-approle-path", "application2"),
])
@RequestsMocker()
async def test_create_role(self, test_label, mount_point, role_name, requests_mocker):
expected_status_code = 204
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}'.format(
'approle' if mount_point is None else mount_point,
role_name,
)
requests_mocker.register_uri(
method='POST',
url=mock_url,
status_code=expected_status_code,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.create_role(
role_name=role_name,
)
else:
actual_response = await client.create_role(
role_name=role_name,
mount_point=mount_point,
)
self.assertEqual(
first=expected_status_code,
second=actual_response.status,
)
await client.close()
@parameterized.expand([
("default mount point", None, "application1"),
("custom mount point", "my-approle-path", "application2"),
])
@RequestsMocker()
async def test_list_roles(self, test_label, mount_point, role_name, requests_mocker):
expected_status_code = 200
mock_response = {
"auth": None,
"data": {
"keys": [
role_name,
]
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "e4c219fb-0a78-2be2-8d3c-b3715dccb920",
"warnings": None,
"wrap_info": None
}
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role?list=true'.format(
'approle' if mount_point is None else mount_point,
)
requests_mocker.register_uri(
method='GET',
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.list_roles()
else:
actual_response = await client.list_roles(
mount_point=mount_point,
)
# ensure we received our mock response data back successfully
self.assertEqual(mock_response, actual_response)
await client.close()
@parameterized.expand([
("default mount point", None, "application1", "40b3c82d-12a6-838c-9e74-1f1133867e06"),
("custom mount point", "my-approle-path", "application2", "5fs3c82d-12a6-838c-9e74-1f1133867esf"),
])
@RequestsMocker()
async def test_get_role_id(self, test_label, mount_point, role_name, role_id, requests_mocker):
expected_status_code = 200
mock_response = {
"auth": None,
"data": {
"role_id": role_id
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "85590a1a-6dd7-de79-01b0-1c285d505bf2",
"warnings": None,
"wrap_info": None
}
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}/role-id'.format(
'approle' if mount_point is None else mount_point,
role_name,
)
requests_mocker.register_uri(
method='GET',
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.get_role_id(
role_name=role_name
)
else:
actual_response = await client.get_role_id(
role_name=role_name,
mount_point=mount_point
)
# ensure we received our mock response data back successfully
self.assertEqual(
first=role_id,
second=actual_response
)
await client.close()
@parameterized.expand([
("default mount point", None, "application1", "custom-role-id-1"),
("custom mount point", "my-approle-path", "application2", "custom-role-id-2"),
])
@RequestsMocker()
async def test_set_role_id(self, test_label, mount_point, role_name, role_id, requests_mocker):
expected_status_code = 204
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}/role-id'.format(
'approle' if mount_point is None else mount_point,
role_name,
)
requests_mocker.register_uri(
method='POST',
url=mock_url,
status_code=expected_status_code,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.set_role_id(
role_name=role_name,
role_id=role_id
)
else:
actual_response = await client.set_role_id(
role_name=role_name,
role_id=role_id,
mount_point=mount_point,
)
self.assertEqual(
first=expected_status_code,
second=actual_response.status,
)
await client.close()
@parameterized.expand([
("default mount point", None, "application1"),
("custom mount point", "my-approle-path", "application2"),
])
@RequestsMocker()
async def test_get_role(self, test_label, mount_point, role_name, requests_mocker):
expected_status_code = 200
mock_response = {
"auth": None,
"data": {
"bind_secret_id": True,
"bound_cidr_list": "",
"period": 0,
"policies": [
"default"
],
"secret_id_num_uses": 0,
"secret_id_ttl": 0,
"token_max_ttl": 900,
"token_num_uses": 0,
"token_ttl": 600
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "0aab655f-ecd2-b3d4-3817-35b5bdfd3f28",
"warnings": None,
"wrap_info": None
}
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}'.format(
'approle' if mount_point is None else mount_point,
role_name,
)
requests_mocker.register_uri(
method='GET',
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.get_role(
role_name=role_name,
)
else:
actual_response = await client.get_role(
role_name=role_name,
mount_point=mount_point,
)
self.assertEqual(
first=mock_response,
second=actual_response,
)
await client.close()
@parameterized.expand([
("default mount point", None, "application1"),
("custom mount point", "my-approle-path", "application2"),
])
@RequestsMocker()
async def test_create_role_secret_id(self, test_label, mount_point, role_name, requests_mocker):
expected_status_code = 200
mock_response = {
"auth": None,
"data": {
"secret_id": "be78e3ca-f644-b099-3291-e8a6f5985cfe",
"secret_id_accessor": "b58fd0ee-130c-33bb-5f69-6d4fd1731e5f"
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "2310dc21-0fea-a2de-2d94-bb4edd59f1e9",
"warnings": None,
"wrap_info": None
}
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}/secret-id'.format(
'approle' if mount_point is None else mount_point,
role_name,
)
requests_mocker.register_uri(
method='POST',
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.create_role_secret_id(
role_name=role_name,
)
else:
actual_response = await client.create_role_secret_id(
role_name=role_name,
mount_point=mount_point,
)
self.assertEqual(
first=mock_response,
second=actual_response,
)
await client.close()
@parameterized.expand([
("default mount point", None, "application1", "be78e3ca-f644-b099-3291-e8a6f5985cfe"),
("custom mount point", "my-approle-path", "application2", "ce78e3ca-f644-b099-3291-e8a6f5985cfe"),
])
@RequestsMocker()
async def test_get_role_secret_id(self, test_label, mount_point, role_name, secret_id, requests_mocker):
expected_status_code = 200
mock_response = {
"auth": None,
"data": {
"SecretIDNumUses": 0,
"cidr_list": [],
"creation_time": "2018-06-11T07:33:57.771908-05:00",
"expiration_time": "0001-01-01T00:00:00Z",
"last_updated_time": "2018-06-11T07:33:57.771908-05:00",
"metadata": {},
"secret_id_accessor": "b58fd0ee-130c-33bb-5f69-6d4fd1731e5f",
"secret_id_num_uses": 0,
"secret_id_ttl": 0
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "718a00fa-e76f-f1fc-9b9e-f9c4baa766b3",
"wrap_info": None
}
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}/secret-id/lookup'.format(
'approle' if mount_point is None else mount_point,
role_name,
)
requests_mocker.register_uri(
method='POST',
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.get_role_secret_id(
role_name=role_name,
secret_id=secret_id,
)
else:
actual_response = await client.get_role_secret_id(
role_name=role_name,
secret_id=secret_id,
mount_point=mount_point,
)
self.assertEqual(
first=mock_response,
second=actual_response,
)
await client.close()
@parameterized.expand([
("default mount point", None, "application1", "be78e3ca-f644-b099-3291-e8a6f5985cfe"),
("custom mount point", "my-approle-path", "application2", "ce78e3ca-f644-b099-3291-e8a6f5985cfe"),
])
@RequestsMocker()
async def test_list_role_secrets(self, test_label, mount_point, role_name, secret_id, requests_mocker):
expected_status_code = 200
mock_response = {
"auth": None,
"data": {
"keys": [
secret_id
]
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "eb805845-f6ce-a514-9238-6914664dd601",
"warnings": None,
"wrap_info": None
}
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}/secret-id?list=true'.format(
'approle' if mount_point is None else mount_point,
role_name,
)
requests_mocker.register_uri(
method='GET',
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.list_role_secrets(
role_name=role_name,
)
else:
actual_response = await client.list_role_secrets(
role_name=role_name,
mount_point=mount_point,
)
self.assertEqual(
first=mock_response,
second=actual_response,
)
await client.close()
@parameterized.expand([
("default mount point", None, "application1", "be78e3ca-f644-b099-3291-e8a6f5985cfe"),
("custom mount point", "my-approle-path", "application2", "ce78e3ca-f644-b099-3291-e8a6f5985cfe"),
])
@RequestsMocker()
async def test_get_role_secret_id_accessor(self, test_label, mount_point, role_name, secret_id_accessor, requests_mocker):
expected_status_code = 200
mock_response = {
"auth": None,
"data": {
"SecretIDNumUses": 0,
"cidr_list": [],
"creation_time": "2018-06-11T07:33:57.771908-05:00",
"expiration_time": "0001-01-01T00:00:00Z",
"last_updated_time": "2018-06-11T07:33:57.771908-05:00",
"metadata": {},
"secret_id_accessor": secret_id_accessor,
"secret_id_num_uses": 0,
"secret_id_ttl": 0
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "2c9fcba6-425d-e4c0-45fa-ee90450a3c00",
"wrap_info": None
}
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}/secret-id-accessor/lookup'.format(
'approle' if mount_point is None else mount_point,
role_name,
)
requests_mocker.register_uri(
method='POST',
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.get_role_secret_id_accessor(
role_name=role_name,
secret_id_accessor=secret_id_accessor,
)
else:
actual_response = await client.get_role_secret_id_accessor(
role_name=role_name,
secret_id_accessor=secret_id_accessor,
mount_point=mount_point,
)
self.assertEqual(
first=mock_response,
second=actual_response,
)
await client.close()
@parameterized.expand([
("default mount point", None, "application1", "be78e3ca-f644-b099-3291-e8a6f5985cfe"),
("custom mount point", "my-approle-path", "application2", "ce78e3ca-f644-b099-3291-e8a6f5985cfe"),
])
@RequestsMocker()
async def test_delete_role_secret_id(self, test_label, mount_point, role_name, secret_id, requests_mocker):
expected_status_code = 204
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}/secret-id/destroy'.format(
'approle' if mount_point is None else mount_point,
role_name,
)
requests_mocker.register_uri(
method='POST',
url=mock_url,
status_code=expected_status_code,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.delete_role_secret_id(
role_name=role_name,
secret_id=secret_id,
)
else:
actual_response = await client.delete_role_secret_id(
role_name=role_name,
secret_id=secret_id,
mount_point=mount_point,
)
self.assertEqual(
first=expected_status_code,
second=actual_response.status,
)
await client.close()
@parameterized.expand([
("default mount point", None, "application1", "be78e3ca-f644-b099-3291-e8a6f5985cfe"),
("custom mount point", "my-approle-path", "application2", "ce78e3ca-f644-b099-3291-e8a6f5985cfe"),
])
@RequestsMocker()
async def test_delete_role_secret_id_accessor(self, test_label, mount_point, role_name, secret_id_accessor, requests_mocker):
expected_status_code = 204
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}/secret-id-accessor/{2}'.format(
'approle' if mount_point is None else mount_point,
role_name,
secret_id_accessor,
)
requests_mocker.register_uri(
method='DELETE',
url=mock_url,
status_code=expected_status_code,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.delete_role_secret_id_accessor(
role_name=role_name,
secret_id_accessor=secret_id_accessor,
)
else:
actual_response = await client.delete_role_secret_id_accessor(
role_name=role_name,
secret_id_accessor=secret_id_accessor,
mount_point=mount_point,
)
self.assertEqual(
first=expected_status_code,
second=actual_response.status,
)
await client.close()
@parameterized.expand([
("default mount point", None, "application1", "be78e3ca-f644-b099-3291-e8a6f5985cfe"),
("custom mount point", "my-approle-path", "application2", "ce78e3ca-f644-b099-3291-e8a6f5985cfe"),
])
@RequestsMocker()
async def test_create_role_custom_secret_id(self, test_label, mount_point, role_name, secret_id, requests_mocker):
expected_status_code = 200
mock_response = {
"auth": None,
"data": {
"secret_id": secret_id,
"secret_id_accessor": "f5cb4b7d-9111-320e-6f24-73bf45d3845d"
},
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "e7c8b2e1-95e8-cb17-e98a-6c428201f1d5",
"warnings": None,
"wrap_info": None
}
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/role/{1}/custom-secret-id'.format(
'approle' if mount_point is None else mount_point,
role_name,
)
requests_mocker.register_uri(
method='POST',
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.create_role_custom_secret_id(
role_name=role_name,
secret_id=secret_id,
)
else:
actual_response = await client.create_role_custom_secret_id(
role_name=role_name,
secret_id=secret_id,
mount_point=mount_point,
)
self.assertEqual(
first=mock_response,
second=actual_response,
)
await client.close()
@parameterized.expand([
("default mount point", None, "c7f93182-c6b1-4b6a-9dfb-03bdb6df0026", "26089502-b7d3-412a-b3e6-3d44300f9bd1"),
("custom mount point", "my-approle-path", "cf6b7c2e-3866-48f8-a764-3bcb5782a85a", "7156c666-0491-4c49-af40-7a97300fbaff"),
])
@RequestsMocker()
async def test_auth_approle(self, test_label, mount_point, role_id, secret_id, requests_mocker):
expected_status_code = 200
mock_response = {
"auth": {
"accessor": "f8b576f9-9146-4173-e174-40257d58015a",
"client_token": "3db3d089-7d3c-f531-cd3e-bfe44696a92c",
"lease_duration": 600,
"metadata": {
"role_name": "application1"
},
"policies": [
"default"
],
"renewable": True
},
"data": None,
"lease_duration": 0,
"lease_id": "",
"renewable": False,
"request_id": "2eb635ad-a763-926a-9815-4cb4d14a40f9",
"warnings": None,
"wrap_info": None
}
mock_url = 'http://127.0.0.1:8200/v1/auth/{0}/login'.format(
'approle' if mount_point is None else mount_point,
)
requests_mocker.register_uri(
method='POST',
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
client = AsyncClient()
if mount_point is None:
actual_response = await client.auth_approle(
role_id=role_id,
secret_id=secret_id,
)
else:
actual_response = await client.auth_approle(
role_id=role_id,
secret_id=secret_id,
mount_point=mount_point,
)
self.assertEqual(
first=mock_response,
second=actual_response,
)
await client.close()
| {
"content_hash": "4f63012e2963a686d4de93b36a7f7030",
"timestamp": "",
"source": "github",
"line_count": 610,
"max_line_length": 130,
"avg_line_length": 35.1983606557377,
"alnum_prop": 0.5248474686786829,
"repo_name": "Aloomaio/async-hvac",
"id": "866c6031a4da11f5ae0e80c9c3fca4b217addafd",
"size": "21471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "async_hvac/tests/test_approle_routes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "194"
},
{
"name": "Makefile",
"bytes": "242"
},
{
"name": "Python",
"bytes": "195282"
},
{
"name": "Shell",
"bytes": "1346"
}
],
"symlink_target": ""
} |
"""
This module contains the XMLSchema class creators for the xmlschema package.
Two schema classes are created at the end of this module, XMLSchema10 for XSD 1.0 and
XMLSchema11 for XSD 1.1. The latter class parses also XSD 1.0 schemas, as prescribed by
the standard.
These are the differences between XSD 1.0 and XSD 1.1 and their current development status:
 * 'all' model group extended for content groups
* Assertions for simple types
* Default attributes for complex types
* Alternative type for elements
* Inheritable attributes
* targetNamespace for restricted element and attributes
* Assert for complex types
* TODO: OpenContent and XSD 1.1 wildcards for complex types
* schema overrides
"""
import os
from collections import namedtuple, Counter
from abc import ABCMeta
import warnings
from ..compat import add_metaclass
from ..exceptions import XMLSchemaTypeError, XMLSchemaURLError, XMLSchemaValueError, XMLSchemaOSError
from ..qnames import XSD_SCHEMA, XSD_ANNOTATION, XSD_NOTATION, XSD_ATTRIBUTE, XSD_ATTRIBUTE_GROUP, \
XSD_GROUP, XSD_SIMPLE_TYPE, XSD_COMPLEX_TYPE, XSD_ELEMENT, XSD_SEQUENCE, XSD_ANY, \
XSD_ANY_ATTRIBUTE, XSD_REDEFINE, XSD_OVERRIDE
from ..helpers import has_xsd_components, get_xsd_derivation_attribute, get_xsd_form_attribute
from ..namespaces import XSD_NAMESPACE, XML_NAMESPACE, XSI_NAMESPACE, XHTML_NAMESPACE, \
XLINK_NAMESPACE, NamespaceResourcesMap, NamespaceView
from ..etree import etree_element, etree_tostring, ParseError
from ..resources import is_remote_url, url_path_is_file, fetch_resource, XMLResource
from ..converters import XMLSchemaConverter
from ..xpath import ElementPathMixin
from .exceptions import XMLSchemaParseError, XMLSchemaValidationError, XMLSchemaEncodeError, \
XMLSchemaNotBuiltError, XMLSchemaIncludeWarning, XMLSchemaImportWarning
from .xsdbase import XSD_VALIDATION_MODES, XsdValidator, ValidationMixin, XsdComponent
from .notations import XsdNotation
from .simple_types import xsd_simple_type_factory, XsdUnion, XsdAtomicRestriction, \
Xsd11AtomicRestriction, Xsd11Union
from .attributes import XsdAttribute, XsdAttributeGroup, Xsd11Attribute
from .complex_types import XsdComplexType, Xsd11ComplexType
from .groups import XsdGroup, Xsd11Group
from .elements import XsdElement, Xsd11Element
from .wildcards import XsdAnyElement, XsdAnyAttribute, Xsd11AnyElement, Xsd11AnyAttribute
from .globals_ import iterchildren_xsd_import, iterchildren_xsd_include, \
iterchildren_xsd_redefine, iterchildren_xsd_override, XsdGlobals
# Elements for building dummy groups
ATTRIBUTE_GROUP_ELEMENT = etree_element(XSD_ATTRIBUTE_GROUP)
ANY_ATTRIBUTE_ELEMENT = etree_element(
XSD_ANY_ATTRIBUTE, attrib={'namespace': '##any', 'processContents': 'lax'}
)
SEQUENCE_ELEMENT = etree_element(XSD_SEQUENCE)
ANY_ELEMENT = etree_element(
XSD_ANY,
attrib={
'namespace': '##any',
'processContents': 'lax',
'minOccurs': '0',
'maxOccurs': 'unbounded'
})
SCHEMAS_DIR = os.path.join(os.path.dirname(__file__), 'schemas/')
XML_SCHEMA_FILE = os.path.join(SCHEMAS_DIR, 'xml_minimal.xsd')
HFP_SCHEMA_FILE = os.path.join(SCHEMAS_DIR, 'XMLSchema-hasFacetAndProperty_minimal.xsd')
XSI_SCHEMA_FILE = os.path.join(SCHEMAS_DIR, 'XMLSchema-instance_minimal.xsd')
XLINK_SCHEMA_FILE = os.path.join(SCHEMAS_DIR, 'xlink.xsd')
class XMLSchemaMeta(ABCMeta):
def __new__(mcs, name, bases, dict_):
def get_attribute(attr, *args):
for obj in args:
if hasattr(obj, attr):
return getattr(obj, attr)
meta_schema = dict_.get('meta_schema') or get_attribute('meta_schema', *bases)
if meta_schema is None:
# Defining a subclass without a meta-schema (eg. XMLSchemaBase)
return super(XMLSchemaMeta, mcs).__new__(mcs, name, bases, dict_)
dict_['meta_schema'] = None
xsd_version = dict_.get('XSD_VERSION') or get_attribute('XSD_VERSION', *bases)
if xsd_version not in ('1.0', '1.1'):
raise XMLSchemaValueError("Validator class XSD version must be '1.0' or '1.1', not %r." % xsd_version)
builders = dict_.get('BUILDERS') or get_attribute('BUILDERS', *bases)
if isinstance(builders, dict):
dict_['BUILDERS'] = namedtuple('Builders', builders)(**builders)
dict_['BUILDERS_MAP'] = {
XSD_NOTATION: builders['notation_class'],
XSD_SIMPLE_TYPE: builders['simple_type_factory'],
XSD_COMPLEX_TYPE: builders['complex_type_class'],
XSD_ATTRIBUTE: builders['attribute_class'],
XSD_ATTRIBUTE_GROUP: builders['attribute_group_class'],
XSD_GROUP: builders['group_class'],
XSD_ELEMENT: builders['element_class'],
}
elif builders is None:
raise XMLSchemaValueError("Validator class doesn't have defined XSD builders.")
elif get_attribute('BUILDERS_MAP', *bases) is None:
raise XMLSchemaValueError("Validator class doesn't have a builder map for XSD globals.")
# Build the new meta-schema class
meta_schema_class_name = 'Meta' + name
meta_schema_class = super(XMLSchemaMeta, mcs).__new__(mcs, meta_schema_class_name, bases, dict_)
meta_schema_class.__qualname__ = meta_schema_class_name
globals()[meta_schema_class_name] = meta_schema_class
# Build the new meta-schema instance
schema_location = meta_schema.url if isinstance(meta_schema, XMLSchemaBase) else meta_schema
meta_schema = meta_schema_class.create_meta_schema(schema_location)
meta_schema.maps.build()
dict_['meta_schema'] = meta_schema
return super(XMLSchemaMeta, mcs).__new__(mcs, name, bases, dict_)
def __init__(cls, name, bases, dict_):
super(XMLSchemaMeta, cls).__init__(name, bases, dict_)
@add_metaclass(XMLSchemaMeta)
class XMLSchemaBase(XsdValidator, ValidationMixin, ElementPathMixin):
"""
Base class for an XML Schema instance.
    :param source: a URI that references a resource or a file path or a file-like \
object or a string containing the schema or an Element or an ElementTree document.
:type source: Element or ElementTree or str or file-like object
:param namespace: is an optional argument that contains the URI of the namespace. \
When specified it must be equal to the *targetNamespace* declared in the schema.
:type namespace: str or None
    :param validation: defines the XSD validation mode to use for building the schema; \
    its value can be 'strict', 'lax' or 'skip'.
:type validation: str
:param global_maps: is an optional argument containing an :class:`XsdGlobals` \
    instance, a mediator object for sharing declaration data between dependent \
schema instances.
:type global_maps: XsdGlobals or None
:param converter: is an optional argument that can be an :class:`XMLSchemaConverter` \
    subclass or instance, used for defining the default XML data converter for the XML Schema instance.
:type converter: XMLSchemaConverter or None
:param locations: schema location hints for namespace imports. Can be a dictionary or \
    a sequence of (namespace URI, resource URL) pairs.
:type locations: dict or list or None
:param base_url: is an optional base URL, used for the normalization of relative paths \
when the URL of the schema resource can't be obtained from the source argument.
:type base_url: str or None
:param defuse: defines when to defuse XML data. Can be 'always', 'remote' or 'never'. \
    By default only remote XML data is defused.
:type defuse: str or None
:param timeout: the timeout in seconds for fetching resources. Default is `300`.
:type timeout: int
    :param build: defines whether to build the schema maps. Default is `True`.
:type build: bool
:param use_meta: if `True` the schema processor uses the package meta-schema, otherwise the \
meta-schema is added at the end. In the latter case the meta-schema is rebuilt if any base \
namespace has been overridden by an import. Ignored if the argument *global_maps* is provided.
:type use_meta: bool
:cvar XSD_VERSION: store the XSD version (1.0 or 1.1).
:vartype XSD_VERSION: str
    :cvar BUILDERS: a namedtuple with attributes related to schema component classes. \
    Used to build local components within parsing methods.
:vartype BUILDERS: namedtuple
:cvar BUILDERS_MAP: a dictionary that maps from tag to class for XSD global components. \
    Used to build global components within lookup functions.
:vartype BUILDERS_MAP: dict
:cvar BASE_SCHEMAS: a dictionary from namespace to schema resource for meta-schema bases.
:vartype BASE_SCHEMAS: dict
:cvar meta_schema: the XSD meta-schema instance.
:vartype meta_schema: XMLSchema
:cvar attribute_form_default: the schema's *attributeFormDefault* attribute, defaults to 'unqualified'.
:vartype attribute_form_default: str
:cvar element_form_default: the schema's *elementFormDefault* attribute, defaults to 'unqualified'
:vartype element_form_default: str
:cvar block_default: the schema's *blockDefault* attribute, defaults to ''.
:vartype block_default: str
:cvar final_default: the schema's *finalDefault* attribute, defaults to ''.
:vartype final_default: str
:cvar default_attributes: the XSD 1.1 schema's *defaultAttributes* attribute, defaults to ``None``.
:vartype default_attributes: XsdAttributeGroup
:ivar target_namespace: is the *targetNamespace* of the schema, the namespace to which \
    the declarations/definitions of the schema belong. If it's empty no namespace is associated \
with the schema. In this case the schema declarations can be reused from other namespaces as \
*chameleon* definitions.
:vartype target_namespace: str
:ivar validation: validation mode, can be 'strict', 'lax' or 'skip'.
:vartype validation: str
    :ivar maps: XSD global declarations/definitions maps. This is an instance of :class:`XsdGlobals`, \
    that stores the global_maps argument or a new object when this argument is not provided.
:vartype maps: XsdGlobals
:ivar converter: the default converter used for XML data decoding/encoding.
:vartype converter: XMLSchemaConverter
:ivar locations: schema location hints.
:vartype locations: NamespaceResourcesMap
:ivar namespaces: a dictionary that maps from the prefixes used by the schema into namespace URI.
:vartype namespaces: dict
:ivar imports: a dictionary of namespace imports of the schema, that maps namespace URI to imported schema \
object, or `None` in case of unsuccessful import.
:vartype imports: dict
:ivar includes: a dictionary of included schemas, that maps a schema location to an included schema. \
    It also comprehends schemas included by "xs:redefine" or "xs:override" statements.
    :vartype includes: dict
:ivar warnings: warning messages about failure of import and include elements.
:vartype warnings: list
:ivar notations: `xsd:notation` declarations.
:vartype notations: NamespaceView
:ivar types: `xsd:simpleType` and `xsd:complexType` global declarations.
:vartype types: NamespaceView
:ivar attributes: `xsd:attribute` global declarations.
:vartype attributes: NamespaceView
:ivar attribute_groups: `xsd:attributeGroup` definitions.
:vartype attribute_groups: NamespaceView
:ivar groups: `xsd:group` global definitions.
:vartype groups: NamespaceView
:ivar elements: `xsd:element` global declarations.
:vartype elements: NamespaceView
"""
XSD_VERSION = None
BUILDERS = None
BUILDERS_MAP = None
BASE_SCHEMAS = None
meta_schema = None
# Schema defaults
target_namespace = ''
attribute_form_default = 'unqualified'
element_form_default = 'unqualified'
block_default = ''
final_default = ''
default_attributes = None # for XSD 1.1
def __init__(self, source, namespace=None, validation='strict', global_maps=None, converter=None,
locations=None, base_url=None, defuse='remote', timeout=300, build=True, use_meta=True):
super(XMLSchemaBase, self).__init__(validation)
self.source = XMLResource(source, base_url, defuse, timeout, lazy=False)
self.imports = {}
self.includes = {}
self.warnings = []
self._root_elements = None
root = self.source.root
# Parse namespaces and targetNamespace
self.namespaces = {'xml': XML_NAMESPACE} # the XML namespace is implicit
self.namespaces.update(self.source.get_namespaces())
try:
self.target_namespace = root.attrib['targetNamespace']
except KeyError:
pass
else:
if self.target_namespace == '':
# Ref: https://www.w3.org/TR/2004/REC-xmlschema-1-20041028/structures.html#element-schema
self.parse_error("The attribute 'targetNamespace' cannot be an empty string.", root)
if namespace is not None and self.target_namespace != namespace:
if self.target_namespace:
msg = u"wrong namespace (%r instead of %r) for XSD resource %r."
self.parse_error(msg % (self.target_namespace, namespace, self.url), root)
# Chameleon schema case: set the target namespace and the default namespace
self.target_namespace = namespace
if '' not in self.namespaces:
self.namespaces[''] = namespace
# Parses the schema defaults
if 'attributeFormDefault' in root.attrib:
try:
self.attribute_form_default = get_xsd_form_attribute(root, 'attributeFormDefault')
except ValueError as err:
self.parse_error(err, root)
if 'elementFormDefault' in root.attrib:
try:
self.element_form_default = get_xsd_form_attribute(root, 'elementFormDefault')
except ValueError as err:
self.parse_error(err, root)
if 'blockDefault' in root.attrib:
try:
self.block_default = get_xsd_derivation_attribute(
root, 'blockDefault', {'extension', 'restriction', 'substitution'}
)
except ValueError as err:
self.parse_error(err, root)
if 'finalDefault' in root.attrib:
try:
self.final_default = get_xsd_derivation_attribute(root, 'finalDefault')
except ValueError as err:
self.parse_error(err, root)
if self.XSD_VERSION > '1.0':
# XSD 1.1: "defaultAttributes" and "xpathDefaultNamespace"
self.xpath_default_namespace = self._parse_xpath_default_namespace(root)
if 'defaultAttributes' in root.attrib:
try:
self.default_attributes = self.resolve_qname(root.attrib['defaultAttributes'])
except XMLSchemaValueError as error:
self.parse_error(str(error), root)
# Set locations hints map and converter
self.locations = NamespaceResourcesMap(self.source.get_locations(locations))
if self.meta_schema is not None:
# Add fallback schema location hint for XHTML
self.locations[XHTML_NAMESPACE] = os.path.join(SCHEMAS_DIR, 'xhtml1-strict.xsd')
self.converter = self.get_converter(converter)
# Create or set the XSD global maps instance
if self.meta_schema is None:
self.maps = global_maps or XsdGlobals(self)
return # Meta-schemas don't need to be checked or built and don't process include/imports
elif global_maps is None:
if use_meta is False:
self.maps = XsdGlobals(self, validation)
self.locations.update(self.BASE_SCHEMAS)
elif self.target_namespace not in self.BASE_SCHEMAS:
self.maps = self.meta_schema.maps.copy(self, validation=validation)
else:
base_schemas = {k: v for k, v in self.BASE_SCHEMAS.items() if k != self.target_namespace}
meta_schema = self.create_meta_schema(base_schemas=base_schemas)
self.maps = meta_schema.maps
self.meta_schema = meta_schema
elif isinstance(global_maps, XsdGlobals):
self.maps = global_maps
else:
raise XMLSchemaTypeError("'global_maps' argument must be a %r instance." % XsdGlobals)
# Validate the schema document
if validation == 'strict':
self.check_schema(root, self.namespaces)
elif validation == 'lax':
self.errors.extend([e for e in self.meta_schema.iter_errors(root, namespaces=self.namespaces)])
# Includes and imports schemas (errors are treated as warnings)
self._include_schemas()
self._import_namespaces()
if '' not in self.namespaces:
            self.namespaces[''] = '' # By default local names are mapped to no namespace
if build:
self.maps.build()
def __repr__(self):
if self.url:
basename = os.path.basename(self.url)
return u'%s(basename=%r, namespace=%r)' % (self.__class__.__name__, basename, self.target_namespace)
else:
return u'%s(namespace=%r)' % (self.__class__.__name__, self.target_namespace)
def __setattr__(self, name, value):
if name == 'root' and value.tag not in (XSD_SCHEMA, 'schema'):
            raise XMLSchemaValueError("schema root element must have %r tag." % XSD_SCHEMA)
elif name == 'maps':
if self.meta_schema is None and hasattr(self, 'maps'):
raise XMLSchemaValueError("cannot change the global maps instance of a meta-schema")
super(XMLSchemaBase, self).__setattr__(name, value)
self.notations = NamespaceView(value.notations, self.target_namespace)
self.types = NamespaceView(value.types, self.target_namespace)
self.attributes = NamespaceView(value.attributes, self.target_namespace)
self.attribute_groups = NamespaceView(value.attribute_groups, self.target_namespace)
self.groups = NamespaceView(value.groups, self.target_namespace)
self.elements = NamespaceView(value.elements, self.target_namespace)
self.substitution_groups = NamespaceView(value.substitution_groups, self.target_namespace)
self.constraints = NamespaceView(value.constraints, self.target_namespace)
self.global_maps = (self.notations, self.types, self.attributes,
self.attribute_groups, self.groups, self.elements)
value.register(self)
elif name == 'validation' and value not in ('strict', 'lax', 'skip'):
raise XMLSchemaValueError("Wrong value %r for attribute 'validation'." % value)
else:
super(XMLSchemaBase, self).__setattr__(name, value)
def __iter__(self):
for xsd_element in sorted(self.elements.values(), key=lambda x: x.name):
yield xsd_element
def __reversed__(self):
for xsd_element in sorted(self.elements.values(), key=lambda x: x.name, reverse=True):
yield xsd_element
def __len__(self):
return len(self.elements)
# XML resource attributes access
@property
def root(self):
"""Root element of the schema."""
return self.source.root
def get_text(self):
"""
        Gets the XSD text of the schema. If the source text is not available, creates
an encoded string representation of the XSD tree.
"""
if self.source.text is None:
if self.source.url is None:
return etree_tostring(self.source.root, self.namespaces, xml_declaration=True)
else:
try:
self.source.load()
except XMLSchemaOSError:
return etree_tostring(self.source.root, self.namespaces, xml_declaration=True)
return self.source.text
@property
def url(self):
"""Schema resource URL, is `None` if the schema is built from a string."""
return self.source.url
@property
def base_url(self):
"""The base URL of the source of the schema."""
return self.source.base_url
@property
def defuse(self):
"""Defines when to defuse XML data, can be 'always', 'remote' or 'never'."""
return self.source.defuse
@property
def timeout(self):
"""Timeout in seconds for fetching resources."""
return self.source.timeout
@property
def use_meta(self):
"""Returns `True` if the meta-schema is imported."""
return self.meta_schema is not None and XSD_NAMESPACE in self.maps.namespaces
# Schema root attributes
@property
def tag(self):
"""Schema root tag. For compatibility with the ElementTree API."""
return self.root.tag
@property
def id(self):
"""The schema's *id* attribute, defaults to ``None``."""
return self.root.get('id')
@property
def version(self):
"""The schema's *version* attribute, defaults to ``None``."""
return self.root.get('version')
@property
def schema_location(self):
"""A list of location hints extracted from the *xsi:schemaLocation* attribute of the schema."""
return [(k, v) for k, v in self.source.iter_location_hints() if k]
@property
def no_namespace_schema_location(self):
"""A location hint extracted from the *xsi:noNamespaceSchemaLocation* attribute of the schema."""
for k, v in self.source.iter_location_hints():
if not k:
return v
@property
def default_namespace(self):
"""The namespace associated to the empty prefix ''."""
return self.namespaces.get('')
@property
def target_prefix(self):
"""The prefix associated to the *targetNamespace*."""
for prefix, namespace in self.namespaces.items():
if namespace == self.target_namespace:
return prefix
return ''
@classmethod
def builtin_types(cls):
"""An accessor for XSD built-in types."""
try:
return cls.meta_schema.maps.namespaces[XSD_NAMESPACE][0].types
except KeyError:
raise XMLSchemaNotBuiltError(cls.meta_schema, "missing XSD namespace in meta-schema")
except AttributeError:
raise XMLSchemaNotBuiltError(cls.meta_schema, "meta-schema unavailable for %r" % cls)
@property
def root_elements(self):
"""
The list of global elements that are not used by reference in any model of the schema.
This is implemented as lazy property because it's computationally expensive to build
when the schema model is complex.
"""
if not self.elements:
return []
elif len(self.elements) == 1:
return list(self.elements.values())
elif self._root_elements is None:
names = set(e.name for e in self.elements.values())
for xsd_element in self.elements.values():
for e in xsd_element.iter():
if e is xsd_element or isinstance(e, XsdAnyElement):
continue
elif e.ref or e.is_global:
if e.name in names:
names.discard(e.name)
if not names:
break
self._root_elements = list(names)
return [e for e in self.elements.values() if e.name in self._root_elements]
@classmethod
def create_meta_schema(cls, source=None, base_schemas=None, global_maps=None):
"""
Creates a new meta-schema instance.
        :param source: an optional argument referencing or containing the XSD meta-schema \
resource. Required if the schema class doesn't already have a meta-schema.
:param base_schemas: an optional dictionary that contains namespace URIs and schema locations. \
        If provided it's used as a substitute for the class's BASE_SCHEMAS. Also a sequence of (namespace, \
location) items can be provided if there are more schema documents for one or more namespaces.
:param global_maps: is an optional argument containing an :class:`XsdGlobals` \
instance for the new meta schema. If not provided a new map is created.
"""
if source is None:
try:
source = cls.meta_schema.url
except AttributeError:
raise XMLSchemaValueError(
"The argument 'source' is required when the class doesn't already have a meta-schema"
)
if base_schemas is None:
base_schemas = cls.BASE_SCHEMAS.items()
elif isinstance(base_schemas, dict):
base_schemas = base_schemas.items()
else:
try:
base_schemas = [(n, l) for n, l in base_schemas]
except ValueError:
                raise ValueError("The argument 'base_schemas' is neither a dictionary nor a sequence of items")
meta_schema_class = cls if cls.meta_schema is None else cls.meta_schema.__class__
meta_schema = meta_schema_class(source, XSD_NAMESPACE, global_maps=global_maps, defuse='never', build=False)
for ns, location in base_schemas:
if ns == XSD_NAMESPACE:
meta_schema.include_schema(location=location)
else:
meta_schema.import_schema(namespace=ns, location=location)
return meta_schema
@classmethod
def create_schema(cls, *args, **kwargs):
"""Creates a new schema instance of the same class of the caller."""
return cls(*args, **kwargs)
def create_any_content_group(self, parent, name=None):
"""Creates a model group related to schema instance that accepts any content."""
group = self.BUILDERS.group_class(SEQUENCE_ELEMENT, self, parent, name)
group.append(XsdAnyElement(ANY_ELEMENT, self, group))
return group
def create_any_attribute_group(self, parent, name=None):
"""Creates an attribute group related to schema instance that accepts any attribute."""
attribute_group = self.BUILDERS.attribute_group_class(ATTRIBUTE_GROUP_ELEMENT, self, parent, name)
attribute_group[None] = XsdAnyAttribute(ANY_ATTRIBUTE_ELEMENT, self, attribute_group)
return attribute_group
def copy(self):
"""Makes a copy of the schema instance. The new instance has independent maps of shared XSD components."""
schema = object.__new__(self.__class__)
schema.__dict__.update(self.__dict__)
schema.source = self.source.copy()
schema.errors = self.errors[:]
schema.warnings = self.warnings[:]
schema.namespaces = self.namespaces.copy()
schema.locations = NamespaceResourcesMap(self.locations)
schema.imports = dict(self.imports)
schema.includes = dict(self.includes)
schema.maps = self.maps.copy(validator=schema)
return schema
__copy__ = copy
@classmethod
def check_schema(cls, schema, namespaces=None):
"""
Validates the given schema against the XSD meta-schema (:attr:`meta_schema`).
:param schema: the schema instance that has to be validated.
:param namespaces: is an optional mapping from namespace prefix to URI.
:raises: :exc:`XMLSchemaValidationError` if the schema is invalid.
"""
for error in cls.meta_schema.iter_errors(schema, namespaces=namespaces):
raise error
def build(self):
"""Builds the schema XSD global maps."""
self.maps.build()
@property
def built(self):
xsd_global = None
for xsd_global in self.iter_globals(self):
if not isinstance(xsd_global, XsdComponent):
return False
if not xsd_global.built:
return False
if xsd_global is not None:
return True
prefix = '{%s}' % self.target_namespace if self.target_namespace else ''
for child in filter(lambda x: x.tag != XSD_ANNOTATION, self.root):
if child.tag in (XSD_REDEFINE, XSD_OVERRIDE):
for e in filter(lambda x: x.tag in self.BUILDERS_MAP, child):
name = e.get('name')
if name is not None:
try:
if not self.maps.lookup(e.tag, prefix + name if prefix else name).built:
return False
except KeyError:
return False
elif child.tag in self.BUILDERS_MAP:
name = child.get('name')
if name is not None:
try:
if not self.maps.lookup(child.tag, prefix + name if prefix else name).built:
return False
except KeyError:
return False
return True
@property
def validation_attempted(self):
if self.built:
return 'full'
elif any([comp.validation_attempted == 'partial' for comp in self.iter_globals()]):
return 'partial'
else:
return 'none'
def iter_globals(self, schema=None):
"""
Creates an iterator for XSD global definitions/declarations related to schema namespace.
:param schema: Optional argument for filtering only globals related to a schema instance.
"""
if schema is None:
for global_map in self.global_maps:
for obj in global_map.values():
yield obj
else:
for global_map in self.global_maps:
for obj in global_map.values():
if isinstance(obj, tuple):
if obj[1] == schema:
yield obj
elif obj.schema == schema:
yield obj
def iter_components(self, xsd_classes=None):
if xsd_classes is None or isinstance(self, xsd_classes):
yield self
for xsd_global in self.iter_globals(self):
for obj in xsd_global.iter_components(xsd_classes):
yield obj
def get_locations(self, namespace):
"""
Get a list of location hints for a namespace.
"""
try:
return list(self.locations[namespace])
except KeyError:
return []
def get_converter(self, converter=None, namespaces=None, **kwargs):
"""
Returns a new converter instance.
:param converter: can be a converter class or instance. If it's an instance \
the new instance is copied from it and configured with the provided arguments.
:param namespaces: is an optional mapping from namespace prefix to URI.
        :param kwargs: optional arguments for initializing the converter instance.
:return: a converter instance.
"""
if converter is None:
converter = getattr(self, 'converter', XMLSchemaConverter)
if isinstance(converter, XMLSchemaConverter):
return converter.copy(namespaces=namespaces, **kwargs)
elif issubclass(converter, XMLSchemaConverter):
return converter(namespaces, **kwargs)
else:
msg = "'converter' argument must be a %r subclass or instance: %r"
raise XMLSchemaTypeError(msg % (XMLSchemaConverter, converter))
def get_element(self, tag, path=None, namespaces=None):
if not path:
return self.find(tag)
elif path[-1] == '*':
return self.find(path[:-1] + tag, namespaces)
else:
return self.find(path, namespaces)
def _include_schemas(self):
"""Processes schema document inclusions and redefinitions."""
for child in iterchildren_xsd_include(self.root):
try:
self.include_schema(child.attrib['schemaLocation'], self.base_url)
except KeyError:
                pass # Attribute missing error already found by validation against meta-schema
            except (OSError, IOError) as err:
                # It is not an error if the location fails to resolve:
# https://www.w3.org/TR/2012/REC-xmlschema11-1-20120405/#compound-schema
# https://www.w3.org/TR/2012/REC-xmlschema11-1-20120405/#src-include
self.warnings.append("Include schema failed: %s." % str(err))
warnings.warn(self.warnings[-1], XMLSchemaIncludeWarning, stacklevel=3)
except (XMLSchemaURLError, XMLSchemaParseError, XMLSchemaTypeError, ParseError) as err:
msg = 'cannot include schema %r: %s' % (child.attrib['schemaLocation'], err)
if isinstance(err, (XMLSchemaParseError, ParseError)):
self.parse_error(msg)
elif self.validation == 'strict':
raise type(err)(msg)
else:
self.errors.append(type(err)(msg))
for child in iterchildren_xsd_redefine(self.root):
try:
self.include_schema(child.attrib['schemaLocation'], self.base_url)
except KeyError:
pass # Attribute missing error already found by validation against meta-schema
except (OSError, IOError) as err:
                # If the redefine doesn't contain components (annotation excluded) the statement
                # is equivalent to an include, so no error is generated. Otherwise it fails.
self.warnings.append("Redefine schema failed: %s." % str(err))
warnings.warn(self.warnings[-1], XMLSchemaIncludeWarning, stacklevel=3)
if has_xsd_components(child):
self.parse_error(str(err), child)
except (XMLSchemaURLError, XMLSchemaParseError, XMLSchemaTypeError, ParseError) as err:
msg = 'cannot redefine schema %r: %s' % (child.attrib['schemaLocation'], err)
if isinstance(err, (XMLSchemaParseError, ParseError)):
self.parse_error(msg)
elif self.validation == 'strict':
raise type(err)(msg)
else:
self.errors.append(type(err)(msg))
def include_schema(self, location, base_url=None):
"""
Includes a schema for the same namespace, from a specific URL.
:param location: is the URL of the schema.
:param base_url: is an optional base URL for fetching the schema resource.
:return: the included :class:`XMLSchema` instance.
"""
schema_url = fetch_resource(location, base_url)
for schema in self.maps.namespaces[self.target_namespace]:
if schema_url == schema.url:
break
else:
schema = self.create_schema(
schema_url, self.target_namespace, self.validation, self.maps, self.converter,
self.locations, self.base_url, self.defuse, self.timeout, False
)
if location not in self.includes:
self.includes[location] = schema
elif self.includes[location] != schema:
self.includes[schema_url] = schema
return schema
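    # Editor's note (hedged; 'common-types.xsd' is a hypothetical location): `include_schema`
    # handles documents of the *same* target namespace (xs:include/xs:redefine), while
    # `import_schema` below handles *other* namespaces (xs:import), e.g.
    #
    #     schema.include_schema('common-types.xsd', base_url=schema.base_url)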
def _import_namespaces(self):
"""
        Processes namespace imports. Imports are done on a namespace basis, not per resource:
        this is the standard and it also avoids import loops that are sometimes hard to detect.
"""
namespace_imports = NamespaceResourcesMap(map(
lambda x: (x.get('namespace'), x.get('schemaLocation')),
iterchildren_xsd_import(self.root)
))
for namespace, locations in namespace_imports.items():
# Checks the namespace against the targetNamespace of the schema
if namespace is None:
namespace = ''
if namespace == self.target_namespace:
                    self.parse_error("if the 'namespace' attribute is not present on the import statement "
                                     "then the importing schema must have a 'targetNamespace'")
continue
elif namespace == self.target_namespace:
self.parse_error("the attribute 'namespace' must be different from schema's 'targetNamespace'")
continue
# Skip import of already imported namespaces
if self.imports.get(namespace) is not None:
continue
elif namespace in self.maps.namespaces:
self.imports[namespace] = self.maps.namespaces[namespace][0]
continue
locations = [url for url in locations if url]
if not namespace:
pass
elif not locations:
locations = self.get_locations(namespace)
elif all(is_remote_url(url) for url in locations):
# If all import schema locations are remote URLs and there are local hints
# that match a local file path, try the local hints before schema locations.
                # This is not the standard processing for XSD imports, but it resolves the problem
                # of local processing of schemas tested to work from an HTTP server, providing
# explicit local hints.
local_hints = [url for url in self.get_locations(namespace) if url and url_path_is_file(url)]
if local_hints:
locations = local_hints + locations
import_error = None
for url in locations:
try:
self.import_schema(namespace, url, self.base_url)
except (OSError, IOError) as err:
# It's not an error if the location access fails (ref. section 4.2.6.2):
# https://www.w3.org/TR/2012/REC-xmlschema11-1-20120405/#composition-schemaImport
if import_error is None:
import_error = err
except (XMLSchemaURLError, XMLSchemaParseError, XMLSchemaTypeError, ParseError) as err:
if namespace:
msg = "cannot import namespace %r: %s." % (namespace, err)
else:
msg = "cannot import chameleon schema: %s." % err
if isinstance(err, (XMLSchemaParseError, ParseError)):
self.parse_error(msg)
elif self.validation == 'strict':
raise type(err)(msg)
else:
self.errors.append(type(err)(msg))
except XMLSchemaValueError as err:
self.parse_error(err)
else:
break
else:
if import_error is not None:
self.warnings.append("Namespace import failed: %s." % str(import_error))
warnings.warn(self.warnings[-1], XMLSchemaImportWarning, stacklevel=3)
self.imports[namespace] = None
def import_schema(self, namespace, location, base_url=None, force=False):
"""
Imports a schema for an external namespace, from a specific URL.
:param namespace: is the URI of the external namespace.
:param location: is the URL of the schema.
:param base_url: is an optional base URL for fetching the schema resource.
        :param force: if set to `True` imports the schema even if the namespace is already imported.
:return: the imported :class:`XMLSchema` instance.
"""
if not force:
if self.imports.get(namespace) is not None:
return self.imports[namespace]
elif namespace in self.maps.namespaces:
self.imports[namespace] = self.maps.namespaces[namespace][0]
return self.imports[namespace]
schema_url = fetch_resource(location, base_url)
if self.imports.get(namespace) is not None and self.imports[namespace].url == schema_url:
return self.imports[namespace]
elif namespace in self.maps.namespaces:
for schema in self.maps.namespaces[namespace]:
if schema_url == schema.url:
self.imports[namespace] = schema
return schema
schema = self.create_schema(
schema_url, None, self.validation, self.maps, self.converter,
self.locations, self.base_url, self.defuse, self.timeout, False
)
if schema.target_namespace != namespace:
raise XMLSchemaValueError('imported schema %r has an unmatched namespace %r' % (location, namespace))
self.imports[namespace] = schema
return schema
def resolve_qname(self, qname):
"""
QName resolution for a schema instance.
:param qname: a string in xs:QName format.
:returns: an expanded QName in the format "{*namespace-URI*}*local-name*".
:raises: `XMLSchemaValueError` for an invalid xs:QName or if the namespace prefix is not \
declared in the schema instance or if the namespace is not the *targetNamespace* and \
the namespace is not imported by the schema.
"""
qname = qname.strip()
if not qname or ' ' in qname or '\t' in qname or '\n' in qname:
raise XMLSchemaValueError("{!r} is not a valid value for xs:QName".format(qname))
if qname[0] == '{':
try:
namespace, local_name = qname[1:].split('}')
except ValueError:
raise XMLSchemaValueError("{!r} is not a valid value for xs:QName".format(qname))
elif ':' in qname:
try:
prefix, local_name = qname.split(':')
except ValueError:
raise XMLSchemaValueError("{!r} is not a valid value for xs:QName".format(qname))
else:
try:
namespace = self.namespaces[prefix]
except KeyError:
raise XMLSchemaValueError("prefix %r not found in namespace map" % prefix)
else:
namespace, local_name = self.namespaces.get('', ''), qname
if not namespace:
return local_name
elif self.meta_schema is not None and namespace != self.target_namespace and \
namespace not in {XSD_NAMESPACE, XSI_NAMESPACE} and namespace not in self.imports:
raise XMLSchemaValueError(
"the QName {!r} is mapped to the namespace {!r}, but this namespace has "
"not an xs:import statement in the schema.".format(qname, namespace)
)
return '{%s}%s' % (namespace, local_name)
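    # Editor's illustration (hedged; `schema` is a hypothetical schema whose namespace map
    # contains the 'xs' prefix): `resolve_qname` accepts the three xs:QName forms.
    #
    #     schema.resolve_qname('{http://www.w3.org/2001/XMLSchema}string')  # already extended
    #     schema.resolve_qname('xs:string')  # prefixed form, expanded through self.namespaces
    #     schema.resolve_qname('string')     # unprefixed form, mapped to the default namespace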
def validate(self, source, path=None, schema_path=None, use_defaults=True, namespaces=None):
"""
Validates an XML data against the XSD schema/component instance.
        :raises: :exc:`XMLSchemaValidationError` if the XML *data* instance is not valid.
"""
for error in self.iter_errors(source, path, schema_path, use_defaults, namespaces):
raise error
def is_valid(self, source, path=None, schema_path=None, use_defaults=True, namespaces=None):
"""
        Like :meth:`validate` except that it does not raise an exception but returns ``True`` if
        the XML document is valid, ``False`` if it's invalid.
"""
error = next(self.iter_errors(source, path, schema_path, use_defaults, namespaces), None)
return error is None
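    # Editor's sketch (hedged; 'doc.xml' is a hypothetical document): the three validation
    # APIs differ only in how errors are reported.
    #
    #     schema.validate('doc.xml')                    # raises on the first error
    #     ok = schema.is_valid('doc.xml')               # returns True or False
    #     errors = list(schema.iter_errors('doc.xml'))  # collects all errors lazily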
def iter_errors(self, source, path=None, schema_path=None, use_defaults=True, namespaces=None):
"""
Creates an iterator for the errors generated by the validation of an XML data
against the XSD schema/component instance.
:param source: the source of XML data. Can be an :class:`XMLResource` instance, a \
        path to a file or a URI of a resource or an opened file-like object or an Element \
instance or an ElementTree instance or a string containing the XML data.
:param path: is an optional XPath expression that matches the elements of the XML \
data that have to be decoded. If not provided the XML root element is selected.
:param schema_path: an alternative XPath expression to select the XSD element to use for \
decoding. Useful if the root of the XML data doesn't match an XSD global element of the schema.
:param use_defaults: Use schema's default values for filling missing data.
:param namespaces: is an optional mapping from namespace prefix to URI.
"""
if not self.built:
raise XMLSchemaNotBuiltError(self, "schema %r is not built." % self)
elif not isinstance(source, XMLResource):
source = XMLResource(source=source, defuse=self.defuse, timeout=self.timeout, lazy=False)
if not schema_path and path:
schema_path = path if path.startswith('/') else '/%s/%s' % (source.root.tag, path)
namespaces = {} if namespaces is None else namespaces.copy()
namespaces.update(source.get_namespaces())
id_map = Counter()
if source.is_lazy() and path is None:
# TODO: Document validation in lazy mode.
# Validation is done pushing a _no_deep argument for root node and with
# a path='*' for validating children. This is a feature under test.
xsd_element = self.get_element(source.root.tag, schema_path)
if xsd_element is None:
yield self.validation_error('lax', "%r is not an element of the schema" % source.root, source.root)
for result in xsd_element.iter_decode(source.root, source=source, namespaces=namespaces,
use_defaults=use_defaults, id_map=id_map, _no_deep=None):
if isinstance(result, XMLSchemaValidationError):
yield result
else:
del result
path = '*'
if not schema_path:
schema_path = '/%s/*' % source.root.tag
for elem in source.iterfind(path, namespaces):
xsd_element = self.get_element(elem.tag, schema_path, namespaces)
if xsd_element is None:
yield self.validation_error('lax', "%r is not an element of the schema" % elem, elem)
for result in xsd_element.iter_decode(elem, source=source, namespaces=namespaces,
use_defaults=use_defaults, id_map=id_map):
if isinstance(result, XMLSchemaValidationError):
yield result
else:
del result
def iter_decode(self, source, path=None, schema_path=None, validation='lax', process_namespaces=True,
namespaces=None, use_defaults=True, decimal_type=None, datetime_types=False,
converter=None, filler=None, fill_missing=False, **kwargs):
"""
Creates an iterator for decoding an XML source to a data structure.
:param source: the source of XML data. Can be an :class:`XMLResource` instance, a \
        path to a file or a URI of a resource or an opened file-like object or an Element \
instance or an ElementTree instance or a string containing the XML data.
:param path: is an optional XPath expression that matches the elements of the XML \
data that have to be decoded. If not provided the XML root element is selected.
:param schema_path: an alternative XPath expression to select the XSD element to use for \
decoding. Useful if the root of the XML data doesn't match an XSD global element of the schema.
:param validation: defines the XSD validation mode to use for decode, can be 'strict', \
'lax' or 'skip'.
:param process_namespaces: indicates whether to use namespace information in the decoding \
process, using the map provided with the argument *namespaces* and the map extracted from \
the XML document.
:param namespaces: is an optional mapping from namespace prefix to URI.
:param use_defaults: indicates whether to use default values for filling missing data.
:param decimal_type: conversion type for `Decimal` objects (generated by XSD `decimal` \
built-in and derived types), useful if you want to generate a JSON-compatible data structure.
:param datetime_types: if set to `True` the datetime and duration XSD types are decoded, \
otherwise their origin XML string is returned.
:param converter: an :class:`XMLSchemaConverter` subclass or instance to use for the decoding.
:param filler: an optional callback function to fill undecodable data with a typed value. \
        The callback function must accept one positional argument, which can be an XSD Element or \
an attribute declaration. If not provided undecodable data is replaced by `None`.
:param fill_missing: if set to `True` the decoder fills also missing attributes. \
The filling value is `None` or a typed value if the *filler* callback is provided.
:param kwargs: keyword arguments with other options for converter and decoder.
        :return: yields a decoded data object, possibly preceded by a sequence of validation \
or decoding errors.
"""
if not self.built:
raise XMLSchemaNotBuiltError(self, "schema %r is not built." % self)
elif validation not in XSD_VALIDATION_MODES:
raise XMLSchemaValueError("validation argument can be 'strict', 'lax' or 'skip': %r" % validation)
elif not isinstance(source, XMLResource):
source = XMLResource(source=source, defuse=self.defuse, timeout=self.timeout, lazy=False)
if not schema_path and path:
schema_path = path if path.startswith('/') else '/%s/%s' % (source.root.tag, path)
if process_namespaces:
namespaces = {} if namespaces is None else namespaces.copy()
namespaces.update(source.get_namespaces())
else:
namespaces = {}
converter = self.get_converter(converter, namespaces, **kwargs)
id_map = Counter()
if decimal_type is not None:
kwargs['decimal_type'] = decimal_type
for elem in source.iterfind(path, namespaces):
xsd_element = self.get_element(elem.tag, schema_path, namespaces)
if xsd_element is None:
yield self.validation_error(validation, "%r is not an element of the schema" % elem, elem)
for obj in xsd_element.iter_decode(
elem, validation, converter=converter, source=source, namespaces=namespaces,
use_defaults=use_defaults, datetime_types=datetime_types,
filler=filler, fill_missing=fill_missing, id_map=id_map, **kwargs):
yield obj
def decode(self, source, path=None, schema_path=None, validation='strict', *args, **kwargs):
"""
Decodes XML data. Takes the same arguments of the method :func:`XMLSchema.iter_decode`.
"""
data, errors = [], []
for result in self.iter_decode(source, path, schema_path, validation, *args, **kwargs):
if not isinstance(result, XMLSchemaValidationError):
data.append(result)
elif validation == 'lax':
errors.append(result)
else:
raise result
if not data:
return (None, errors) if validation == 'lax' else None
elif len(data) == 1:
return (data[0], errors) if validation == 'lax' else data[0]
else:
return (data, errors) if validation == 'lax' else data
to_dict = decode
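    # Editor's sketch (hedged; 'doc.xml' is a hypothetical document): with the default
    # validation='strict' errors raise, while validation='lax' returns a (data, errors) pair.
    #
    #     data = schema.to_dict('doc.xml')
    #     data, errors = schema.to_dict('doc.xml', validation='lax')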
def iter_encode(self, obj, path=None, validation='lax', namespaces=None, converter=None, **kwargs):
"""
Creates an iterator for encoding a data structure to an ElementTree's Element.
:param obj: the data that has to be encoded to XML data.
:param path: is an optional XPath expression for selecting the element of the schema \
        that matches the data that has to be encoded. By default the first global element of \
the schema is used.
:param validation: the XSD validation mode. Can be 'strict', 'lax' or 'skip'.
:param namespaces: is an optional mapping from namespace prefix to URI.
:param converter: an :class:`XMLSchemaConverter` subclass or instance to use for the encoding.
:param kwargs: Keyword arguments containing options for converter and encoding.
        :return: yields Element instances or validation/encoding errors.
"""
if not self.built:
raise XMLSchemaNotBuiltError(self, "schema %r is not built." % self)
elif validation not in XSD_VALIDATION_MODES:
raise XMLSchemaValueError("validation argument can be 'strict', 'lax' or 'skip': %r" % validation)
elif not self.elements:
yield XMLSchemaValueError("encoding needs at least one XSD element declaration!")
namespaces = {} if namespaces is None else namespaces.copy()
converter = self.get_converter(converter, namespaces, **kwargs)
if path is not None:
xsd_element = self.find(path, namespaces=namespaces)
elif isinstance(obj, dict) and len(obj) == 1:
xsd_element = self.elements.get(list(obj.keys())[0])
elif len(self.elements) == 1:
xsd_element = list(self.elements.values())[0]
else:
root_elements = self.root_elements
xsd_element = root_elements[0] if len(root_elements) == 1 else None
if not isinstance(xsd_element, XsdElement):
if path is not None:
msg = "the path %r doesn't match any element of the schema!" % path
else:
                msg = "unable to select an element for encoding data, provide a valid 'path' argument."
yield XMLSchemaEncodeError(self, obj, self.elements, reason=msg)
else:
for result in xsd_element.iter_encode(obj, validation, converter=converter, **kwargs):
yield result
def encode(self, obj, path=None, validation='strict', *args, **kwargs):
"""
Encodes to XML data. Takes the same arguments of the method :func:`XMLSchema.iter_encode`.
:return: An ElementTree's Element or a list containing a sequence of ElementTree's \
elements if the argument *path* matches multiple XML data chunks. If *validation* \
argument is 'lax' a 2-items tuple is returned, where the first item is the encoded \
object and the second item is a list containing the errors.
"""
data, errors = [], []
for result in self.iter_encode(obj, path, validation, *args, **kwargs):
if not isinstance(result, XMLSchemaValidationError):
data.append(result)
elif validation == 'lax':
errors.append(result)
else:
raise result
if not data:
return (None, errors) if validation == 'lax' else None
elif len(data) == 1:
return (data[0], errors) if validation == 'lax' else data[0]
else:
return (data, errors) if validation == 'lax' else data
to_etree = encode
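    # Editor's sketch (hedged; `data` and the 'note' element are hypothetical): `encode`/
    # `to_etree` is the inverse of `decode` and yields ElementTree elements; `path` selects
    # which XSD element to encode against.
    #
    #     elem = schema.encode(data, path='note')
    #     elem, errors = schema.encode(data, path='note', validation='lax')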
class XMLSchema10(XMLSchemaBase):
"""
XSD 1.0 schema class.
<schema
attributeFormDefault = (qualified | unqualified) : unqualified
blockDefault = (#all | List of (extension | restriction | substitution)) : ''
elementFormDefault = (qualified | unqualified) : unqualified
finalDefault = (#all | List of (extension | restriction | list | union)) : ''
id = ID
targetNamespace = anyURI
version = token
xml:lang = language
{any attributes with non-schema namespace . . .}>
Content: ((include | import | redefine | annotation)*, (((simpleType | complexType | group |
attributeGroup) | element | attribute | notation), annotation*)*)
</schema>
"""
XSD_VERSION = '1.0'
BUILDERS = {
'notation_class': XsdNotation,
'complex_type_class': XsdComplexType,
'attribute_class': XsdAttribute,
'any_attribute_class': XsdAnyAttribute,
'attribute_group_class': XsdAttributeGroup,
'group_class': XsdGroup,
'element_class': XsdElement,
'any_element_class': XsdAnyElement,
'restriction_class': XsdAtomicRestriction,
'union_class': XsdUnion,
'simple_type_factory': xsd_simple_type_factory
}
meta_schema = os.path.join(SCHEMAS_DIR, 'XSD_1.0/XMLSchema.xsd')
BASE_SCHEMAS = {
XML_NAMESPACE: XML_SCHEMA_FILE,
# HFP_NAMESPACE: HFP_SCHEMA_FILE,
XSI_NAMESPACE: XSI_SCHEMA_FILE,
XLINK_NAMESPACE: XLINK_SCHEMA_FILE,
}
# ++++ UNDER DEVELOPMENT, DO NOT USE!!! ++++
class XMLSchema11(XMLSchemaBase):
"""
XSD 1.1 schema class.
<schema
attributeFormDefault = (qualified | unqualified) : unqualified
blockDefault = (#all | List of (extension | restriction | substitution)) : ''
defaultAttributes = QName
xpathDefaultNamespace = (anyURI | (##defaultNamespace | ##targetNamespace | ##local)) : ##local
elementFormDefault = (qualified | unqualified) : unqualified
finalDefault = (#all | List of (extension | restriction | list | union)) : ''
id = ID
targetNamespace = anyURI
version = token
xml:lang = language
{any attributes with non-schema namespace . . .}>
Content: ((include | import | redefine | override | annotation)*, (defaultOpenContent, annotation*)?,
((simpleType | complexType | group | attributeGroup | element | attribute | notation), annotation*)*)
</schema>
"""
XSD_VERSION = '1.1'
BUILDERS = {
'notation_class': XsdNotation,
'complex_type_class': Xsd11ComplexType,
'attribute_class': Xsd11Attribute,
'any_attribute_class': Xsd11AnyAttribute,
'attribute_group_class': XsdAttributeGroup,
'group_class': Xsd11Group,
'element_class': Xsd11Element,
'any_element_class': Xsd11AnyElement,
'restriction_class': Xsd11AtomicRestriction,
'union_class': Xsd11Union,
'simple_type_factory': xsd_simple_type_factory
}
meta_schema = os.path.join(SCHEMAS_DIR, 'XSD_1.1/XMLSchema.xsd')
BASE_SCHEMAS = {
XSD_NAMESPACE: os.path.join(SCHEMAS_DIR, 'XSD_1.1/list_builtins.xsd'),
XML_NAMESPACE: XML_SCHEMA_FILE,
# HFP_NAMESPACE: HFP_SCHEMA_FILE,
XSI_NAMESPACE: XSI_SCHEMA_FILE,
XLINK_NAMESPACE: XLINK_SCHEMA_FILE,
}
def _include_schemas(self):
super(XMLSchema11, self)._include_schemas()
for child in iterchildren_xsd_override(self.root):
try:
self.include_schema(child.attrib['schemaLocation'], self.base_url)
except KeyError:
pass # Attribute missing error already found by validation against meta-schema
except (OSError, IOError) as err:
# If the override doesn't contain components (annotation excluded) the statement
# is equivalent to an include, so no error is generated. Otherwise fails.
self.warnings.append("Override schema failed: %s." % str(err))
warnings.warn(self.warnings[-1], XMLSchemaIncludeWarning, stacklevel=3)
if has_xsd_components(child):
self.parse_error(str(err), child)
XMLSchema = XMLSchema10
"""The default class for schema instances."""
| {
"content_hash": "89dabdb1a4151baeb52fc48a7faec9f1",
"timestamp": "",
"source": "github",
"line_count": 1289,
"max_line_length": 116,
"avg_line_length": 47.24204809930178,
"alnum_prop": 0.6275884719599311,
"repo_name": "brunato/xmlschema",
"id": "e6f8768ddce6939d3aefe68265c52d90f3f5d523",
"size": "61251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xmlschema/validators/schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "814028"
}
],
"symlink_target": ""
} |
"""The :mod:`~deap.creator` is a meta-factory allowing to create classes that
will fulfill the needs of your evolutionary algorithms. In effect, new
classes can be built from any imaginable type, from :class:`list` to
:class:`set`, :class:`dict`, :class:`~deap.gp.PrimitiveTree` and more,
providing the possibility to implement genetic algorithms, genetic
programming, evolution strategies, particle swarm optimizers, and many more.
"""
import array
import copy
import warnings
import copyreg as copy_reg
class_replacers = {}
"""Some classes in Python's standard library as well as third party library
may be in part incompatible with the logic used in DEAP. To palliate
this problem, the method :func:`create` uses the dictionary
`class_replacers` to identify if the base type provided is problematic, and if
so the new class inherits from the replacement class instead of the
original base class.
`class_replacers` keys are classes to be replaced and the values are the
replacing classes.
"""
try:
import numpy
(numpy.ndarray, numpy.array)
except ImportError:
# Numpy is not present, skip the definition of the replacement class.
pass
except AttributeError:
# Numpy is present, but there is either no ndarray or array in numpy,
# also skip the definition of the replacement class.
pass
else:
class _numpy_array(numpy.ndarray):
def __deepcopy__(self, memo):
"""Overrides the deepcopy from numpy.ndarray that does not copy
the object's attributes. This one will deepcopy the array and its
:attr:`__dict__` attribute.
"""
copy_ = numpy.ndarray.copy(self)
copy_.__dict__.update(copy.deepcopy(self.__dict__, memo))
return copy_
@staticmethod
def __new__(cls, iterable):
"""Creates a new instance of a numpy.ndarray from a function call.
            Adds the possibility to instantiate from an iterable."""
return numpy.array(list(iterable)).view(cls)
def __setstate__(self, state):
self.__dict__.update(state)
def __reduce__(self):
return (self.__class__, (list(self),), self.__dict__)
class_replacers[numpy.ndarray] = _numpy_array
class _array(array.array):
@staticmethod
def __new__(cls, seq=()):
return super(_array, cls).__new__(cls, cls.typecode, seq)
def __deepcopy__(self, memo):
"""Overrides the deepcopy from array.array that does not copy
the object's attributes and class type.
"""
cls = self.__class__
copy_ = cls.__new__(cls, self)
memo[id(self)] = copy_
copy_.__dict__.update(copy.deepcopy(self.__dict__, memo))
return copy_
def __reduce__(self):
return (self.__class__, (list(self),), self.__dict__)
class_replacers[array.array] = _array
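# Editor's note (hedged illustration): the replacement classes above are what make a call such as
#
#     create("Individual", array.array, typecode='d', fitness=dict)
#
# work: the created class inherits from `_array`, whose `__new__` reads the class-level
# `typecode` attribute, while `__deepcopy__` and `__reduce__` preserve instance attributes
# through copying and pickling.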
class CreatorMeta(type):
def __new__(meta, name, base, dct):
return super(CreatorMeta, meta).__new__(meta, name, (base,), dct)
def __init__(cls, name, base, dct):
# A DeprecationWarning is raised when the object inherits from the
        # class "object", which leaves the option of passing arguments, but
        # raises a warning stating that it will eventually stop permitting
# this option. Usually this happens when the base class does not
# override the __init__ method from object.
dict_inst = {}
dict_cls = {}
for obj_name, obj in dct.items():
if isinstance(obj, type):
dict_inst[obj_name] = obj
else:
dict_cls[obj_name] = obj
def initType(self, *args, **kargs):
"""Replace the __init__ function of the new type, in order to
add attributes that were defined with **kargs to the instance.
"""
for obj_name, obj in dict_inst.items():
setattr(self, obj_name, obj())
if base.__init__ is not object.__init__:
base.__init__(self, *args, **kargs)
cls.__init__ = initType
cls.reduce_args = (name, base, dct)
super(CreatorMeta, cls).__init__(name, (base,), dict_cls)
def __reduce__(cls):
return (meta_creator, cls.reduce_args)
copy_reg.pickle(CreatorMeta, CreatorMeta.__reduce__)
def meta_creator(name, base, dct):
class_ = CreatorMeta(name, base, dct)
globals()[name] = class_
return class_
def create(name, base, **kargs):
"""Creates a new class named *name* inheriting from *base* in the
:mod:`~deap.creator` module. The new class can have attributes defined by
the subsequent keyword arguments passed to the function create. If the
argument is a class (without the parenthesis), the __init__ function is
called in the initialization of an instance of the new object and the
returned instance is added as an attribute of the class' instance.
Otherwise, if the argument is not a class, (for example an :class:`int`),
it is added as a "static" attribute of the class.
:param name: The name of the class to create.
:param base: A base class from which to inherit.
    :param attribute: One or more attributes to add on instantiation of this
class, optional.
The following is used to create a class :class:`Foo` inheriting from the
standard :class:`list` and having an attribute :attr:`bar` being an empty
dictionary and a static attribute :attr:`spam` initialized to 1. ::
create("Foo", list, bar=dict, spam=1)
    The above line is exactly the same as defining in the :mod:`creator`
module something like the following. ::
class Foo(list):
spam = 1
def __init__(self):
self.bar = dict()
The :ref:`creating-types` tutorial gives more examples of the creator
usage.
"""
if name in globals():
warnings.warn("A class named '{0}' has already been created and it "
"will be overwritten. Consider deleting previous "
"creation of that class or rename it.".format(name),
RuntimeWarning)
# Check if the base class has to be replaced
if base in class_replacers:
base = class_replacers[base]
meta_creator(name, base, kargs)
| {
"content_hash": "1e07004c75977fcf0a0bf6eed87ba6a1",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 78,
"avg_line_length": 38.78048780487805,
"alnum_prop": 0.6283018867924528,
"repo_name": "Gab0/gekkoJaponicus",
"id": "1932ef24eadc6257d7b1dac21d0e3fdf2ae47e5c",
"size": "7054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "promoterz/representation/deapCreator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1589"
},
{
"name": "Python",
"bytes": "103754"
}
],
"symlink_target": ""
} |
import os
from os.path import join
import numpy as np
import pandas as pd
from hagelslag.evaluation import DistributedROC, DistributedReliability
eval_path = "/glade/p/work/dgagne/ncar_coarse_neighbor_eval_2016_s_2/"
eval_files = sorted(os.listdir(eval_path))
eval_test = pd.read_csv(join(eval_path, eval_files[0]))
models = eval_test.columns[eval_test.columns.str.contains("mean")]
run_dates = pd.DatetimeIndex([e.split("_")[-1][:8] for e in eval_files])
thresholds = [25, 50, 75]
prob_thresholds = np.concatenate(([0, 0.01], np.arange(0.1, 1.1, 0.1), [1.05]))
brier = {}
roc = {}
for thresh in thresholds:
brier[thresh] = pd.DataFrame(index=run_dates, columns=models, dtype=object)
roc[thresh] = pd.DataFrame(index=run_dates, columns=models, dtype=object)
for ev, eval_file in enumerate(eval_files):
print(eval_file)
eval_data = pd.read_csv(join(eval_path, eval_file))
us_mask = eval_data["us_mask"] == 1
for thresh in thresholds:
obs = eval_data.loc[us_mask, "MESH_Max_60min_00.50_{0:2d}".format(thresh)]
for model in models:
brier[thresh].loc[run_dates[ev], model] = DistributedReliability(thresholds=prob_thresholds)
brier[thresh].loc[run_dates[ev], model].update(eval_data.loc[us_mask, model],
obs)
roc[thresh].loc[run_dates[ev], model] = DistributedROC(thresholds=prob_thresholds)
roc[thresh].loc[run_dates[ev], model].update(eval_data.loc[us_mask, model],
obs)
out_path = "/glade/p/work/dgagne/ncar_coarse_neighbor_scores_2016/"
for thresh in [25, 50, 75]:
brier[thresh].to_csv(join(out_path, "ncar_2016_s_2_brier_objs_{0:02d}.csv".format(thresh)), index_label="Date")
roc[thresh].to_csv(join(out_path, "ncar_2016_s_2_roc_objs_{0:02d}.csv".format(thresh)), index_label="Date")
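# Editor's note (hedged): each output CSV has one row per run date and one column per model;
# the cells hold the string representation of the distributed score objects. The files can be
# inspected later with pandas, e.g.
#
#     scores = pd.read_csv(join(out_path, "ncar_2016_s_2_brier_objs_25.csv"), index_col="Date")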
| {
"content_hash": "1fce9b1178f5f6929278498c9904a259",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 115,
"avg_line_length": 51.351351351351354,
"alnum_prop": 0.6447368421052632,
"repo_name": "djgagne/hagelslag",
"id": "8343a7051a84e911c248685849fcac7c617fdef9",
"size": "1900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/eval_sspf_days.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "6617598"
},
{
"name": "Python",
"bytes": "933497"
},
{
"name": "Shell",
"bytes": "5545"
}
],
"symlink_target": ""
} |
from zeus.models import TestCaseMeta
def test_repository_test_details(
client,
db_session,
default_login,
default_testcase,
default_build,
default_repo,
default_repo_access,
):
db_session.add(
TestCaseMeta(
repository_id=default_testcase.repository_id,
name=default_testcase.name,
hash=default_testcase.hash,
first_build_id=default_build.id,
)
)
resp = client.get(
"/api/repos/{}/tests/{}".format(
default_repo.get_full_name(), default_testcase.hash
)
)
assert resp.status_code == 200
data = resp.json()
assert data["hash"] == str(default_testcase.hash)
assert data["name"] == default_testcase.name
assert data["first_build"]["id"] == str(default_build.id)
assert data["last_build"]["id"] == str(default_build.id)
| {
"content_hash": "c6fa444131239f95123b738bcb5f54f9",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 63,
"avg_line_length": 27.40625,
"alnum_prop": 0.6054732041049031,
"repo_name": "getsentry/zeus",
"id": "f9989ae4c399b4809b74bd31b76e7daf93082228",
"size": "877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/zeus/api/resources/test_repository_test_details.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3596"
},
{
"name": "HTML",
"bytes": "13037"
},
{
"name": "JavaScript",
"bytes": "327335"
},
{
"name": "Makefile",
"bytes": "1130"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "822392"
},
{
"name": "Shell",
"bytes": "2564"
}
],
"symlink_target": ""
} |
from copy import deepcopy
# Copied from django/utils/html.py
# TODO(robryk): Remove once we start using a version of Django that includes these
from django.utils.html import conditional_escape, mark_safe
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in
kwargs.iteritems()])
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def format_html_join(sep, format_string, args_generator):
"""
    A wrapper of format_html, for the common case of a group of arguments that need
to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
class FilterFunction(object):
def __init__(self, prefix, name='', choices=[], check=(lambda table, i, value : True),default=None,showall=True):
self.check = check
self.choices = choices
if showall:
self.choices = [['disable_filter','-------']]+self.choices
self.name = name
self.prefix = prefix
self.default = default
class TableField(object):
def __init__(self,id,name,sortable=True,value=(lambda table,i: unicode(i)+'th value'),render=None,filter=None,choices=[],css=None):
if render == None:
render = value
self.id = unicode(id)
self.name = name
self.sortable = sortable
self.render = render
self.value = value
self.filter = filter
self.choices = choices
self.css = css
def class_string(self):
if isinstance(self.css,str) or isinstance(self.css,unicode):
self.css=[self.css]
if self.css:
return format_html(u' class="{0}"', ' '.join(self.css))
else:
return ''
class ResultTable(object):
def length(self):
return len(self.results)
@staticmethod
def default_limit():
return 0
def __init__(self, req = {}, prefix='', autosort=True, default_sort=None, default_desc=False):
self.results = []
self.params = {}
self.filters = {}
self.filter_functions = []
self.other = {}
self.total = 0
self.prefix = prefix
self.autosort = autosort
self.fields = []
for k in req.keys():
if k.find(prefix)==0:
self.params[k[len(prefix)+1:]] = req[k]
else:
self.other[k] = req[k]
for k in self.params.keys():
if k.find('filter')==0:
if self.params[k]!='disable_filter':
self.filters[k[7:]] = self.params[k]
del self.params[k]
if 'page' not in self.params.keys():
self.params['page'] = 1
else:
self.params['page'] = int(self.params['page'])
if self.params['page'] <= 0:
self.params['page'] = 1
if 'limit' not in self.params.keys():
self.params['limit'] = self.default_limit()
else:
self.params['limit'] = int(self.params['limit'])
if self.params['limit'] < 0:
self.params['limit'] = self.default_limit()
if 'sort' not in self.params.keys() and default_sort:
self.params['sort'] = unicode(default_sort)
if default_desc:
self.params['order'] = 'desc'
def add_autofilter(self,field):
def autocheck(table,i,v):
return v!='disable_filter' and field.value(table,i)==v
choices = []
for i in range(0,self.length()):
choices.append(field.value(self,i))
choices.sort()
choices = [ [choices[i],choices[i]] for i in range(0,len(choices)) if i==0 or choices[i]!=choices[i-1]]
self.filter_functions.append(FilterFunction(name=field.name,prefix=unicode(field.id),choices=choices,check=autocheck))
def getparams(self,filters={},**kwargs):
p = deepcopy(self.params)
for key in kwargs:
p[key] = kwargs[key]
for key in filters:
p['filter_'+key] = filters[key]
for key in self.filters:
if not key in filters.keys():
p['filter_'+key] = self.filters[key]
return '?'+'&'.join([self.prefix+'_'+unicode(k)+'='+unicode(v) for (k,v) in p.iteritems()])
def render_header(self):
def sort_link(f):
if self.params.get('sort',None)!=unicode(f.id) or self.params.get('order',None)=='desc':
return self.getparams(sort=f.id,order='asc')
return self.getparams(sort=f.id,order='desc')
def header(f):
if self.autosort and f.sortable:
return format_html(u'<a class="stdlink" href="{0}">{1}</a>', sort_link(f), f.name)
else:
return f.name
s = format_html_join(u'', u'<th>{0}</th>', [(header(f),) for f in self.fields])
return format_html(u'<tr>{0}</tr>', s)
def render_row(self,i):
s = format_html_join(u'', u'<td{0}>{1}</td>', [(f.class_string(), f.render(self,i)) for f in self.fields])
return format_html(u'<tr>{0}</tr>', s)
def render_table(self):
f_key = None
order = []
for i in range(0,self.length()):
ok = True
for ff in self.filter_functions:
v = self.filters.get(ff.prefix,ff.default)
if v and not ff.check(self,i,v):
ok = False
for f in self.fields:
if f.filter=='auto' and unicode(f.id) in self.filters.keys() and self.filters[f.id]!='disable_filter' and f.value(self,i)!=self.filters[f.id]:
ok = False
if ok:
order.append(i)
if 'sort' in self.params.keys():
for f in self.fields:
if unicode(f.id) == self.params['sort']:
f_key = f
if self.autosort and f_key:
order.sort(key=lambda i: f_key.value(self,i),reverse=(self.params.get('order',None)=='desc'))
limit = self.params['limit']
page = self.params['page']
if self.autosort and limit>0:
order = order[(page-1)*limit:page*limit]
s = format_html_join(u'', u'{0}', [(self.render_row(i),) for i in order])
return format_html(u'{0}{1}', self.render_header(), s)
def render_scrollbar(self):
limit = self.params['limit']
if limit==0:
return ''
page = self.params['page']
tpages = (self.total+limit-1)/limit + 1
def render_wheelitem(i):
if i == page:
return format_html(u'<span class="wheelsel">{0}</span>', i)
else:
return format_html(u'<a class="wheelitem" href="{0}">{1}</a>',self.getparams(page=i), i)
s = format_html_join(u'', u'{0}', [(render_wheelitem(i),) for i in range(1, tpages)])
return format_html(u'<div class="wheel">{0}</div>', s)
def render_filters(self):
s = '<form action="" method="GET">'
s += ''.join(['<input type="hidden" name="'+self.prefix+'_'+unicode(k)+'" value="'+unicode(v)+'"/>' for (k,v) in self.params.iteritems() if k!='page'])
s += ''.join(['<input type="hidden" name="'+unicode(k)+'" value="'+unicode(v)+'"/>' for (k,v) in self.other.iteritems()])
for ff in self.filter_functions:
current = self.filters.get(ff.prefix,None)
s += ff.name+': <select name="'+self.prefix+'_filter_'+ff.prefix+'">'
for v in ff.choices:
s += '<option value="'+unicode(v[0])+'"'
if current and unicode(v[0])==unicode(current):
s+='selected'
s += ' >'+ unicode(v[1])+'</option>'
s += '</select>'
for f in self.fields:
if f.filter=='custom':
current=self.filters.get(unicode(f.id),None)
s += f.name+': <select name="'+self.prefix+'_filter_'+unicode(f.id)+'">'
s += '<option value="disable_filter">Show all</option>'
for v in f.choices:
s += '<option value="'+unicode(v[1])+'"'
if unicode(v[1])==current:
s+='selected'
s += ' >'+ unicode(v[0])+'</option>'
s += '</select>'
s += '<input type="submit" class="button" value="Filter"/></form>'
# FIXME(robryk): Make this saner, although this doesn't seem to be a vulnerability now.
return mark_safe(s)
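# Editor's sketch (hedged, hypothetical names such as `User`): a concrete table is built by
# subclassing ResultTable, filling `self.results`/`self.total` and describing columns with
# TableField, e.g.
#
#     class UserTable(ResultTable):
#         def __init__(self, req, prefix='users'):
#             super(UserTable, self).__init__(req, prefix=prefix)
#             self.results = list(User.objects.all())
#             self.total = len(self.results)
#             self.fields.append(TableField(id=0, name='Login',
#                                           value=lambda table, i: table.results[i].login))
#
# A view then calls render_filters(), render_header(), render_table() and render_scrollbar()
# to produce the HTML fragments.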
| {
"content_hash": "c4a81caa52568c34b4187cc74b840eb1",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 159,
"avg_line_length": 41.769911504424776,
"alnum_prop": 0.5315677966101695,
"repo_name": "zielmicha/satori",
"id": "5869a698783f51bcc979e5c50263043fb8e9fc77",
"size": "9440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satori.web/satori/web/utils/tables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "165337"
},
{
"name": "CSS",
"bytes": "72202"
},
{
"name": "HTML",
"bytes": "56647"
},
{
"name": "Java",
"bytes": "270392"
},
{
"name": "JavaScript",
"bytes": "300430"
},
{
"name": "Makefile",
"bytes": "1223"
},
{
"name": "Perl",
"bytes": "1572"
},
{
"name": "Python",
"bytes": "1011796"
},
{
"name": "Shell",
"bytes": "231478"
},
{
"name": "TeX",
"bytes": "17071"
}
],
"symlink_target": ""
} |
"""The Euler sampling method for ito processes."""
from typing import Callable, List, Optional
import tensorflow.compat.v2 as tf
from tf_quant_finance import types
from tf_quant_finance import utils as tff_utils
from tf_quant_finance.math import custom_loops
from tf_quant_finance.math import random
from tf_quant_finance.models import utils
def sample(
dim: int,
drift_fn: Callable[..., types.RealTensor],
volatility_fn: Callable[..., types.RealTensor],
times: types.RealTensor,
time_step: Optional[types.RealTensor] = None,
num_time_steps: Optional[types.IntTensor] = None,
num_samples: types.IntTensor = 1,
initial_state: Optional[types.RealTensor] = None,
random_type: Optional[random.RandomType] = None,
seed: Optional[types.IntTensor] = None,
swap_memory: bool = True,
skip: types.IntTensor = 0,
precompute_normal_draws: bool = True,
times_grid: Optional[types.RealTensor] = None,
normal_draws: Optional[types.RealTensor] = None,
watch_params: Optional[List[types.RealTensor]] = None,
validate_args: bool = False,
tolerance: Optional[types.RealTensor] = None,
dtype: Optional[tf.DType] = None,
name: Optional[str] = None) -> types.RealTensor:
"""Returns a sample paths from the process using Euler method.
For an Ito process,
```
dX = a(t, X_t) dt + b(t, X_t) dW_t
X(t=0) = x0
```
with given drift `a` and volatility `b` functions Euler method generates a
sequence {X_n} as
```
X_{n+1} = X_n + a(t_n, X_n) dt + b(t_n, X_n) (N(0, t_{n+1}) - N(0, t_n)),
X_0 = x0
```
where `dt = t_{n+1} - t_n` and `N` is a sample from the Normal distribution.
See [1] for details.
#### Example
Sampling from 2-dimensional Ito process of the form:
```none
dX_1 = mu_1 * sqrt(t) dt + s11 * dW_1 + s12 * dW_2
dX_2 = mu_2 * sqrt(t) dt + s21 * dW_1 + s22 * dW_2
```
```python
import tensorflow as tf
import tf_quant_finance as tff
import numpy as np
mu = np.array([0.2, 0.7])
s = np.array([[0.3, 0.1], [0.1, 0.3]])
num_samples = 10000
dim = 2
dtype = tf.float64
# Define drift and volatility functions
def drift_fn(t, x):
return mu * tf.sqrt(t) * tf.ones([num_samples, dim], dtype=dtype)
def vol_fn(t, x):
return s * tf.ones([num_samples, dim, dim], dtype=dtype)
# Set starting location
x0 = np.array([0.1, -1.1])
# Sample `num_samples` paths at specified `times` using Euler scheme.
times = [0.1, 1.0, 2.0]
paths = tff.models.euler_sampling.sample(
dim=dim,
drift_fn=drift_fn,
volatility_fn=vol_fn,
times=times,
num_samples=num_samples,
initial_state=x0,
time_step=0.01,
seed=42,
dtype=dtype)
# Expected: paths.shape = [10000, 3, 2]
```
#### References
[1]: Wikipedia. Euler-Maruyama method:
https://en.wikipedia.org/wiki/Euler-Maruyama_method
Args:
dim: Python int greater than or equal to 1. The dimension of the Ito
Process.
drift_fn: A Python callable to compute the drift of the process. The
callable should accept two real `Tensor` arguments of the same dtype.
The first argument is the scalar time t, the second argument is the
value of Ito process X - tensor of shape
`batch_shape + [num_samples, dim]`. `batch_shape` is the shape of the
independent stochastic processes being modelled and is inferred from the
initial state `x0`.
The result is value of drift a(t, X). The return value of the callable
is a real `Tensor` of the same dtype as the input arguments and of shape
`batch_shape + [num_samples, dim]`.
volatility_fn: A Python callable to compute the volatility of the process.
The callable should accept two real `Tensor` arguments of the same dtype
and shape `times_shape`. The first argument is the scalar time t, the
second argument is the value of Ito process X - tensor of shape
`batch_shape + [num_samples, dim]`. The result is value of drift b(t, X).
The return value of the callable is a real `Tensor` of the same dtype as
the input arguments and of shape `batch_shape + [num_samples, dim, dim]`.
times: Rank 1 `Tensor` of increasing positive real values. The times at
which the path points are to be evaluated.
time_step: An optional scalar real `Tensor` - maximal distance between
points in grid in Euler schema.
Either this or `num_time_steps` should be supplied.
Default value: `None`.
    num_time_steps: An optional scalar integer `Tensor` - a total number of time
      steps performed by the algorithm. The maximal distance between points in
      the grid is bounded by `times[-1] / (num_time_steps - times.shape[0])`.
Either this or `time_step` should be supplied.
Default value: `None`.
num_samples: Positive scalar `int`. The number of paths to draw.
Default value: 1.
initial_state: `Tensor` of shape broadcastable with
`batch_shape + [num_samples, dim]`. The initial state of the process.
`batch_shape` represents the shape of the independent batches of the
stochastic process. Note that `batch_shape` is inferred from
the `initial_state` and hence when sampling is requested for a batch of
stochastic processes, the shape of `initial_state` should be at least
`batch_shape + [1, 1]`.
Default value: None which maps to a zero initial state.
random_type: Enum value of `RandomType`. The type of (quasi)-random
number generator to use to generate the paths.
Default value: None which maps to the standard pseudo-random numbers.
seed: Seed for the random number generator. The seed is
only relevant if `random_type` is one of
`[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
`HALTON_RANDOMIZED` the seed should be a Python integer. For
      `STATELESS` and `STATELESS_ANTITHETIC` the seed must be supplied as an integer
`Tensor` of shape `[2]`.
Default value: `None` which means no seed is set.
swap_memory: A Python bool. Whether GPU-CPU memory swap is enabled for this
op. See an equivalent flag in `tf.while_loop` documentation for more
details. Useful when computing a gradient of the op since `tf.while_loop`
is used to propagate stochastic process in time.
Default value: True.
skip: `int32` 0-d `Tensor`. The number of initial points of the Sobol or
Halton sequence to skip. Used only when `random_type` is 'SOBOL',
'HALTON', or 'HALTON_RANDOMIZED', otherwise ignored.
Default value: `0`.
precompute_normal_draws: Python bool. Indicates whether the noise increments
`N(0, t_{n+1}) - N(0, t_n)` are precomputed. For `HALTON` and `SOBOL`
random types the increments are always precomputed. While the resulting
graph consumes more memory, the performance gains might be significant.
Default value: `True`.
times_grid: An optional rank 1 `Tensor` representing time discretization
grid. If `times` are not on the grid, then the nearest points from the
grid are used. When supplied, `num_time_steps` and `time_step` are
ignored.
Default value: `None`, which means that times grid is computed using
`time_step` and `num_time_steps`.
normal_draws: A `Tensor` of shape broadcastable with
`batch_shape + [num_samples, num_time_points, dim]` and the same
`dtype` as `times`. Represents random normal draws to compute increments
`N(0, t_{n+1}) - N(0, t_n)`. When supplied, `num_samples` argument is
ignored and the first dimensions of `normal_draws` is used instead.
Default value: `None` which means that the draws are generated by the
algorithm. By default normal_draws for each model in the batch are
independent.
watch_params: An optional list of zero-dimensional `Tensor`s of the same
`dtype` as `initial_state`. If provided, specifies `Tensor`s with respect
to which the differentiation of the sampling function will happen.
A more efficient algorithm is used when `watch_params` are specified.
      Note that the function becomes differentiable only with respect to these `Tensor`s
and the `initial_state`. The gradient wrt any other `Tensor` is set to be
zero.
validate_args: Python `bool`. When `True` performs multiple checks:
* That `times` are increasing with the minimum increments of the
specified tolerance.
* If `normal_draws` are supplied, checks that `normal_draws.shape[1]` is
equal to `num_time_steps` that is either supplied as an argument or
computed from `time_step`.
      When `False`, invalid dimensions may silently render incorrect outputs.
Default value: `False`.
tolerance: A non-negative scalar `Tensor` specifying the minimum tolerance
for discernible times on the time grid. Times that are closer than the
tolerance are perceived to be the same.
      Default value: `None` which maps to `1e-6` for single precision
      `dtype` and `1e-10` for double precision `dtype`.
dtype: `tf.Dtype`. If supplied the dtype for the input and output `Tensor`s.
Default value: None which means that the dtype implied by `times` is
used.
name: Python string. The name to give this op.
Default value: `None` which maps to `euler_sample`.
Returns:
    A real `Tensor` of shape `batch_shape + [num_samples, k, n]` where `k`
    is the size of `times` and `n` is the dimension of the process.
Raises:
ValueError:
(a) When `times_grid` is not supplied, and neither `num_time_steps` nor
`time_step` are supplied or if both are supplied.
(b) If `normal_draws` is supplied and `dim` is mismatched.
tf.errors.InvalidArgumentError: If `normal_draws` is supplied and
`num_time_steps` is mismatched.
"""
name = name or 'euler_sample'
with tf.name_scope(name):
times = tf.convert_to_tensor(times, dtype=dtype)
if dtype is None:
dtype = times.dtype
asserts = []
if tolerance is None:
tolerance = 1e-10 if dtype == tf.float64 else 1e-6
tolerance = tf.convert_to_tensor(tolerance, dtype=dtype)
if validate_args:
asserts.append(
tf.assert_greater(
times[1:], times[:-1] + tolerance,
message='`times` increments should be greater '
'than tolerance {0}'.format(tolerance)))
if initial_state is None:
initial_state = tf.zeros(dim, dtype=dtype)
initial_state = tf.convert_to_tensor(initial_state, dtype=dtype,
name='initial_state')
batch_shape = tff_utils.get_shape(initial_state)[:-2]
num_requested_times = tff_utils.get_shape(times)[0]
# Create a time grid for the Euler scheme.
if num_time_steps is not None and time_step is not None:
raise ValueError(
'When `times_grid` is not supplied only one of either '
'`num_time_steps` or `time_step` should be defined but not both.')
if times_grid is None:
if time_step is None:
if num_time_steps is None:
raise ValueError(
'When `times_grid` is not supplied, either `num_time_steps` '
'or `time_step` should be defined.')
num_time_steps = tf.convert_to_tensor(
num_time_steps, dtype=tf.int32, name='num_time_steps')
time_step = times[-1] / tf.cast(num_time_steps, dtype=dtype)
else:
time_step = tf.convert_to_tensor(time_step, dtype=dtype,
name='time_step')
else:
times_grid = tf.convert_to_tensor(times_grid, dtype=dtype,
name='times_grid')
if validate_args:
asserts.append(
tf.assert_greater(
times_grid[1:], times_grid[:-1] + tolerance,
message='`times_grid` increments should be greater '
'than tolerance {0}'.format(tolerance)))
times, keep_mask, time_indices = utils.prepare_grid(
times=times,
time_step=time_step,
num_time_steps=num_time_steps,
times_grid=times_grid,
tolerance=tolerance,
dtype=dtype)
if normal_draws is not None:
normal_draws = tf.convert_to_tensor(normal_draws, dtype=dtype,
name='normal_draws')
# Shape [num_time_points] + batch_shape + [num_samples, dim]
normal_draws_rank = normal_draws.shape.rank
perm = tf.concat(
[[normal_draws_rank-2], tf.range(normal_draws_rank-2),
[normal_draws_rank-1]], axis=0)
normal_draws = tf.transpose(normal_draws, perm=perm)
num_samples = tf.shape(normal_draws)[-2]
draws_dim = normal_draws.shape[-1]
if dim != draws_dim:
raise ValueError(
'`dim` should be equal to `normal_draws.shape[2]` but are '
'{0} and {1} respectively'.format(dim, draws_dim))
if validate_args:
draws_times = tff_utils.get_shape(normal_draws)[0]
asserts.append(tf.assert_equal(
draws_times, tf.shape(keep_mask)[0] - 1,
message='`num_time_steps` should be equal to '
'`tf.shape(normal_draws)[1]`'))
if validate_args:
with tf.control_dependencies(asserts):
times = tf.identity(times)
if watch_params is not None:
watch_params = [tf.convert_to_tensor(param, dtype=dtype)
for param in watch_params]
return _sample(
dim=dim,
batch_shape=batch_shape,
drift_fn=drift_fn,
volatility_fn=volatility_fn,
times=times,
keep_mask=keep_mask,
num_requested_times=num_requested_times,
num_samples=num_samples,
initial_state=initial_state,
random_type=random_type,
seed=seed,
swap_memory=swap_memory,
skip=skip,
precompute_normal_draws=precompute_normal_draws,
normal_draws=normal_draws,
watch_params=watch_params,
time_indices=time_indices,
dtype=dtype)
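# Editor's note (hedged): a batch of independent processes can be sampled by giving
# `initial_state` a batch shape. For example, with `dim=2` an `initial_state` of shape
# `[3, 1, 2]` requests 3 batches; the result then has shape `[3, num_samples, len(times), 2]`
# and the drift/volatility callables receive states of shape `[3, num_samples, 2]`, so they
# must broadcast over the leading batch dimension.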
def _sample(*,
dim,
batch_shape,
drift_fn,
volatility_fn,
times,
keep_mask,
num_requested_times,
num_samples,
initial_state,
random_type,
seed, swap_memory,
skip,
precompute_normal_draws,
watch_params,
time_indices,
normal_draws,
dtype):
"""Returns a sample of paths from the process using Euler method."""
dt = times[1:] - times[:-1]
sqrt_dt = tf.sqrt(dt)
# current_state.shape = batch_shape + [num_samples, dim]
current_state = initial_state + tf.zeros([num_samples, dim], dtype=dtype)
steps_num = tff_utils.get_shape(dt)[-1]
wiener_mean = None
if normal_draws is None:
# In order to use low-discrepancy random_type we need to generate the
# sequence of independent random normals upfront. We also precompute random
# numbers for stateless random type in order to ensure independent samples
# for multiple function calls with different seeds.
if precompute_normal_draws or random_type in (
random.RandomType.SOBOL,
random.RandomType.HALTON,
random.RandomType.HALTON_RANDOMIZED,
random.RandomType.STATELESS,
random.RandomType.STATELESS_ANTITHETIC):
normal_draws = utils.generate_mc_normal_draws(
num_normal_draws=dim, num_time_steps=steps_num,
num_sample_paths=num_samples, batch_shape=batch_shape,
random_type=random_type, dtype=dtype, seed=seed, skip=skip)
wiener_mean = None
else:
      # If pseudo or antithetic sampling is used, proceed with random sampling
# at each step.
wiener_mean = tf.zeros((dim,), dtype=dtype, name='wiener_mean')
normal_draws = None
if watch_params is None:
# Use while_loop if `watch_params` is not passed
return _while_loop(
steps_num=steps_num,
current_state=current_state,
drift_fn=drift_fn, volatility_fn=volatility_fn, wiener_mean=wiener_mean,
num_samples=num_samples, times=times,
dt=dt, sqrt_dt=sqrt_dt, keep_mask=keep_mask,
num_requested_times=num_requested_times,
swap_memory=swap_memory,
random_type=random_type, seed=seed, normal_draws=normal_draws,
dtype=dtype)
else:
# Use custom for_loop if `watch_params` is specified
return _for_loop(
batch_shape=batch_shape, steps_num=steps_num,
current_state=current_state,
drift_fn=drift_fn, volatility_fn=volatility_fn, wiener_mean=wiener_mean,
num_samples=num_samples, times=times,
dt=dt, sqrt_dt=sqrt_dt, time_indices=time_indices,
keep_mask=keep_mask, watch_params=watch_params,
random_type=random_type, seed=seed, normal_draws=normal_draws)
def _while_loop(*, steps_num, current_state,
drift_fn, volatility_fn, wiener_mean,
num_samples, times, dt, sqrt_dt, num_requested_times,
keep_mask, swap_memory, random_type, seed, normal_draws, dtype):
"""Sample paths using tf.while_loop."""
written_count = 0
if isinstance(num_requested_times, int) and num_requested_times == 1:
record_samples = False
result = current_state
else:
# If more than one sample has to be recorded, create a TensorArray
record_samples = True
element_shape = current_state.shape
result = tf.TensorArray(dtype=dtype,
size=num_requested_times,
element_shape=element_shape,
clear_after_read=False)
# Include initial state, if necessary
result = result.write(written_count, current_state)
written_count += tf.cast(keep_mask[0], dtype=tf.int32)
# Define sampling while_loop body function
def cond_fn(i, written_count, *args):
# It can happen that `times_grid[-1] > times[-1]` in which case we have
# to terminate when `written_count` reaches `num_requested_times`
del args
return tf.math.logical_and(i < steps_num,
written_count < num_requested_times)
def step_fn(i, written_count, current_state, result):
return _euler_step(
i=i,
written_count=written_count,
current_state=current_state,
result=result,
drift_fn=drift_fn,
volatility_fn=volatility_fn,
wiener_mean=wiener_mean,
num_samples=num_samples,
times=times,
dt=dt,
sqrt_dt=sqrt_dt,
keep_mask=keep_mask,
random_type=random_type,
seed=seed,
normal_draws=normal_draws,
record_samples=record_samples)
# Sample paths
_, _, _, result = tf.while_loop(
cond_fn, step_fn, (0, written_count, current_state, result),
maximum_iterations=steps_num,
swap_memory=swap_memory)
if not record_samples:
# shape batch_shape + [num_samples, 1, dim]
return tf.expand_dims(result, axis=-2)
# Shape [num_time_points] + batch_shape + [num_samples, dim]
result = result.stack()
# transpose to shape batch_shape + [num_samples, num_time_points, dim]
n = result.shape.rank
perm = list(range(1, n-1)) + [0, n - 1]
return tf.transpose(result, perm)
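# Editor's note: `_while_loop` above keeps only the states at the requested output times
# (tracked via `keep_mask`/`written_count`), either in a `tf.TensorArray` or, when a single
# time is requested, in a plain tensor; the final transpose moves the time axis from the
# front to the second-to-last position, giving batch_shape + [num_samples, num_times, dim].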
def _for_loop(*, batch_shape, steps_num, current_state,
drift_fn, volatility_fn, wiener_mean, watch_params,
num_samples, times, dt, sqrt_dt, time_indices,
keep_mask, random_type, seed, normal_draws):
"""Sample paths using custom for_loop."""
del batch_shape
  num_time_points = time_indices.shape.as_list()[-1]
if isinstance(num_time_points, int) and num_time_points == 1:
iter_nums = steps_num
else:
iter_nums = time_indices
def step_fn(i, current_state):
# Unpack current_state
current_state = current_state[0]
_, _, next_state, _ = _euler_step(
i=i,
written_count=0,
current_state=current_state,
result=current_state,
drift_fn=drift_fn,
volatility_fn=volatility_fn,
wiener_mean=wiener_mean,
num_samples=num_samples,
times=times,
dt=dt,
sqrt_dt=sqrt_dt,
keep_mask=keep_mask,
random_type=random_type,
seed=seed,
normal_draws=normal_draws,
record_samples=False)
return [next_state]
result = custom_loops.for_loop(
body_fn=step_fn,
initial_state=[current_state],
params=watch_params,
num_iterations=iter_nums)[0]
if num_time_points == 1:
return tf.expand_dims(result, axis=-2)
# result.shape=[num_time_points] + batch_shape + [num_samples, dim]
# transpose to shape=batch_shape + [num_time_points, num_samples, dim]
n = result.shape.rank
perm = list(range(1, n-1)) + [0, n - 1]
return tf.transpose(result, perm)
def _euler_step(*, i, written_count, current_state,
drift_fn, volatility_fn, wiener_mean,
num_samples, times, dt, sqrt_dt, keep_mask,
random_type, seed, normal_draws, result,
record_samples):
"""Performs one step of Euler scheme."""
current_time = times[i + 1]
written_count = tf.cast(written_count, tf.int32)
if normal_draws is not None:
dw = normal_draws[i]
else:
dw = random.mv_normal_sample(
(num_samples,), mean=wiener_mean, random_type=random_type,
seed=seed)
dw = dw * sqrt_dt[i]
dt_inc = dt[i] * drift_fn(current_time, current_state) # pylint: disable=not-callable
dw_inc = tf.linalg.matvec(volatility_fn(current_time, current_state), dw) # pylint: disable=not-callable
next_state = current_state + dt_inc + dw_inc
if record_samples:
result = result.write(written_count, next_state)
else:
result = next_state
written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
return i + 1, written_count, next_state, result
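# Note: `_euler_step` above implements the Euler-Maruyama update
#   next_state = current_state
#                + drift_fn(current_time, current_state) * dt[i]
#                + matvec(volatility_fn(current_time, current_state), dw),
# where `dw` is a standard normal draw scaled by sqrt_dt[i], taken either from
# the precomputed `normal_draws` or sampled on the fly.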
__all__ = ['sample']
| {
"content_hash": "d93cad7210954e28b4d9385c86dc3419",
"timestamp": "",
"source": "github",
"line_count": 527,
"max_line_length": 107,
"avg_line_length": 41.9146110056926,
"alnum_prop": 0.6386889401964779,
"repo_name": "google/tf-quant-finance",
"id": "a9326753dd606bd776dbbb47fb96ef0c367f8f90",
"size": "22664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tf_quant_finance/models/euler_sampling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "5759"
},
{
"name": "Jupyter Notebook",
"bytes": "1634001"
},
{
"name": "Python",
"bytes": "3661863"
},
{
"name": "Shell",
"bytes": "2338"
},
{
"name": "Starlark",
"bytes": "109192"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Series(models.Model):
name = models.CharField(max_length=128)
number = models.PositiveSmallIntegerField()
year = models.PositiveSmallIntegerField()
parent = models.ForeignKey('Series', default=None, blank=True, null=True)
imdb_id = models.CharField(max_length=9, unique=True, default=None, blank=True, null=True)
trakt_id = models.IntegerField(unique=True, default=None, blank=True, null=True)
class Meta(object):
ordering = ('year',)
verbose_name_plural = 'series'
def __str__(self):
if self.parent:
return '{series} ({season})'.format(series=self.parent.name, season=self.name)
return '{name}'.format(name=self.name)
@property
def seasons(self):
return Series.objects.filter(parent=self)
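# Illustrative usage (hypothetical names): given
#   show = Series(name='Power Rangers', ...) and
#   season = Series(name='Season 1', parent=show, ...),
# str(season) renders as 'Power Rangers (Season 1)' and show.seasons returns
# the child Series rows for that show.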
@python_2_unicode_compatible
class Episode(models.Model):
series = models.ForeignKey(Series, related_name='episodes')
name = models.CharField(max_length=128)
number = models.PositiveSmallIntegerField()
imdb_id = models.CharField(max_length=9, unique=True, default=None, blank=True, null=True)
trakt_id = models.IntegerField(unique=True, default=None, blank=True, null=True)
def __str__(self):
return self.name
| {
"content_hash": "777bc6298dae492ad164df44f2428bfa",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 94,
"avg_line_length": 34.87179487179487,
"alnum_prop": 0.6963235294117647,
"repo_name": "reiniervdwindt/power-rangers-api",
"id": "63c9e29bb0e0f2470998476d3e6d0ff6cba456cb",
"size": "1360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/series/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "617"
},
{
"name": "Python",
"bytes": "70343"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import appointment.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('appointment', '0002_vaccineappointment_20181031_1852'),
]
operations = [
migrations.AddField(
model_name='appointment',
name='pt_showed',
field=models.NullBooleanField(help_text=b'Did the patient come to this appointment?', verbose_name=b'Patient Showed'),
),
migrations.AddField(
model_name='historicalappointment',
name='pt_showed',
field=models.NullBooleanField(help_text=b'Did the patient come to this appointment?', verbose_name=b'Patient Showed'),
),
migrations.AlterField(
model_name='appointment',
name='clintime',
field=models.TimeField(default=appointment.models.generate_default_appointment_time, verbose_name=b'Time of Appointment'),
),
migrations.AlterField(
model_name='historicalappointment',
name='clintime',
field=models.TimeField(default=appointment.models.generate_default_appointment_time, verbose_name=b'Time of Appointment'),
),
]
| {
"content_hash": "2d1b244521934d9ae9f71e2dde214370",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 134,
"avg_line_length": 36.911764705882355,
"alnum_prop": 0.6438247011952192,
"repo_name": "SaturdayNeighborhoodHealthClinic/clintools",
"id": "db64fc02cda5eb0ea62f2071eb84b594b33cbba0",
"size": "1327",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "appointment/migrations/0003_pt_showed_and_default_time_function_20181103_1414.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "39945"
},
{
"name": "Python",
"bytes": "212180"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
} |
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
Author(s):
George Paulos
RackHD Functional Integration Test (FIT) library
This is the main common function library for RackHD FIT tests.
'''
# Standard imports
import fit_path # NOQA: unused import
import os
import sys
import json
import subprocess
import time
import unittest # NOQA: imported but unused
import re
import requests
import pexpect
import inspect
import argparse
from mkcfg import mkcfg
VERBOSITY = 1
TEST_PATH = None
CONFIG_PATH = None
API_PORT = "None"
API_PROTOCOL = "None"
AUTH_TOKEN = "None"
REDFISH_TOKEN = "None"
BMC_LIST = []
def fitcfg():
"""
returns the configuration dictionary
:return: dictionary
"""
return mkcfg().get()
def fitrackhd():
"""
returns the ['rackhd-config'] dictionary
:return: dictionary or None
"""
return fitcfg().get('rackhd-config', None)
def fitargs():
"""
returns the ['cmd-args-list'] dictionary
:return: dictionary or None
"""
return fitcfg().get('cmd-args-list', None)
def fitcreds():
"""
returns the ['credentials'] dictionary
:return: dictionary or None
"""
return fitcfg().get('credentials', None)
def fitinstall():
"""
returns the ['install-config']['install'] dictionary
:return: dictionary or None
"""
if 'install-config' not in fitcfg():
return None
return fitcfg()['install-config'].get('install', None)
def fitports():
"""
returns the ['install-config']['ports'] dictionary
:return: dictionary or None
"""
if 'install-config' not in fitcfg():
return None
return fitcfg()['install-config'].get('ports', None)
def fitcit():
"""
returns the ['cit-config'] dictionary
:return: dictionary or None
"""
return fitcfg().get('cit-config', None)
def fitglobals():
"""
returns the ['install-config']['global'] dictionary
:return: dictionary or None
"""
return fitcfg().get('globals', None)
def fitproxy():
"""
returns the ['install-config']['proxy'] dictionary
:return: dictionary or None
"""
if 'install-config' not in fitcfg():
return None
return fitcfg()['install-config'].get('proxy', None)
def fitskupack():
if 'install-config' not in fitcfg():
return None
return fitcfg()['install-config'].get('skupack', None)
def compose_config(use_sysargs=False):
"""
    creates a configuration based on default config files, optional overlays, and (optionally) sys.argv
:param use_sysargs: set to true if sys.argv is to be processed.
:return: None
"""
# create configuration object
cfg_obj = mkcfg()
if cfg_obj.config_is_loaded():
# a previously generated configuration has been loaded
# restore previously setup globals
update_globals()
else:
# create new configuration
# * add cmd-args-list section
# * add the default config json file composition.
# * add stack overlay
# * save off environment
# * generate a few globals
# * save (generate) the configuration to a file
args_list = {}
if use_sysargs:
# Args from command line, pass -config option to create
args_list['cmd-args-list'] = mkargs()
config = args_list['cmd-args-list']['config']
cfg_obj.create(config)
else:
# Args from default set
no_args = {}
args_list['cmd-args-list'] = mkargs(no_args)
cfg_obj.create()
# add the 'cmd-args-list' section
cfg_obj.add_from_dict(args_list)
if fitargs()['config'] != 'config':
print "*** Using config file path:", fitcfg()['cmd-args-list']['config']
if cfg_obj.get_path() is None:
default_composition = ['rackhd_default.json',
'credentials_default.json',
'install_default.json',
'cit_default.json']
# config file composition
cfg_obj.add_from_file_list(default_composition)
# stack overlay configuration
apply_stack_config()
# apply any additional configurations specified on command line
if fitcfg()['cmd-args-list']['extra']:
cfg_obj.add_from_file_list(fitcfg()['cmd-args-list']['extra'].split(','))
# add significant environment variables
cfg_obj.add_from_dict({
'env': {
'HOME': os.environ['HOME'],
'PATH': os.environ['PATH']
}
})
add_globals()
# generate the configuration file
cfg_obj.generate()
print "*** Using config file: {0}".format(cfg_obj.get_path())
def apply_stack_config():
"""
    applies the necessary stack configuration changes
:return: None
"""
stack = fitargs()['stack']
if stack is not None:
mkcfg().add_from_file('stack_config.json', stack)
if fitargs()['rackhd_host'] == 'localhost' and 'rackhd_host' in fitcfg():
fitargs()['rackhd_host'] = fitcfg()['rackhd_host']
if 'bmc' in fitcfg():
fitargs()['bmc'] = fitcfg()['bmc']
if 'hyper' in fitcfg():
fitargs()['hyper'] = fitcfg()['hyper']
def add_globals():
"""
    create a handful of global shortcuts
:return:
"""
global TEST_PATH
global CONFIG_PATH
global API_PORT
global API_PROTOCOL
global VERBOSITY
# set api port and protocol from command line
if fitargs()['http'] is True:
API_PROTOCOL = "http"
API_PORT = str(fitports()['http'])
elif fitargs()['https'] is True:
API_PROTOCOL = "https"
API_PORT = str(fitports()['https'])
else: # default protocol is http
API_PROTOCOL = "http"
API_PORT = str(fitports()['http'])
if fitargs()['port'] != "None": # port override via command line argument -port
API_PORT = fitargs()['port']
# add globals section to base configuration
TEST_PATH = fit_path.fit_path_root + '/'
CONFIG_PATH = TEST_PATH + fitargs()['config'] + "/"
mkcfg().add_from_dict({
'globals': {
'API_PORT': API_PORT,
'API_PROTOCOL': API_PROTOCOL,
'TEST_PATH': TEST_PATH,
'CONFIG_PATH': CONFIG_PATH,
'VERBOSITY': fitargs()['v']
}
})
# set OVA template from command line argument -template
if fitargs()["template"] == "None":
fitargs()["template"] = fitcfg()['install-config']['template']
def update_globals():
global API_PORT
global API_PROTOCOL
global TEST_PATH
global CONFIG_PATH
global VERBOSITY
API_PORT = fitglobals()['API_PORT']
API_PROTOCOL = fitglobals()['API_PROTOCOL']
TEST_PATH = fitglobals()['TEST_PATH']
CONFIG_PATH = fitglobals()['CONFIG_PATH']
VERBOSITY = fitglobals()['VERBOSITY']
def _fix_check_unicode(value):
"""
function to help with unicode characters in command line arguments.
    * will substitute a single '-' for a couple of different single-dash-like unicode chars
    * will substitute a double '--' for an em-dash unicode character
    If there is still unicode in the string once the substitution is complete that would
    prevent converting to pure ascii, None is returned. Otherwise the fixed string is returned.
"""
# First turn from byte-string to utf-8
value = value.decode("utf-8")
# These are the various hyphen/dashes that
# look like single '-'s...
h_minus = u'\u002d'
hyphen = u'\u2010'
en_dash = u'\u2013'
single_dash_list = [h_minus, hyphen, en_dash]
# walk through and substitute single-dash-like unicode to plain minus
for convert_dash in single_dash_list:
value = value.replace(convert_dash, '-')
# now do the em_dash, which is the '--'
em_dash = u'\u2014'
value = value.replace(em_dash, '--')
# Now convert to ascii and complain if we can't
try:
final_value = value.decode('ascii')
except UnicodeEncodeError:
final_value = None
return final_value
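# Illustrative behaviour (hypothetical inputs): an argument pasted with an
# en dash, e.g. u'\u2013stack'.encode('utf-8'), comes back as '-stack', while
# an argument containing unrelated non-ascii (e.g. u'caf\u00e9') cannot be
# down-converted and returns None.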
def mkargs(in_args=None):
"""
processes the command line options as passed in by in_args.
:param in_args: input arguments
:return: dictionary of processed arguments
"""
if in_args is None:
in_args = sys.argv[1:]
# command line argument parser returns cmd_args dict
arg_parser = argparse.ArgumentParser(
description="Command Help", add_help=False)
arg_parser.add_argument('-h', '--help', action='store_true', default=False,
help='show this help message and exit')
arg_parser.add_argument("-test", default="tests/",
help="test to execute, default: tests/")
arg_parser.add_argument("-config", default="config",
help="config file location, default: config")
arg_parser.add_argument("-extra", default=None,
help="comma separated list of extra config files (found in 'config' directory)")
arg_parser.add_argument("-group", default="all",
help="test group to execute: 'smoke', 'regression', 'extended', default: 'all'")
arg_parser.add_argument("-stack", default="vagrant",
help="stack label (test bed)")
arg_parser.add_argument("-rackhd_host", default="localhost",
help="RackHD appliance IP address or hostname, default: localhost")
arg_parser.add_argument("-template", default="None",
help="path or URL link to OVA template or RackHD OVA")
arg_parser.add_argument("-xunit", default="False", action="store_true",
help="generates xUnit XML report files")
arg_parser.add_argument("-numvms", default=1, type=int,
help="number of virtual machines for deployment on specified stack")
arg_parser.add_argument("-list", default="False", action="store_true",
help="generates test list only")
arg_parser.add_argument("-sku", default="all",
help="node SKU name, example: Quanta-T41, default=all")
group = arg_parser.add_mutually_exclusive_group(required=False)
group.add_argument("-obmmac", default="all",
help="node OBM MAC address, example:00:1e:67:b1:d5:64")
group.add_argument("-nodeid", default="None",
help="node identifier string of a discovered node, example: 56ddcf9a8eff16614e79ec74")
group2 = arg_parser.add_mutually_exclusive_group(required=False)
group2.add_argument("-http", default="False", action="store_true",
help="forces the tests to utilize the http API protocol")
group2.add_argument("-https", default="False", action="store_true",
help="forces the tests to utilize the https API protocol")
arg_parser.add_argument("-port", default="None",
help="API port number override, default from install_config.json")
arg_parser.add_argument("-v", default=4, type=int,
help="Verbosity level of console and log output (see -nose-help for more options), Built Ins: " +
"0: Minimal logging, " +
"1: Display ERROR and CRITICAL to console and to files, " +
"3: Display INFO to console and to files, " +
"4: (default) Display INFO to console, and DEBUG to files, " +
"5: Display infra.run and test.run DEBUG to both, " +
"6: Add display of test.data (rest calls and status) DEBUG to both, " +
"7: Add display of infra.data (ipmi, ssh) DEBUG to both, " +
"9: Display infra.* and test.* at DEBUG_9 (max output) ")
arg_parser.add_argument("-nose-help", default=False, action="store_true", dest="nose_help",
help="display help from underlying nosetests command, including additional log options")
fixed_args = []
for arg in in_args:
new_value = _fix_check_unicode(arg)
if new_value is None:
arg_parser.error(
"Argument '{0}' of {1} had unknown unicode characters in it, likely from a cut-and-paste.".format(
arg, in_args))
fixed_args.append(new_value)
in_args = fixed_args
# we want to grab the arguments we want, and pass the rest
# into the nosetest invocation.
parse_results, other_args = arg_parser.parse_known_args(in_args)
# if 'help' was set, handle it as best we can. We use argparse to
# display usage and arguments, and then give nose a shot at printing
# things out (if they set that option)
if parse_results.help:
arg_parser.print_help()
if parse_results.nose_help:
print
print "NOTE: below is the --help output from nosetests."
print
rcode = _run_nose_help()
else:
rcode = 0
sys.exit(rcode)
# And if they only did --nose-help
if parse_results.nose_help:
rcode = _run_nose_help()
sys.exit(rcode)
# Now handle mapping -v to infra-logging. Check stream-monitor/flogging/README.md
# for how loggers and handlers fit together.
if parse_results.v >= 9:
# Turn them all up to 11.
vargs = ['--sm-set-combo-level', 'console*', 'DEBUG_9']
elif parse_results.v >= 7:
# ends up turning everything up to DEBUG_5 (levels 5 + 6 + infra.data)
vargs = ['--sm-set-combo-level', 'console*', 'DEBUG_5']
elif parse_results.v >= 6:
# infra.run and test.* to DEBUG (level 5 + test.data)
vargs = ['--sm-set-combo-level', 'console*:(test.data|*.run)', 'DEBUG_5']
elif parse_results.v >= 5:
# infra and test.run to DEBUG
vargs = ['--sm-set-combo-level', 'console*:*.run', 'DEBUG_5']
elif parse_results.v >= 4:
# default
vargs = []
elif parse_results.v >= 3:
# dial BACK output to files to INFO_5
vargs = ['--sm-set-logger-level', '*', 'INFO_5']
elif parse_results.v >= 1:
        # dial BACK all output to just ERROR and CRITICAL on console and in logs
vargs = ['--sm-set-combo-level', '*', 'ERROR_5']
else:
        # 0 currently tries to squish ALL logging output.
vargs = ['--sm-set-combo-level', '*', 'CRITICAL_0']
other_args.extend(vargs)
# Put all the args we did not use and put them
# into the parse_results so they can be found
# by run_nose()
parse_results.unhandled_arguments = other_args
# parse arguments to cmd_args dict
cmd_args = vars(parse_results)
return cmd_args
def timestamp(): # return formatted current timestamp
return time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())
# This routine executes a sleep with countdown
def countdown(sleep_time, sleep_interval=1):
sys.stdout.write("Sleeping for " + str(sleep_time * sleep_interval) + " seconds.")
sys.stdout.flush()
for _ in range(0, sleep_time):
time.sleep(sleep_interval)
sys.stdout.write(".")
sys.stdout.flush()
print "Waking!"
return
def remote_shell(shell_cmd, expect_receive="", expect_send="", timeout=300,
address=None, user=None, password=None, vmnum=1):
'''
    Run an ssh-based shell command on a remote machine at fitargs()['rackhd_host']
:param shell_cmd: string based command
:param expect_receive:
:param expect_send:
:param timeout: in seconds
:param address: IP or hostname of remote host
:param user: username of remote host
:param password: password of remote host
    :return: dict = {'stdout': str:output, 'exitcode': return code}
'''
if not address:
if (vmnum == 1):
address = fitargs()['rackhd_host']
else:
address = fitargs()['rackhd_host'].replace("ora", "ora-" + str(vmnum - 1))
if not user:
user = fitcreds()['rackhd_host'][0]['username']
if not password:
password = fitcreds()['rackhd_host'][0]['password']
logfile_redirect = None
if VERBOSITY >= 4:
print "VM number: ", vmnum
print "remote_shell: Host =", address
print "remote_shell: Command =", shell_cmd
if VERBOSITY >= 9:
print "remote_shell: STDOUT =\n"
logfile_redirect = sys.stdout
    # if localhost just run the command locally
if fitargs()['rackhd_host'] == 'localhost':
(command_output, exitstatus) = \
pexpect.run("sudo bash -c \"" + shell_cmd + "\"",
withexitstatus=1,
events={"assword": password + "\n"},
timeout=timeout, logfile=logfile_redirect)
return {'stdout': command_output, 'exitcode': exitstatus}
# this clears the ssh key from ~/.ssh/known_hosts
subprocess.call(["touch ~/.ssh/known_hosts;ssh-keygen -R " +
address + " -f ~/.ssh/known_hosts >/dev/null 2>&1"], shell=True)
shell_cmd.replace("'", "\\\'")
if expect_receive == "" or expect_send == "":
(command_output, exitstatus) = \
pexpect.run("ssh -q -o StrictHostKeyChecking=no -t " + user + "@" +
address + " sudo bash -c \\\"" + shell_cmd + "\\\"",
withexitstatus=1,
events={"assword": password + "\n"},
timeout=timeout, logfile=logfile_redirect)
else:
(command_output, exitstatus) = \
pexpect.run("ssh -q -o StrictHostKeyChecking=no -t " + user + "@" +
address + " sudo bash -c \\\"" + shell_cmd + "\\\"",
withexitstatus=1,
events={"assword": password + "\n",
expect_receive: expect_send + "\n"},
timeout=timeout, logfile=logfile_redirect)
if VERBOSITY >= 4:
print shell_cmd, "\nremote_shell: Exit Code =", exitstatus
return {'stdout': command_output, 'exitcode': exitstatus}
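# Typical (illustrative) usage of remote_shell():
#   rc = remote_shell('service on-http status')
#   if rc['exitcode'] == 0:
#       print rc['stdout']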
def scp_file_to_ora(src_file_name, vmnum=1):
# legacy call
scp_file_to_host(src_file_name, vmnum)
def scp_file_to_host(src_file_name, vmnum=1):
'''
scp the given file over to the RackHD host and place it in the home directory.
:param src_file_name: name of file to copy over. May include path
:type src_file_name: basestring
:return: just name of file on target (no path)
:rtype: basestring
'''
logfile_redirect = file('/dev/null', 'w')
just_fname = os.path.basename(src_file_name)
# if localhost just copy to home dir
if fitargs()['rackhd_host'] == 'localhost':
remote_shell('cp ' + src_file_name + ' ~/' + src_file_name)
return src_file_name
if (vmnum == 1):
rackhd_hostname = fitargs()['rackhd_host']
else:
rackhd_hostname = fitargs()['rackhd_host'].replace("ora", "ora-" + str(vmnum - 1))
scp_target = fitcreds()['rackhd_host'][0]['username'] + '@{0}:'.format(rackhd_hostname)
cmd = 'scp -o StrictHostKeyChecking=no {0} {1}'.format(src_file_name, scp_target)
if VERBOSITY >= 4:
print "scp_file_to_host: '{0}'".format(cmd)
if VERBOSITY >= 9:
logfile_redirect = sys.stdout
(command_output, ecode) = pexpect.run(
cmd, withexitstatus=1,
events={'(?i)assword: ': fitcreds()['rackhd_host'][0]['password'] + '\n'},
logfile=logfile_redirect)
if VERBOSITY >= 4:
print "scp_file_to_host: Exit Code = {0}".format(ecode)
assert ecode == 0, \
'failed "{0}" because {1}. Output={2}'.format(cmd, ecode, command_output)
return just_fname
def get_auth_token():
# This is run once to get an auth token which is set to global AUTH_TOKEN and used for rest of session
global AUTH_TOKEN
global REDFISH_TOKEN
api_login = {"username": fitcreds()["api"][0]["admin_user"], "password": fitcreds()["api"][0]["admin_pass"]}
redfish_login = {"UserName": fitcreds()["api"][0]["admin_user"], "Password": fitcreds()["api"][0]["admin_pass"]}
try:
restful("https://" + fitargs()['rackhd_host'] + ":" + str(fitports()['https']) +
"/login", rest_action="post", rest_payload=api_login, rest_timeout=2)
except:
AUTH_TOKEN = "Unavailable"
return False
else:
api_data = restful("https://" + fitargs()['rackhd_host'] + ":" + str(fitports()['https']) +
"/login", rest_action="post", rest_payload=api_login, rest_timeout=2)
if api_data['status'] == 200:
AUTH_TOKEN = str(api_data['json']['token'])
redfish_data = restful("https://" + fitargs()['rackhd_host'] + ":" + str(fitports()['https']) +
"/redfish/v1/SessionService/Sessions",
rest_action="post", rest_payload=redfish_login, rest_timeout=2)
if 'x-auth-token' in redfish_data['headers']:
REDFISH_TOKEN = redfish_data['headers']['x-auth-token']
return True
else:
print "WARNING: Redfish API token not available."
else:
AUTH_TOKEN = "Unavailable"
return False
def rackhdapi(url_cmd, action='get', payload=[], timeout=None, headers={}):
'''
    This routine will build the URL for the RackHD API, enable the port, execute the request, and return the data
Example: rackhdapi('/api/current/nodes') - simple 'get' command
Example: rackhdapi("/api/current/nodes/ID/dhcp/whitelist", action="post")
:param url_cmd: url command for monorail api
:param action: rest action (get/put/post/delete)
:param payload: rest payload
:param timeout: rest timeout
:param headers: rest_headers
:return: {'json':result_data.json(), 'text':result_data.text,
'status':result_data.status_code,
'headers':result_data.headers.get('content-type'),
'timeout':False}
'''
# Retrieve authentication token for the session
if AUTH_TOKEN == "None":
get_auth_token()
return restful(API_PROTOCOL + "://" + fitargs()['rackhd_host'] + ":" + str(API_PORT) + url_cmd,
rest_action=action, rest_payload=payload, rest_timeout=timeout, rest_headers=headers)
def restful(url_command, rest_action='get', rest_payload=[], rest_timeout=None, sslverify=False, rest_headers={}):
'''
This routine executes a rest API call to the host.
:param url_command: the full URL for the command
    :param rest_action: rest action to perform (get/post/put/delete)
:param rest_payload: payload for rest request
:param rest_headers: headers (JSON dict)
:param rest_timeout: timeout for rest request
:param sslverify: ssl Verify (True/False)
:return: {'json':result_data.json(), 'text':result_data.text,
'status':result_data.status_code,
'headers':result_data.headers,
'timeout':False}
'''
result_data = None
# print URL and action
if VERBOSITY >= 4:
print "restful: Action = ", rest_action, ", URL = ", url_command
# prepare payload for XML output
payload_print = []
try:
json.dumps(rest_payload)
except:
payload_print = []
else:
payload_print = json.dumps(rest_payload, sort_keys=True, indent=4,)
if len(payload_print) > 4096:
payload_print = payload_print[0:4096] + '\n...truncated...\n'
if VERBOSITY >= 7 and rest_payload != []:
print "restful: Payload =\n", payload_print
rest_headers.update({"Content-Type": "application/json"})
if VERBOSITY >= 5:
print "restful: Request Headers =", rest_headers, "\n"
# If AUTH_TOKEN is set, add to header
if AUTH_TOKEN != "None" and AUTH_TOKEN != "Unavailable" and "authorization" not in rest_headers:
rest_headers.update({"authorization": "JWT " + AUTH_TOKEN, "X-Auth-Token": REDFISH_TOKEN})
# Perform rest request
try:
if rest_action == "get":
result_data = requests.get(url_command,
timeout=rest_timeout,
verify=sslverify,
headers=rest_headers)
if rest_action == "delete":
result_data = requests.delete(url_command,
data=json.dumps(rest_payload),
timeout=rest_timeout,
verify=sslverify,
headers=rest_headers)
if rest_action == "put":
result_data = requests.put(url_command,
data=json.dumps(rest_payload),
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify,
)
if rest_action == "binary-put":
rest_headers.update({"Content-Type": "application/x-www-form-urlencoded"})
result_data = requests.put(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify,
)
if rest_action == "text-put":
rest_headers.update({"Content-Type": "text/plain"})
result_data = requests.put(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify,
)
if rest_action == "post":
result_data = requests.post(url_command,
data=json.dumps(rest_payload),
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
if rest_action == "binary-post":
rest_headers.update({"Content-Type": "application/x-www-form-urlencoded"})
result_data = requests.post(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
if rest_action == "text-post":
rest_headers.update({"Content-Type": "text/plain"})
result_data = requests.post(url_command,
data=rest_payload,
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
if rest_action == "patch":
result_data = requests.patch(url_command,
data=json.dumps(rest_payload),
headers=rest_headers,
timeout=rest_timeout,
verify=sslverify
)
except requests.exceptions.Timeout:
return {'json': {}, 'text': '',
'status': 0,
'headers': '',
'timeout': True}
try:
result_data.json()
except ValueError:
if VERBOSITY >= 9:
print "restful: TEXT =\n"
print result_data.text
if VERBOSITY >= 6:
print "restful: Response Headers =", result_data.headers, "\n"
if VERBOSITY >= 4:
print "restful: Status code =", result_data.status_code, "\n"
return {'json': {}, 'text': result_data.text, 'status': result_data.status_code,
'headers': result_data.headers,
'timeout': False}
else:
if VERBOSITY >= 9:
print "restful: JSON = \n"
print json.dumps(result_data.json(), sort_keys=True, indent=4)
if VERBOSITY >= 6:
print "restful: Response Headers =", result_data.headers, "\n"
if VERBOSITY >= 4:
print "restful: Status code =", result_data.status_code, "\n"
return {'json': result_data.json(), 'text': result_data.text,
'status': result_data.status_code,
'headers': result_data.headers,
'timeout': False}
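# Illustrative call (hypothetical endpoint): a GET such as
#   restful("https://" + fitargs()['rackhd_host'] + ":" + API_PORT + "/api/2.0/nodes")
# returns a dict whose 'status' holds the HTTP status code and whose 'json'
# holds the decoded body (an empty dict when the response is not JSON).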
# Get the list of BMC IP addresses that we can find
def get_bmc_ips():
idlist = [] # list of unique dcmi node IDs
# If we have already done this, use that list
if len(BMC_LIST) == 0:
ipscan = remote_shell('arp')['stdout'].split()
for ipaddr in ipscan:
if ipaddr[0:3] == "172" and remote_shell('ping -c 1 -w 5 ' + ipaddr)['exitcode'] == 0:
# iterate through all known IPMI users
for item in fitcreds()['bmc']:
# check BMC credentials
ipmicheck = remote_shell('ipmitool -I lanplus -H ' + ipaddr + ' -U ' + item['username'] +
' -P ' + item['password'] + ' -R 1 -N 3 chassis power status')
if ipmicheck['exitcode'] == 0:
# retrieve the ID string
return_code = remote_shell('ipmitool -I lanplus -H ' + ipaddr + ' -U ' + item['username'] +
' -P ' + item['password'] + ' -R 1 -N 3 dcmi get_mc_id_string')
bmc_info = {"ip": ipaddr, "user": item['username'], "pw": item['password']}
if return_code['exitcode'] == 0 and return_code['stdout'] not in idlist:
# add to list if unique
idlist.append(return_code['stdout'])
BMC_LIST.append(bmc_info)
break
else:
# simulated nodes don't yet support dcmi, remove this else branch when supported
BMC_LIST.append(bmc_info)
break
if VERBOSITY >= 6:
print "get_bmc_ips: "
print "**** BMC IP node count =", len(BMC_LIST), "****"
return len(BMC_LIST)
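# Each BMC_LIST entry collected above has the form
# {"ip": <address>, "user": <ipmi username>, "pw": <ipmi password>}.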
# power on/off all compute nodes in the stack via the BMC
def power_control_all_nodes(state):
if state != "on" and state != "off":
print "power_control_all_nodes: invalid state " + state
return
# Get the list of BMCs that we know about
node_count = get_bmc_ips()
# Send power on/off to all of them
for bmc in BMC_LIST:
return_code = remote_shell('ipmitool -I lanplus -H ' + bmc['ip'] +
' -U ' + bmc['user'] + ' -P ' +
bmc['pw'] + ' -R 4 -N 3 chassis power ' + state)
if return_code['exitcode'] != 0:
print "Error powering " + state + " node: " + bmc['ip']
return node_count
def mongo_reset():
# clears the Mongo database on host to default, returns True if successful
exitcode = 0
if int(remote_shell('pm2 stop rackhd-pm2-config.yml')['exitcode']) == 0: # for pm2-based source installations
exitcode = exitcode + int(remote_shell("mongo pxe --eval 'db.dropDatabase\\\(\\\)'")['exitcode'])
exitcode = exitcode + int(remote_shell('rm -f /var/lib/dhcp/dhcpd.leases')['exitcode'])
exitcode = exitcode + int(remote_shell('pm2 start rackhd-pm2-config.yml')['exitcode'])
else: # for package-based installations
exitcode = exitcode + int(remote_shell('sudo service on-http stop')['exitcode'])
exitcode = exitcode + int(remote_shell('sudo service on-dhcp-proxy stop')['exitcode'])
exitcode = exitcode + int(remote_shell('sudo service on-syslog stop')['exitcode'])
exitcode = exitcode + int(remote_shell('sudo service on-taskgraph stop')['exitcode'])
exitcode = exitcode + int(remote_shell('sudo service on-tftp stop')['exitcode'])
exitcode = exitcode + int(remote_shell("mongo pxe --eval 'db.dropDatabase\\\(\\\)'")['exitcode'])
exitcode = exitcode + int(remote_shell('rm -f /var/lib/dhcp/dhcpd.leases')['exitcode'])
exitcode = exitcode + int(remote_shell('sudo service on-http start')['exitcode'])
exitcode = exitcode + int(remote_shell('sudo service on-dhcp-proxy start')['exitcode'])
exitcode = exitcode + int(remote_shell('sudo service on-syslog start')['exitcode'])
exitcode = exitcode + int(remote_shell('sudo service on-taskgraph start')['exitcode'])
exitcode = exitcode + int(remote_shell('sudo service on-tftp start')['exitcode'])
if exitcode == 0:
return True
else:
return False
def node_select():
# returns a list with valid compute node IDs that match fitargs()["sku"] in 'Name' or 'Model' field
# and matches node BMC MAC address in fitargs()["obmmac"] if specified
# Otherwise returns list of all IDs that are not 'Unknown' or 'Unmanaged'
nodelist = []
skuid = "None"
# check if user specified a single nodeid to run against
# user must know the nodeid and any check for a valid nodeid is skipped
if fitargs()["nodeid"] != 'None':
nodelist.append(fitargs()["nodeid"])
return nodelist
else:
# Find SKU ID
skumap = rackhdapi('/api/2.0/skus')
if skumap['status'] != 200:
print '**** Unable to retrieve SKU list via API.\n'
sys.exit(255)
for skuentry in skumap['json']:
if str(fitargs()['sku']) in json.dumps(skuentry):
skuid = skuentry['id']
# Collect node IDs
catalog = rackhdapi('/api/2.0/nodes')
        if catalog['status'] != 200:
print '**** Unable to retrieve node list via API.\n'
sys.exit(255)
# Select node by SKU
for nodeentry in catalog['json']:
if fitargs()["sku"] == 'all':
# Select only managed compute nodes
if nodeentry['type'] == 'compute':
nodelist.append(nodeentry['id'])
else:
if 'sku' in nodeentry and skuid in json.dumps(nodeentry['sku']):
nodelist.append(nodeentry['id'])
# Select by node BMC MAC addr
if fitargs()["obmmac"] != 'all':
idlist = nodelist
nodelist = []
for member in idlist:
nodeentry = rackhdapi('/api/2.0/nodes/' + member)
if fitargs()["obmmac"] in json.dumps(nodeentry['json']):
nodelist = [member]
break
if VERBOSITY >= 6:
print "Node List:"
print nodelist, '\n'
if len(nodelist) == 0:
print '**** Empty node list.\n'
return nodelist
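# Illustrative result (hypothetical ID): node_select() returns a list of node
# ID strings such as ['56ddcf9a8eff16614e79ec74'], or an empty list when no
# managed compute node matches the -sku/-obmmac/-nodeid selection.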
def list_skus():
# return list of installed SKU names
skunames = []
api_data = rackhdapi('/api/2.0/skus')['json']
for item in api_data:
skunames.append(item['name'])
return skunames
def get_node_sku(nodeid):
# return name field of node SKU if available
nodetype = ""
sku = ""
# get node info
mondata = rackhdapi("/api/2.0/nodes/" + nodeid)
if mondata['status'] == 200:
# get the sku id contained in the node
sku = mondata['json'].get("sku")
if sku:
skudata = rackhdapi(sku)
if skudata['status'] == 200:
nodetype = skudata['json'].get("name")
else:
if VERBOSITY >= 2:
errmsg = "Error: SKU API failed {}, return code {} ".format(sku, skudata['status'])
print errmsg
return "unknown"
else:
return "unknown"
return nodetype
def check_active_workflows(nodeid):
# Return True if active workflows are found on node
workflows = rackhdapi('/api/2.0/nodes/' + nodeid + '/workflows')['json']
for item in workflows:
if '_status' in item:
if item['_status'] in ['running', 'pending']:
return True
if 'status' in item:
if item['status'] in ['running', 'pending']:
return True
else:
return False
return False
def cancel_active_workflows(nodeid):
# cancel all active workflows on node
exitstatus = True
apistatus = rackhdapi('/api/2.0/nodes/' + nodeid + '/workflows/action',
action='put', payload={"command": "cancel"})['status']
if apistatus != 202:
exitstatus = False
return exitstatus
def apply_obm_settings(retry=30):
# New routine to install OBM credentials via workflows in parallel
count = 0
for creds in fitcreds()['bmc']:
        # create graph for setting OBM credentials
payload = {
"friendlyName": "IPMI" + str(count),
"injectableName": 'Graph.Obm.Ipmi.CreateSettings' + str(count),
"options": {
"obm-ipmi-task": {
"user": creds["username"],
"password": creds["password"]
}
},
"tasks": [
{
"label": "obm-ipmi-task",
"taskName": "Task.Obm.Ipmi.CreateSettings"
}
]
}
api_data = rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload)
if api_data['status'] != 201:
print "**** OBM workflow failed to load!"
return False
count += 1
    # Set up additional OBM settings for nodes that currently use the RMM port (still the same bmc username/password used)
count = 0
for creds in fitcreds()['bmc']:
        # create graph for setting OBM credentials for RMM
payload = {
"friendlyName": "RMM.IPMI" + str(count),
"injectableName": 'Graph.Obm.Ipmi.CreateSettings.RMM' + str(count),
"options": {
"obm-ipmi-task": {
"ipmichannel": "3",
"user": creds["username"],
"password": creds["password"]
}
},
"tasks": [
{
"label": "obm-ipmi-task",
"taskName": "Task.Obm.Ipmi.CreateSettings"
}
]
}
api_data = rackhdapi("/api/2.0/workflows/graphs", action="put", payload=payload)
if api_data['status'] != 201:
print "**** OBM workflow failed to load!"
return False
count += 1
# run each OBM credential workflow on each node in parallel until success
nodestatus = {} # dictionary with node IDs and status of each node
for dummy in range(0, retry):
nodelist = node_select()
for node in nodelist:
if node not in nodestatus:
nodestatus[node] = {"status": "pending", "instanceId": "", "sku": get_node_sku(node), "retry": 0}
for num in range(0, count):
for node in nodelist:
# try workflow
if nodestatus[node]['status'] == "pending":
skuid = rackhdapi('/api/2.0/nodes/' + node)['json'].get("sku")
if skuid:
if nodestatus[node]['sku'] == "unknown":
nodestatus[node].update({"sku": get_node_sku(node)})
skudata = rackhdapi(skuid)['text']
if "rmm.data.MAC" in skudata:
workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings.RMM' + str(num)}
else:
workflow = {"name": 'Graph.Obm.Ipmi.CreateSettings' + str(num)}
result = rackhdapi("/api/2.0/nodes/" + node + "/workflows", action="post", payload=workflow)
if result['status'] == 201:
nodestatus[node].update({"status": "running", "instanceId": result['json']["instanceId"]})
for node in nodelist:
# check OBM workflow status
if nodestatus[node]['status'] == "running":
nodestatus[node]['retry'] += 1
state_data = rackhdapi("/api/2.0/workflows/" + nodestatus[node]['instanceId'])
if state_data['status'] == 200:
if "_status" in state_data['json']:
state = state_data['json']['_status']
else:
state = state_data['json']['status']
if state == "succeeded":
nodestatus[node]['status'] = "succeeded"
if state in ["failed", "cancelled", "timeout"]:
nodestatus[node]['status'] = "pending"
# if the workflow left an invalid OBM, delete it
result = rackhdapi("/api/2.0/nodes/" + node)
if result['status'] == 200:
if result['json']['obms']:
for ref in result['json']['obms']:
obmref = ref.get('ref')
if obmref:
rackhdapi(obmref, action="delete")
if VERBOSITY >= 4:
print "**** Node(s) OBM status:\n", json.dumps(nodestatus, sort_keys=True, indent=4,)
if "pending" not in str(nodestatus) and "running" not in str(nodestatus):
# All OBM settings successful
return True
time.sleep(30)
# Failures occurred
print "**** Node(s) OBM settings failed."
return False
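# While apply_obm_settings() polls, each `nodestatus` entry has the form
# {"status": "pending"|"running"|"succeeded", "instanceId": <workflow id>,
#  "sku": <sku name or "unknown">, "retry": <poll count>}; True is returned
# only once every node has reached "succeeded".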
def run_nose(nosepath=None):
if not nosepath:
nosepath = fitcfg()['cmd-args-list']['test']
# this routine runs nosetests from wrapper using path spec 'nosepath'
def _noserunner(pathspecs, noseopts):
xmlfile = str(time.time()) + ".xml" # XML report file name
env = {
'FIT_CONFIG': mkcfg().get_path(),
'HOME': os.environ['HOME'],
'PATH': os.environ['PATH'],
'PYTHONPATH': ':'.join(sys.path)
}
argv = ['nosetests']
argv.extend(noseopts)
argv.append('--xunit-file')
argv.append(xmlfile)
argv.extend(pathspecs)
argv.extend(fitcfg()['cmd-args-list']['unhandled_arguments'])
return subprocess.call(argv, env=env)
exitcode = 0
# set nose options
noseopts = ['--exe', '--with-nosedep', '--with-stream-monitor']
if fitargs()['group'] != 'all' and fitargs()['group'] != '':
noseopts.append('-a')
noseopts.append(str(fitargs()['group']))
if fitargs()['list'] is True or fitargs()['list'] == "True":
noseopts.append('--collect-only')
fitargs()['v'] = 0
print "\nTest Listing for:", fitargs()['test']
print "----------------------------------------------------------------------"
if fitargs()['xunit'] is True or fitargs()['xunit'] == "True":
noseopts.append('--with-xunit')
else:
noseopts.append('-s')
noseopts.append('-v')
# if nosepath is a directory, recurse through subdirs else run single test file
if os.path.isdir(nosepath):
# Skip the CIT test directories that match these expressions
regex = '(tests/*$)|(tests/api-cit/*)|(tests/api$)|(tests/api/.*)'
pathspecs = []
for root, _, _ in os.walk(nosepath):
if not re.search(regex, root):
pathspecs.append(root)
exitcode += _noserunner(pathspecs, noseopts)
else:
exitcode += _noserunner([nosepath], noseopts)
return exitcode
def _run_nose_help():
# This is used ONLY to fire off 'nosetests --help' for use from mkargs() when
# it is handling --help itself.
argv = ['nosetests', '--help']
return subprocess.call(argv)
def run_from_module(file_name):
# Use this method in 'name == "__main__"' style test invocations
# within individual test files
run_nose(file_name)
# determine who imported us.
importer = inspect.getframeinfo(inspect.getouterframes(inspect.currentframe())[1][0])[0]
if 'run_tests.py' in importer:
# we are being imported through run_tests.py (the fit wrapper)
# process sys.args as received by run_tests.py
compose_config(True)
else:
# we are being imported directly through a unittest module
# args will be nose-base args
compose_config(False)
| {
"content_hash": "f534866fb618ac03afe45d88d57239d9",
"timestamp": "",
"source": "github",
"line_count": 1138,
"max_line_length": 125,
"avg_line_length": 39.78822495606327,
"alnum_prop": 0.5439828618123191,
"repo_name": "DavidjohnBlodgett/RackHD",
"id": "387f14ead5d7ea3f8a4d592bdcc7bc61c957a163",
"size": "45279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/common/fit_common.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "696"
},
{
"name": "Python",
"bytes": "1133067"
},
{
"name": "Ruby",
"bytes": "10704"
},
{
"name": "Shell",
"bytes": "67452"
}
],
"symlink_target": ""
} |
"""
Unit tests for the NetApp Data ONTAP cDOT multi-SVM storage driver library.
"""
import copy
import ddt
import mock
from oslo_log import log
from manila import context
from manila import exception
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_multi_svm
from manila.share.drivers.netapp import utils as na_utils
from manila import test
from manila.tests.share.drivers.netapp.dataontap.client import fakes as c_fake
from manila.tests.share.drivers.netapp.dataontap import fakes as fake
@ddt.ddt
class NetAppFileStorageLibraryTestCase(test.TestCase):
def setUp(self):
super(NetAppFileStorageLibraryTestCase, self).setUp()
self.mock_object(na_utils, 'validate_driver_instantiation')
# Mock loggers as themselves to allow logger arg validation
mock_logger = log.getLogger('mock_logger')
self.mock_object(lib_multi_svm.LOG,
'warning',
mock.Mock(side_effect=mock_logger.warning))
self.mock_object(lib_multi_svm.LOG,
'error',
mock.Mock(side_effect=mock_logger.error))
kwargs = {
'configuration': fake.get_config_cmode(),
'private_storage': mock.Mock(),
'app_version': fake.APP_VERSION
}
self.library = lib_multi_svm.NetAppCmodeMultiSVMFileStorageLibrary(
fake.DRIVER_NAME, **kwargs)
self.library._client = mock.Mock()
self.library._client.get_ontapi_version.return_value = (1, 21)
self.client = self.library._client
def test_check_for_setup_error_cluster_creds_no_vserver(self):
self.library._have_cluster_creds = True
self.mock_object(self.library,
'_find_matching_aggregates',
mock.Mock(return_value=fake.AGGREGATES))
mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary,
'check_for_setup_error')
self.library.check_for_setup_error()
self.assertTrue(self.library._find_matching_aggregates.called)
mock_super.assert_called_once_with()
def test_check_for_setup_error_cluster_creds_with_vserver(self):
self.library._have_cluster_creds = True
self.library.configuration.netapp_vserver = fake.VSERVER1
self.mock_object(self.library,
'_find_matching_aggregates',
mock.Mock(return_value=fake.AGGREGATES))
mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary,
'check_for_setup_error')
self.library.check_for_setup_error()
mock_super.assert_called_once_with()
self.assertTrue(self.library._find_matching_aggregates.called)
self.assertTrue(lib_multi_svm.LOG.warning.called)
def test_check_for_setup_error_vserver_creds(self):
self.library._have_cluster_creds = False
self.assertRaises(exception.InvalidInput,
self.library.check_for_setup_error)
def test_check_for_setup_error_no_aggregates(self):
self.library._have_cluster_creds = True
self.mock_object(self.library,
'_find_matching_aggregates',
mock.Mock(return_value=[]))
self.assertRaises(exception.NetAppException,
self.library.check_for_setup_error)
self.assertTrue(self.library._find_matching_aggregates.called)
def test_get_vserver_no_share_server(self):
self.assertRaises(exception.InvalidInput,
self.library._get_vserver)
def test_get_vserver_no_backend_details(self):
fake_share_server = copy.deepcopy(fake.SHARE_SERVER)
fake_share_server.pop('backend_details')
kwargs = {'share_server': fake_share_server}
self.assertRaises(exception.VserverNotSpecified,
self.library._get_vserver,
**kwargs)
def test_get_vserver_none_backend_details(self):
fake_share_server = copy.deepcopy(fake.SHARE_SERVER)
fake_share_server['backend_details'] = None
kwargs = {'share_server': fake_share_server}
self.assertRaises(exception.VserverNotSpecified,
self.library._get_vserver,
**kwargs)
def test_get_vserver_no_vserver(self):
fake_share_server = copy.deepcopy(fake.SHARE_SERVER)
fake_share_server['backend_details'].pop('vserver_name')
kwargs = {'share_server': fake_share_server}
self.assertRaises(exception.VserverNotSpecified,
self.library._get_vserver,
**kwargs)
def test_get_vserver_none_vserver(self):
fake_share_server = copy.deepcopy(fake.SHARE_SERVER)
fake_share_server['backend_details']['vserver_name'] = None
kwargs = {'share_server': fake_share_server}
self.assertRaises(exception.VserverNotSpecified,
self.library._get_vserver,
**kwargs)
def test_get_vserver_not_found(self):
self.library._client.vserver_exists.return_value = False
kwargs = {'share_server': fake.SHARE_SERVER}
self.assertRaises(exception.VserverNotFound,
self.library._get_vserver,
**kwargs)
def test_get_vserver(self):
self.library._client.vserver_exists.return_value = True
self.mock_object(self.library,
'_get_api_client',
mock.Mock(return_value='fake_client'))
result = self.library._get_vserver(share_server=fake.SHARE_SERVER)
self.assertTupleEqual((fake.VSERVER1, 'fake_client'), result)
def test_get_ems_pool_info(self):
self.mock_object(self.library,
'_find_matching_aggregates',
mock.Mock(return_value=['aggr1', 'aggr2']))
result = self.library._get_ems_pool_info()
expected = {
'pools': {
'vserver': None,
'aggregates': ['aggr1', 'aggr2'],
},
}
self.assertEqual(expected, result)
def test_handle_housekeeping_tasks(self):
self.mock_object(self.client, 'prune_deleted_nfs_export_policies')
self.mock_object(self.client, 'prune_deleted_snapshots')
mock_super = self.mock_object(lib_base.NetAppCmodeFileStorageLibrary,
'_handle_housekeeping_tasks')
self.library._handle_housekeeping_tasks()
self.assertTrue(self.client.prune_deleted_nfs_export_policies.called)
self.assertTrue(self.client.prune_deleted_snapshots.called)
self.assertTrue(mock_super.called)
def test_find_matching_aggregates(self):
mock_list_non_root_aggregates = self.mock_object(
self.client, 'list_non_root_aggregates',
mock.Mock(return_value=fake.AGGREGATES))
self.library.configuration.netapp_aggregate_name_search_pattern = (
'.*_aggr_1')
result = self.library._find_matching_aggregates()
self.assertListEqual([fake.AGGREGATES[0]], result)
mock_list_non_root_aggregates.assert_called_once_with()
def test_setup_server(self):
mock_get_vserver_name = self.mock_object(
self.library,
'_get_vserver_name',
mock.Mock(return_value=fake.VSERVER1))
mock_create_vserver = self.mock_object(self.library, '_create_vserver')
mock_validate_network_type = self.mock_object(
self.library,
'_validate_network_type')
result = self.library.setup_server(fake.NETWORK_INFO)
self.assertTrue(mock_validate_network_type.called)
self.assertTrue(mock_get_vserver_name.called)
self.assertTrue(mock_create_vserver.called)
self.assertDictEqual({'vserver_name': fake.VSERVER1}, result)
def test_setup_server_with_error(self):
mock_get_vserver_name = self.mock_object(
self.library,
'_get_vserver_name',
mock.Mock(return_value=fake.VSERVER1))
fake_exception = exception.ManilaException("fake")
mock_create_vserver = self.mock_object(
self.library,
'_create_vserver',
mock.Mock(side_effect=fake_exception))
mock_validate_network_type = self.mock_object(
self.library,
'_validate_network_type')
self.assertRaises(
exception.ManilaException,
self.library.setup_server,
fake.NETWORK_INFO)
self.assertTrue(mock_validate_network_type.called)
self.assertTrue(mock_get_vserver_name.called)
self.assertTrue(mock_create_vserver.called)
self.assertDictEqual(
{'server_details': {'vserver_name': fake.VSERVER1}},
fake_exception.detail_data)
@ddt.data(
{'network_info': {'network_type': 'vlan', 'segmentation_id': 1000}},
{'network_info': {'network_type': None, 'segmentation_id': None}},
{'network_info': {'network_type': 'flat', 'segmentation_id': None}})
@ddt.unpack
def test_validate_network_type_with_valid_network_types(self,
network_info):
self.library._validate_network_type(network_info)
@ddt.data(
{'network_info': {'network_type': 'vxlan', 'segmentation_id': 1000}},
{'network_info': {'network_type': 'gre', 'segmentation_id': 100}})
@ddt.unpack
def test_validate_network_type_with_invalid_network_types(self,
network_info):
self.assertRaises(exception.NetworkBadConfigurationException,
self.library._validate_network_type,
network_info)
def test_get_vserver_name(self):
vserver_id = fake.NETWORK_INFO['server_id']
vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id
actual_result = self.library._get_vserver_name(vserver_id)
self.assertEqual(vserver_name, actual_result)
def test_create_vserver(self):
versions = ['fake_v1', 'fake_v2']
self.library.configuration.netapp_enabled_share_protocols = versions
vserver_id = fake.NETWORK_INFO['server_id']
vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id
vserver_client = mock.Mock()
self.mock_object(context,
'get_admin_context',
mock.Mock(return_value='fake_admin_context'))
self.mock_object(self.library,
'_get_api_client',
mock.Mock(return_value=vserver_client))
self.mock_object(self.library._client,
'vserver_exists',
mock.Mock(return_value=False))
self.mock_object(self.library,
'_find_matching_aggregates',
mock.Mock(return_value=fake.AGGREGATES))
self.mock_object(self.library,
'_create_ipspace',
mock.Mock(return_value=fake.IPSPACE))
self.mock_object(self.library, '_create_vserver_lifs')
self.mock_object(self.library, '_create_vserver_admin_lif')
self.mock_object(self.library, '_create_vserver_routes')
self.library._create_vserver(vserver_name, fake.NETWORK_INFO)
self.library._create_ipspace.assert_called_once_with(fake.NETWORK_INFO)
self.library._client.create_vserver.assert_called_once_with(
vserver_name, fake.ROOT_VOLUME_AGGREGATE, fake.ROOT_VOLUME,
fake.AGGREGATES, fake.IPSPACE)
self.library._get_api_client.assert_called_once_with(
vserver=vserver_name)
self.library._create_vserver_lifs.assert_called_once_with(
vserver_name, vserver_client, fake.NETWORK_INFO, fake.IPSPACE)
self.library._create_vserver_admin_lif.assert_called_once_with(
vserver_name, vserver_client, fake.NETWORK_INFO, fake.IPSPACE)
self.library._create_vserver_routes.assert_called_once_with(
vserver_client, fake.NETWORK_INFO)
vserver_client.enable_nfs.assert_called_once_with(versions)
self.library._client.setup_security_services.assert_called_once_with(
fake.NETWORK_INFO['security_services'], vserver_client,
vserver_name)
def test_create_vserver_already_present(self):
vserver_id = fake.NETWORK_INFO['server_id']
vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id
self.mock_object(context,
'get_admin_context',
mock.Mock(return_value='fake_admin_context'))
self.mock_object(self.library._client,
'vserver_exists',
mock.Mock(return_value=True))
self.assertRaises(exception.NetAppException,
self.library._create_vserver,
vserver_name,
fake.NETWORK_INFO)
@ddt.data(netapp_api.NaApiError, exception.NetAppException)
def test_create_vserver_lif_creation_failure(self, lif_exception):
vserver_id = fake.NETWORK_INFO['server_id']
vserver_name = fake.VSERVER_NAME_TEMPLATE % vserver_id
vserver_client = mock.Mock()
self.mock_object(context,
'get_admin_context',
mock.Mock(return_value='fake_admin_context'))
self.mock_object(self.library,
'_get_api_client',
mock.Mock(return_value=vserver_client))
self.mock_object(self.library._client,
'vserver_exists',
mock.Mock(return_value=False))
self.mock_object(self.library,
'_find_matching_aggregates',
mock.Mock(return_value=fake.AGGREGATES))
self.mock_object(self.library,
'_create_ipspace',
mock.Mock(return_value=fake.IPSPACE))
self.mock_object(self.library,
'_create_vserver_lifs',
mock.Mock(side_effect=lif_exception))
self.mock_object(self.library, '_delete_vserver')
self.assertRaises(lif_exception,
self.library._create_vserver,
vserver_name,
fake.NETWORK_INFO)
self.library._get_api_client.assert_called_with(vserver=vserver_name)
self.assertTrue(self.library._client.create_vserver.called)
self.library._create_vserver_lifs.assert_called_with(
vserver_name,
vserver_client,
fake.NETWORK_INFO,
fake.IPSPACE)
self.library._delete_vserver.assert_called_once_with(
vserver_name,
security_services=None)
self.assertFalse(vserver_client.enable_nfs.called)
self.assertEqual(1, lib_multi_svm.LOG.error.call_count)
def test_get_valid_ipspace_name(self):
result = self.library._get_valid_ipspace_name(fake.IPSPACE_ID)
expected = 'ipspace_' + fake.IPSPACE_ID.replace('-', '_')
self.assertEqual(expected, result)
def test_create_ipspace_not_supported(self):
self.library._client.features.IPSPACES = False
result = self.library._create_ipspace(fake.NETWORK_INFO)
self.assertIsNone(result)
@ddt.data(None, 'flat')
def test_create_ipspace_not_vlan(self, network_type):
self.library._client.features.IPSPACES = True
network_info = copy.deepcopy(fake.NETWORK_INFO)
network_info['network_allocations'][0]['segmentation_id'] = None
network_info['network_allocations'][0]['network_type'] = network_type
result = self.library._create_ipspace(network_info)
self.assertEqual('Default', result)
def test_create_ipspace_already_present(self):
self.library._client.features.IPSPACES = True
self.mock_object(self.library._client,
'ipspace_exists',
mock.Mock(return_value=True))
result = self.library._create_ipspace(fake.NETWORK_INFO)
expected = self.library._get_valid_ipspace_name(
fake.NETWORK_INFO['neutron_subnet_id'])
self.assertEqual(expected, result)
self.library._client.ipspace_exists.assert_has_calls([
mock.call(expected)])
self.assertFalse(self.library._client.create_ipspace.called)
def test_create_ipspace(self):
self.library._client.features.IPSPACES = True
self.mock_object(self.library._client,
'ipspace_exists',
mock.Mock(return_value=False))
result = self.library._create_ipspace(fake.NETWORK_INFO)
expected = self.library._get_valid_ipspace_name(
fake.NETWORK_INFO['neutron_subnet_id'])
self.assertEqual(expected, result)
self.library._client.ipspace_exists.assert_has_calls([
mock.call(expected)])
self.library._client.create_ipspace.assert_has_calls([
mock.call(expected)])
def test_create_vserver_lifs(self):
self.mock_object(self.library._client,
'list_cluster_nodes',
mock.Mock(return_value=fake.CLUSTER_NODES))
self.mock_object(self.library,
'_get_lif_name',
mock.Mock(side_effect=['fake_lif1', 'fake_lif2']))
self.mock_object(self.library, '_create_lif')
self.library._create_vserver_lifs(fake.VSERVER1,
'fake_vserver_client',
fake.NETWORK_INFO,
fake.IPSPACE)
self.library._create_lif.assert_has_calls([
mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE,
fake.CLUSTER_NODES[0], 'fake_lif1',
fake.NETWORK_INFO['network_allocations'][0]),
mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE,
fake.CLUSTER_NODES[1], 'fake_lif2',
fake.NETWORK_INFO['network_allocations'][1])])
def test_create_vserver_admin_lif(self):
self.mock_object(self.library._client,
'list_cluster_nodes',
mock.Mock(return_value=fake.CLUSTER_NODES))
self.mock_object(self.library,
'_get_lif_name',
mock.Mock(return_value='fake_admin_lif'))
self.mock_object(self.library, '_create_lif')
self.library._create_vserver_admin_lif(fake.VSERVER1,
'fake_vserver_client',
fake.NETWORK_INFO,
fake.IPSPACE)
self.library._create_lif.assert_has_calls([
mock.call('fake_vserver_client', fake.VSERVER1, fake.IPSPACE,
fake.CLUSTER_NODES[0], 'fake_admin_lif',
fake.NETWORK_INFO['admin_network_allocations'][0])])
def test_create_vserver_admin_lif_no_admin_network(self):
fake_network_info = copy.deepcopy(fake.NETWORK_INFO)
fake_network_info['admin_network_allocations'] = []
self.mock_object(self.library._client,
'list_cluster_nodes',
mock.Mock(return_value=fake.CLUSTER_NODES))
self.mock_object(self.library,
'_get_lif_name',
mock.Mock(return_value='fake_admin_lif'))
self.mock_object(self.library, '_create_lif')
self.library._create_vserver_admin_lif(fake.VSERVER1,
'fake_vserver_client',
fake_network_info,
fake.IPSPACE)
self.assertFalse(self.library._create_lif.called)
@ddt.data(
fake.get_network_info(fake.USER_NETWORK_ALLOCATIONS,
fake.ADMIN_NETWORK_ALLOCATIONS),
fake.get_network_info(fake.USER_NETWORK_ALLOCATIONS_IPV6,
fake.ADMIN_NETWORK_ALLOCATIONS))
def test_create_vserver_routes(self, network_info):
expected_gateway = network_info['network_allocations'][0]['gateway']
vserver_client = mock.Mock()
self.mock_object(vserver_client, 'create_route')
retval = self.library._create_vserver_routes(
vserver_client, network_info)
self.assertIsNone(retval)
vserver_client.create_route.assert_called_once_with(expected_gateway)
def test_get_node_data_port(self):
self.mock_object(self.client,
'list_node_data_ports',
mock.Mock(return_value=fake.NODE_DATA_PORTS))
self.library.configuration.netapp_port_name_search_pattern = 'e0c'
result = self.library._get_node_data_port(fake.CLUSTER_NODE)
self.assertEqual('e0c', result)
self.library._client.list_node_data_ports.assert_has_calls([
mock.call(fake.CLUSTER_NODE)])
def test_get_node_data_port_no_match(self):
self.mock_object(self.client,
'list_node_data_ports',
mock.Mock(return_value=fake.NODE_DATA_PORTS))
self.library.configuration.netapp_port_name_search_pattern = 'ifgroup1'
self.assertRaises(exception.NetAppException,
self.library._get_node_data_port,
fake.CLUSTER_NODE)
def test_get_lif_name(self):
result = self.library._get_lif_name(
'fake_node', fake.NETWORK_INFO['network_allocations'][0])
self.assertEqual('os_132dbb10-9a36-46f2-8d89-3d909830c356', result)
@ddt.data(fake.MTU, None, 'not-present')
def test_create_lif(self, mtu):
"""Tests cases where MTU is a valid value, None or not present."""
expected_mtu = (mtu if mtu not in (None, 'not-present') else
fake.DEFAULT_MTU)
network_allocations = copy.deepcopy(
fake.NETWORK_INFO['network_allocations'][0])
network_allocations['mtu'] = mtu
if mtu == 'not-present':
network_allocations.pop('mtu')
vserver_client = mock.Mock()
vserver_client.network_interface_exists = mock.Mock(
return_value=False)
self.mock_object(self.library,
'_get_node_data_port',
mock.Mock(return_value='fake_port'))
self.library._create_lif(vserver_client,
'fake_vserver',
'fake_ipspace',
'fake_node',
'fake_lif',
network_allocations)
self.library._client.create_network_interface.assert_has_calls([
mock.call('10.10.10.10', '255.255.255.0', '1000', 'fake_node',
'fake_port', 'fake_vserver', 'fake_lif',
'fake_ipspace', expected_mtu)])
def test_create_lif_if_nonexistent_already_present(self):
vserver_client = mock.Mock()
vserver_client.network_interface_exists = mock.Mock(
return_value=True)
self.mock_object(self.library,
'_get_node_data_port',
mock.Mock(return_value='fake_port'))
self.library._create_lif(vserver_client,
'fake_vserver',
fake.IPSPACE,
'fake_node',
'fake_lif',
fake.NETWORK_INFO['network_allocations'][0])
self.assertFalse(self.library._client.create_network_interface.called)
def test_get_network_allocations_number(self):
self.library._client.list_cluster_nodes.return_value = (
fake.CLUSTER_NODES)
result = self.library.get_network_allocations_number()
self.assertEqual(len(fake.CLUSTER_NODES), result)
def test_get_admin_network_allocations_number(self):
result = self.library.get_admin_network_allocations_number(
'fake_admin_network_api')
self.assertEqual(1, result)
def test_get_admin_network_allocations_number_no_admin_network(self):
result = self.library.get_admin_network_allocations_number(None)
self.assertEqual(0, result)
def test_teardown_server(self):
self.library._client.vserver_exists.return_value = True
mock_delete_vserver = self.mock_object(self.library,
'_delete_vserver')
self.library.teardown_server(
fake.SHARE_SERVER['backend_details'],
security_services=fake.NETWORK_INFO['security_services'])
self.library._client.vserver_exists.assert_called_once_with(
fake.VSERVER1)
mock_delete_vserver.assert_called_once_with(
fake.VSERVER1,
security_services=fake.NETWORK_INFO['security_services'])
@ddt.data(None, {}, {'vserver_name': None})
def test_teardown_server_no_share_server(self, server_details):
mock_delete_vserver = self.mock_object(self.library,
'_delete_vserver')
self.library.teardown_server(server_details)
self.assertFalse(mock_delete_vserver.called)
self.assertTrue(lib_multi_svm.LOG.warning.called)
def test_teardown_server_no_vserver(self):
self.library._client.vserver_exists.return_value = False
mock_delete_vserver = self.mock_object(self.library,
'_delete_vserver')
self.library.teardown_server(
fake.SHARE_SERVER['backend_details'],
security_services=fake.NETWORK_INFO['security_services'])
self.library._client.vserver_exists.assert_called_once_with(
fake.VSERVER1)
self.assertFalse(mock_delete_vserver.called)
self.assertTrue(lib_multi_svm.LOG.warning.called)
def test_delete_vserver_no_ipspace(self):
self.mock_object(self.library._client,
'get_vserver_ipspace',
mock.Mock(return_value=None))
vserver_client = mock.Mock()
self.mock_object(self.library,
'_get_api_client',
mock.Mock(return_value=vserver_client))
mock_delete_vserver_vlans = self.mock_object(self.library,
'_delete_vserver_vlans')
net_interfaces = copy.deepcopy(c_fake.NETWORK_INTERFACES_MULTIPLE)
net_interfaces_with_vlans = [net_interfaces[0]]
self.mock_object(vserver_client,
'get_network_interfaces',
mock.Mock(return_value=net_interfaces))
security_services = fake.NETWORK_INFO['security_services']
self.library._delete_vserver(fake.VSERVER1,
security_services=security_services)
self.library._client.get_vserver_ipspace.assert_called_once_with(
fake.VSERVER1)
self.library._client.delete_vserver.assert_called_once_with(
fake.VSERVER1, vserver_client, security_services=security_services)
self.assertFalse(self.library._client.delete_ipspace.called)
mock_delete_vserver_vlans.assert_called_once_with(
net_interfaces_with_vlans)
def test_delete_vserver_ipspace_has_data_vservers(self):
self.mock_object(self.library._client,
'get_vserver_ipspace',
mock.Mock(return_value=fake.IPSPACE))
vserver_client = mock.Mock()
self.mock_object(self.library,
'_get_api_client',
mock.Mock(return_value=vserver_client))
self.mock_object(self.library._client,
'ipspace_has_data_vservers',
mock.Mock(return_value=True))
mock_delete_vserver_vlans = self.mock_object(self.library,
'_delete_vserver_vlans')
self.mock_object(
vserver_client, 'get_network_interfaces',
mock.Mock(return_value=c_fake.NETWORK_INTERFACES_MULTIPLE))
security_services = fake.NETWORK_INFO['security_services']
self.library._delete_vserver(fake.VSERVER1,
security_services=security_services)
self.library._client.get_vserver_ipspace.assert_called_once_with(
fake.VSERVER1)
self.library._client.delete_vserver.assert_called_once_with(
fake.VSERVER1, vserver_client, security_services=security_services)
self.assertFalse(self.library._client.delete_ipspace.called)
mock_delete_vserver_vlans.assert_called_once_with(
[c_fake.NETWORK_INTERFACES_MULTIPLE[0]])
@ddt.data([], c_fake.NETWORK_INTERFACES)
def test_delete_vserver_with_ipspace(self, interfaces):
self.mock_object(self.library._client,
'get_vserver_ipspace',
mock.Mock(return_value=fake.IPSPACE))
vserver_client = mock.Mock()
self.mock_object(self.library,
'_get_api_client',
mock.Mock(return_value=vserver_client))
self.mock_object(self.library._client,
'ipspace_has_data_vservers',
mock.Mock(return_value=False))
mock_delete_vserver_vlans = self.mock_object(self.library,
'_delete_vserver_vlans')
self.mock_object(vserver_client,
'get_network_interfaces',
mock.Mock(return_value=interfaces))
security_services = fake.NETWORK_INFO['security_services']
self.library._delete_vserver(fake.VSERVER1,
security_services=security_services)
self.library._client.get_vserver_ipspace.assert_called_once_with(
fake.VSERVER1)
self.library._client.delete_vserver.assert_called_once_with(
fake.VSERVER1, vserver_client, security_services=security_services)
self.library._client.delete_ipspace.assert_called_once_with(
fake.IPSPACE)
mock_delete_vserver_vlans.assert_called_once_with(interfaces)
def test_delete_vserver_vlans(self):
self.library._delete_vserver_vlans(c_fake.NETWORK_INTERFACES)
for interface in c_fake.NETWORK_INTERFACES:
home_port = interface['home-port']
port, vlan = home_port.split('-')
node = interface['home-node']
self.library._client.delete_vlan.assert_called_once_with(
node, port, vlan)
def test_delete_vserver_vlans_client_error(self):
mock_exception_log = self.mock_object(lib_multi_svm.LOG, 'exception')
self.mock_object(
self.library._client,
'delete_vlan',
mock.Mock(side_effect=exception.NetAppException("fake error")))
self.library._delete_vserver_vlans(c_fake.NETWORK_INTERFACES)
for interface in c_fake.NETWORK_INTERFACES:
home_port = interface['home-port']
port, vlan = home_port.split('-')
node = interface['home-node']
self.library._client.delete_vlan.assert_called_once_with(
node, port, vlan)
self.assertEqual(1, mock_exception_log.call_count)
| {
"content_hash": "1af487a85bb26ed64eb5506b90792a8f",
"timestamp": "",
"source": "github",
"line_count": 789,
"max_line_length": 79,
"avg_line_length": 40.98605830164765,
"alnum_prop": 0.584297111757066,
"repo_name": "bswartz/manila",
"id": "286947020b71a91a63e05ee9b11e840df41834cd",
"size": "32971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "9952105"
},
{
"name": "Shell",
"bytes": "106606"
}
],
"symlink_target": ""
} |
"""
Django settings for qr_code_demo project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from qr_code.qrcode import constants
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "8l4)()f1&tg*dtxh6whlew#k-d5&79npe#j_dg9l0b)m8^g#8u"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = ["django.contrib.auth", "django.contrib.contenttypes", "django.contrib.staticfiles", "qr_code", "qr_code_demo"]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "demo_site.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "demo_site.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {}
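# The demo site does not need a database, so none is configured.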
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
# Caches.
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
},
"qr-code": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache", "LOCATION": "qr-code-cache", "TIMEOUT": 3600},
}
# Django QR Code specific options.
QR_CODE_CACHE_ALIAS = "qr-code"
QR_CODE_URL_PROTECTION = {
constants.TOKEN_LENGTH: 30, # Optional random token length for URL protection. Defaults to 20.
constants.SIGNING_KEY: "my-secret-signing-key", # Optional signing key for URL token. Uses SECRET_KEY if not defined.
constants.SIGNING_SALT: "my-signing-salt", # Optional signing salt for URL token.
constants.ALLOWS_EXTERNAL_REQUESTS_FOR_REGISTERED_USER: lambda u: True, # Tells whether a registered user can request the QR code URLs from outside a site that uses this app. It can be a boolean value used for any user, or a callable that takes a user as parameter. Defaults to False (nobody can access the URL without the security token).
}
SERVE_QR_CODE_IMAGE_PATH = "qr-code-image/"
| {
"content_hash": "8c5154500390abaefeb683bb6c63f4fa",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 344,
"avg_line_length": 32.224,
"alnum_prop": 0.7055610724925522,
"repo_name": "dprog-philippe-docourt/django-qr-code",
"id": "68da1a3091c8dae69c06cd8d2344aa825545793b",
"size": "4028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo_site/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "28762"
},
{
"name": "Python",
"bytes": "204625"
},
{
"name": "Shell",
"bytes": "3307"
}
],
"symlink_target": ""
} |
import logging
import time
from universe import pyprofile
from universe.vectorized import core
logger = logging.getLogger(__name__)
DEFAULT_MAX_EPISODE_SECONDS = 20 * 60. # Default to 20 minutes if there is no explicit limit
class TimeLimit(core.Wrapper):
def _configure(self, **kwargs):
super(TimeLimit, self)._configure(**kwargs)
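        # Read per-environment limits from the spec tags; if neither a step
        # limit nor a seconds limit is configured, fall back to the wall-clock
        # default defined above.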
self._max_episode_seconds = self.env.spec.tags.get('wrapper_config.TimeLimit.max_episode_seconds', None)
self._max_episode_steps = self.env.spec.tags.get('wrapper_config.TimeLimit.max_episode_steps', None)
if self._max_episode_seconds is None and self._max_episode_steps is None:
self._max_episode_seconds = DEFAULT_MAX_EPISODE_SECONDS
self._elapsed_steps = 0
self._episode_started_at = None
@property
def _elapsed_seconds(self):
return time.time() - self._episode_started_at
def _past_limit(self):
"""Return true if we are past our limit"""
if self._max_episode_steps is not None and self._max_episode_steps <= self._elapsed_steps:
logger.debug("Env has passed the step limit defined by TimeLimit.")
return True
if self._max_episode_seconds is not None and self._max_episode_seconds <= self._elapsed_seconds:
logger.debug("Env has passed the seconds limit defined by TimeLimit.")
return True
return False
def _step(self, action_n):
assert self._episode_started_at is not None, "Cannot call env.step() before calling reset()"
observation_n, reward_n, done_n, info = self.env.step(action_n)
self._elapsed_steps += 1
if self._past_limit():
_ = self.reset() # Force a reset, discard the observation
done_n = [True] * self.n # Force a done = True
return observation_n, reward_n, done_n, info
def _reset(self):
self._episode_started_at = time.time()
self._elapsed_steps = 0
return self.env.reset()
| {
"content_hash": "eaa2b2a45d944bbee83533bf3a7551d3",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 112,
"avg_line_length": 37.092592592592595,
"alnum_prop": 0.6465302046929605,
"repo_name": "rht/universe",
"id": "49fb05aca45a27bc47c88396ffbbb5187e6a2903",
"size": "2003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "universe/wrappers/time_limit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2718"
},
{
"name": "Python",
"bytes": "536345"
}
],
"symlink_target": ""
} |
from .pulsectl import Module
class Module(Module):
def __init__(self, config, theme):
super().__init__(config, theme, "sink")
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| {
"content_hash": "81366dece002f0238c013be4aedb9fb6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 53,
"avg_line_length": 21.77777777777778,
"alnum_prop": 0.6632653061224489,
"repo_name": "tobi-wan-kenobi/bumblebee-status",
"id": "31fecec2f4d74b17482f73ec5d96a5c7cec1e099",
"size": "196",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bumblebee_status/modules/core/pulseout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "149"
},
{
"name": "Python",
"bytes": "629537"
},
{
"name": "Shell",
"bytes": "2431"
}
],
"symlink_target": ""
} |
"""Tests the Tensorboard mesh plugin."""
import collections.abc
import os
import shutil
import numpy as np
import tensorflow as tf
import time
from unittest import mock
from werkzeug import test as werkzeug_test
from werkzeug import wrappers
from tensorboard.backend import application
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.plugins import base_plugin
from tensorboard.plugins.mesh import mesh_plugin
from tensorboard.plugins.mesh import summary
from tensorboard.plugins.mesh import plugin_data_pb2
from tensorboard.plugins.mesh import test_utils
from tensorboard.util import test_util as tensorboard_test_util
class MeshPluginTest(tf.test.TestCase):
"""Tests for mesh plugin server."""
def setUp(self):
# We use numpy.random to generate meshes. We seed to avoid non-determinism
# in this test.
np.random.seed(17)
# Log dir to save temp events into.
self.log_dir = self.get_temp_dir()
# Create mesh summary.
with tf.compat.v1.Graph().as_default():
tf_placeholder = tf.compat.v1.placeholder
sess = tf.compat.v1.Session()
point_cloud = test_utils.get_random_mesh(1000)
point_cloud_vertices = tf_placeholder(
tf.float32, point_cloud.vertices.shape
)
mesh_no_color = test_utils.get_random_mesh(2000, add_faces=True)
mesh_no_color_extended = test_utils.get_random_mesh(
2500, add_faces=True
)
mesh_no_color_vertices = tf_placeholder(tf.float32, [1, None, 3])
mesh_no_color_faces = tf_placeholder(tf.int32, [1, None, 3])
mesh_color = test_utils.get_random_mesh(
3000, add_faces=True, add_colors=True
)
mesh_color_vertices = tf_placeholder(
tf.float32, mesh_color.vertices.shape
)
mesh_color_faces = tf_placeholder(tf.int32, mesh_color.faces.shape)
mesh_color_colors = tf_placeholder(
tf.uint8, mesh_color.colors.shape
)
self.data = [
point_cloud,
mesh_no_color,
mesh_no_color_extended,
mesh_color,
]
            # When a name is present and display_name is not, we reuse the name
            # as display_name. The summaries below are intended to test both cases.
self.names = ["point_cloud", "mesh_no_color", "mesh_color"]
summary.op(
self.names[0],
point_cloud_vertices,
description="just point cloud",
)
summary.op(
self.names[1],
mesh_no_color_vertices,
faces=mesh_no_color_faces,
display_name="name_to_display_in_ui",
description="beautiful mesh in grayscale",
)
summary.op(
self.names[2],
mesh_color_vertices,
faces=mesh_color_faces,
colors=mesh_color_colors,
description="mesh with random colors",
)
merged_summary_op = tf.compat.v1.summary.merge_all()
self.runs = ["bar"]
self.steps = 20
bar_directory = os.path.join(self.log_dir, self.runs[0])
with tensorboard_test_util.FileWriterCache.get(
bar_directory
) as writer:
writer.add_graph(sess.graph)
for step in range(self.steps):
# Alternate between two random meshes with different number of
# vertices.
no_color = (
mesh_no_color
if step % 2 == 0
else mesh_no_color_extended
)
with mock.patch.object(time, "time", return_value=step):
writer.add_summary(
sess.run(
merged_summary_op,
feed_dict={
point_cloud_vertices: point_cloud.vertices,
mesh_no_color_vertices: no_color.vertices,
mesh_no_color_faces: no_color.faces,
mesh_color_vertices: mesh_color.vertices,
mesh_color_faces: mesh_color.faces,
mesh_color_colors: mesh_color.colors,
},
),
global_step=step,
)
# Start a server that will receive requests.
multiplexer = event_multiplexer.EventMultiplexer(
{
"bar": bar_directory,
}
)
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.log_dir
)
self.context = base_plugin.TBContext(
logdir=self.log_dir, data_provider=provider
)
self.plugin = mesh_plugin.MeshPlugin(self.context)
# Wait until after plugin construction to reload the multiplexer because the
# plugin caches data from the multiplexer upon construction and this affects
# logic tested later down.
# TODO(https://github.com/tensorflow/tensorboard/issues/2579): Eliminate the
# caching of data at construction time and move this Reload() up to just
# after the multiplexer is created.
multiplexer.Reload()
wsgi_app = application.TensorBoardWSGI([self.plugin])
self.server = werkzeug_test.Client(wsgi_app, wrappers.Response)
self.routes = self.plugin.get_plugin_apps()
def tearDown(self):
shutil.rmtree(self.log_dir, ignore_errors=True)
def testRoutes(self):
"""Tests that the /tags route offers the correct run to tag mapping."""
self.assertIsInstance(self.routes["/tags"], collections.abc.Callable)
self.assertIsInstance(self.routes["/meshes"], collections.abc.Callable)
self.assertIsInstance(self.routes["/data"], collections.abc.Callable)
def testTagsRoute(self):
"""Tests that the /tags route offers the correct run to tag mapping."""
response = self.server.get("/data/plugin/mesh/tags")
self.assertEqual(200, response.status_code)
tags = test_utils.deserialize_json_response(response.get_data())
self.assertIn(self.runs[0], tags)
for name in self.names:
self.assertIn(name, tags[self.runs[0]])
def validate_data_response(
self, run, tag, sample, content_type, dtype, ground_truth_data, step=0
):
"""Makes request and checks that response has expected data."""
response = self.server.get(
"/data/plugin/mesh/data?run=%s&tag=%s&sample=%d&content_type="
"%s&step=%d" % (run, tag, sample, content_type, step)
)
self.assertEqual(200, response.status_code)
data = test_utils.deserialize_array_buffer_response(
next(response.response), dtype
)
self.assertEqual(ground_truth_data.reshape(-1).tolist(), data.tolist())
def testDataRoute(self):
"""Tests that the /data route returns correct data for meshes."""
self.validate_data_response(
self.runs[0],
self.names[0],
0,
"VERTEX",
np.float32,
self.data[0].vertices,
)
self.validate_data_response(
self.runs[0], self.names[1], 0, "FACE", np.int32, self.data[1].faces
)
        # Validate that the same summary has a mesh with a different number of
        # faces at step=1.
self.validate_data_response(
self.runs[0],
self.names[1],
0,
"FACE",
np.int32,
self.data[2].faces,
step=1,
)
self.validate_data_response(
self.runs[0],
self.names[2],
0,
"COLOR",
np.uint8,
self.data[3].colors,
)
def testMetadataRoute(self):
"""Tests that the /meshes route returns correct metadata for meshes."""
response = self.server.get(
"/data/plugin/mesh/meshes?run=%s&tag=%s&sample=%d"
% (self.runs[0], self.names[0], 0)
)
self.assertEqual(200, response.status_code)
metadata = test_utils.deserialize_json_response(response.get_data())
self.assertEqual(len(metadata), self.steps)
self.assertAllEqual(
metadata[0]["content_type"], plugin_data_pb2.MeshPluginData.VERTEX
)
self.assertAllEqual(
metadata[0]["data_shape"], self.data[0].vertices.shape
)
def testsEventsAlwaysSortedByStep(self):
"""Tests that events always sorted by step."""
response = self.server.get(
"/data/plugin/mesh/meshes?run=%s&tag=%s&sample=%d"
% (self.runs[0], self.names[1], 0)
)
self.assertEqual(200, response.status_code)
metadata = test_utils.deserialize_json_response(response.get_data())
for i in range(1, self.steps):
# Step will be equal when two tensors of different content type
# belong to the same mesh.
self.assertLessEqual(metadata[i - 1]["step"], metadata[i]["step"])
def testIsActive(self):
self.assertFalse(self.plugin.is_active())
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "be6a3351bc3c606428b8b38ab7292e15",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 86,
"avg_line_length": 38.74603174603175,
"alnum_prop": 0.5625768127816468,
"repo_name": "tensorflow/tensorboard",
"id": "e56c1fa82d45e61d9ce12da1bd0b9f59f27acbca",
"size": "10453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorboard/plugins/mesh/mesh_plugin_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16222"
},
{
"name": "Dockerfile",
"bytes": "1226"
},
{
"name": "HTML",
"bytes": "154824"
},
{
"name": "Java",
"bytes": "20643"
},
{
"name": "JavaScript",
"bytes": "11869"
},
{
"name": "Jupyter Notebook",
"bytes": "7697"
},
{
"name": "Python",
"bytes": "2922179"
},
{
"name": "Rust",
"bytes": "311041"
},
{
"name": "SCSS",
"bytes": "136834"
},
{
"name": "Shell",
"bytes": "36731"
},
{
"name": "Starlark",
"bytes": "541743"
},
{
"name": "TypeScript",
"bytes": "5930550"
}
],
"symlink_target": ""
} |
"""Sliver manager.
The sliver manager has several functions. It is responsible for
creating, resource limiting, starting, stopping, and destroying
slivers. It provides an API for users to access these functions and
also to make inter-sliver resource loans. The sliver manager is also
responsible for handling delegation accounts.
"""
import string
import re
import time
import logger
import api, api_calls
import database
import account
import controller
try:
import sliver_lxc
implementation='lxc'
sliver_default_type='sliver.LXC'
sliver_class_to_register = sliver_lxc.Sliver_LXC
sliver_password_shell = sliver_lxc.Sliver_LXC.SHELL
except:
try:
import sliver_vs
implementation='vs'
sliver_default_type='sliver.VServer'
sliver_class_to_register = sliver_vs.Sliver_VS
sliver_password_shell = sliver_vs.Sliver_VS.SHELL
except:
logger.log("Could not import either sliver_lxc or sliver_vs - bailing out")
exit(1)
# just being safe
try:
from plnode.bwlimit import bwmin, bwmax
except:
bwmin, bwmax = 8, 1000*1000*1000
priority = 10
DEFAULT_ALLOCATION = {
'enabled': 1,
# CPU parameters
'cpu_pct': 0, # percent CPU reserved
'cpu_share': 1, # proportional share
'cpu_cores': "0b", # reserved cpu cores <num_cores>[b]
'cpu_freezable': 0, # freeze processes if cpu_cores is 0
# bandwidth parameters
'net_min_rate': bwmin / 1000, # kbps
'net_max_rate': bwmax / 1000, # kbps
'net_share': 1, # proportional share
# bandwidth parameters over routes exempt from node bandwidth limits
'net_i2_min_rate': bwmin / 1000, # kbps
'net_i2_max_rate': bwmax / 1000, # kbps
'net_i2_share': 1, # proportional share
'net_max_kbyte' : 10546875, #Kbyte
'net_thresh_kbyte': 9492187, #Kbyte
'net_i2_max_kbyte': 31640625,
'net_i2_thresh_kbyte': 28476562,
# disk space limit
'disk_max': 10000000, # bytes
# capabilities
'capabilities': '',
# IP addresses
'ip_addresses': '0.0.0.0',
# NOTE: this table is further populated with resource names and
# default amounts via the start() function below. This probably
    # should be changed and these values should be obtained via the
# API to myplc.
}
start_requested = False # set to True in order to request that all slivers be started
# check leases and adjust the 'reservation_alive' field in slivers
# this is not expected to be saved as it will change for the next round
def adjustReservedSlivers (data):
"""
    On reservable nodes, tweak the 'reservation_alive' field to instruct the cyclic loop
about what to do with slivers.
"""
# only impacts reservable nodes
if 'reservation_policy' not in data: return
policy=data['reservation_policy']
if policy not in ['lease_or_idle', 'lease_or_shared']:
if policy is not None:
logger.log ("unexpected reservation_policy %(policy)s"%locals())
return
logger.log("slivermanager.adjustReservedSlivers")
now=int(time.time())
# scan leases that are expected to show in ascending order
active_lease=None
for lease in data['leases']:
if lease['t_from'] <= now and now <= lease['t_until']:
active_lease=lease
break
def is_system_sliver (sliver):
for d in sliver['attributes']:
if d['tagname']=='system' and d['value']:
return True
return False
# mark slivers as appropriate
for sliver in data['slivers']:
# system slivers must be kept alive
if is_system_sliver(sliver):
sliver['reservation_alive']=True
continue
# regular slivers
if not active_lease:
            # with 'lease_or_shared', just leave the field out and behave like a shared node
# otherwise, mark all slivers as being turned down
if policy == 'lease_or_idle':
sliver['reservation_alive']=False
else:
# there is an active lease, mark it alive and the other not
sliver['reservation_alive'] = sliver['name']==active_lease['name']
@database.synchronized
def GetSlivers(data, config = None, plc=None, fullupdate=True):
"""This function has two purposes. One, convert GetSlivers() data
into a more convenient format. Two, even if no updates are coming
in, use the GetSlivers() heartbeat as a cue to scan for expired
slivers."""
logger.verbose("slivermanager: Entering GetSlivers with fullupdate=%r"%fullupdate)
for key in list(data.keys()):
logger.verbose('slivermanager: GetSlivers key : ' + key)
node_id = None
try:
f = open('/etc/planetlab/node_id')
try:
node_id = int(f.read())
finally:
f.close()
except:
logger.log_exc("slivermanager: GetSlivers failed to read /etc/planetlab/node_id")
if 'node_id' in data and data['node_id'] != node_id: return
if 'networks' in data:
for network in data['networks']:
if network['is_primary'] and network['bwlimit'] is not None:
DEFAULT_ALLOCATION['net_max_rate'] = network['bwlimit'] / 1000
# Take initscripts (global) returned by API, build a hash scriptname->code
iscripts_hash = {}
if 'initscripts' not in data:
logger.log_missing_data("slivermanager.GetSlivers", 'initscripts')
return
for initscript_rec in data['initscripts']:
logger.verbose("slivermanager: initscript: %s" % initscript_rec['name'])
iscripts_hash[str(initscript_rec['name'])] = initscript_rec['script']
adjustReservedSlivers (data)
for sliver in data['slivers']:
logger.verbose("slivermanager: %s: slivermanager.GetSlivers in slivers loop"%sliver['name'])
rec = sliver.copy()
rec.setdefault('timestamp', data['timestamp'])
# convert attributes field to a proper dict
attributes = {}
for attr in rec.pop('attributes'): attributes[attr['tagname']] = attr['value']
rec.setdefault("attributes", attributes)
# squash keys
keys = rec.pop('keys')
rec.setdefault('keys', '\n'.join([key_struct['key'] for key_struct in keys]))
## 'Type' isn't returned by GetSlivers() for whatever reason. We're overloading
        ## instantiation here, but I suppose it's the same thing when you think about it. -FA
# Handle nm-controller here
if rec['instantiation'].lower() == 'nm-controller':
rec.setdefault('type', attributes.get('type', 'controller.Controller'))
else:
rec.setdefault('type', attributes.get('type', sliver_default_type))
# set the vserver reference. If none, set to default.
rec.setdefault('vref', attributes.get('vref', 'default'))
### set initscripts; set empty rec['initscript'] if not
# if tag 'initscript_code' is set, that's what we use
iscode = attributes.get('initscript_code', '')
if iscode:
rec['initscript'] = iscode
else:
isname = attributes.get('initscript')
if isname is not None and isname in iscripts_hash:
rec['initscript'] = iscripts_hash[isname]
else:
rec['initscript'] = ''
# set delegations, if none, set empty
rec.setdefault('delegations', attributes.get("delegations", []))
# extract the implied rspec
rspec = {}
rec['rspec'] = rspec
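        # for each known resource, coerce the matching sliver tag (a string)
        # into the type of the default amount, falling back to the default
        # when the tag is missing or malformed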
for resname, default_amount in DEFAULT_ALLOCATION.items():
try:
t = type(default_amount)
amount = t.__new__(t, attributes[resname])
except (KeyError, ValueError): amount = default_amount
rspec[resname] = amount
# add in sysctl attributes into the rspec
for key in list(attributes.keys()):
if key.find("sysctl.") == 0:
rspec[key] = attributes[key]
# also export tags in rspec so they make it to the sliver_vs.start call
rspec['tags'] = attributes
database.db.deliver_record(rec)
if fullupdate:
database.db.set_min_timestamp(data['timestamp'])
# slivers are created here.
database.db.sync()
def deliver_ticket(data):
return GetSlivers(data, fullupdate=False)
def start():
    # No default allocation values for LXC yet; think about whether that's necessary
    # given that there are also default allocation values in this module
if implementation == 'vs':
for resname, default_amount in sliver_vs.DEFAULT_ALLOCATION.items():
DEFAULT_ALLOCATION[resname]=default_amount
account.register_class(sliver_class_to_register)
account.register_class(controller.Controller)
database.start()
api_calls.deliver_ticket = deliver_ticket
api.start()
### check if a sliver is running
### a first step to a unified code for codemux
def is_running (name):
if implementation=='vs':
import vserver
return vserver.VServer(name).is_running()
else:
import libvirt
running = False
        conn = libvirt.open('lxc://')
        try:
            dom = conn.lookupByName(name)
            running = dom.info()[0] == libvirt.VIR_DOMAIN_RUNNING
        finally:
            conn.close()
return running
| {
"content_hash": "c92cf6dfd7ae21d14acf240a1c1b2ec2",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 100,
"avg_line_length": 35.93076923076923,
"alnum_prop": 0.6343395418539928,
"repo_name": "dreibh/planetlab-lxc-nodemanager",
"id": "6db569a757f896dd659cddb24704dec82b251de3",
"size": "9342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slivermanager.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3519"
},
{
"name": "Makefile",
"bytes": "4704"
},
{
"name": "Python",
"bytes": "314431"
},
{
"name": "Shell",
"bytes": "2429"
}
],
"symlink_target": ""
} |
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('textbox11.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox('E9', 'This is some text',
{'fill': {'color': 'red'}})
workbook.close()
self.assertExcelEqual()
| {
"content_hash": "f527c0d1e0dfc1a4b5ff220e2b719ab5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 76,
"avg_line_length": 25.11111111111111,
"alnum_prop": 0.6268436578171092,
"repo_name": "jmcnamara/XlsxWriter",
"id": "67ef2d7ac22f6821e34487883b66fd1ed876e1a4",
"size": "891",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/comparison/test_textbox11.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time
import os
import subprocess as sub
import sys
import socket
from clint.textui import colored
BCN3DSigmaxLCDPath = "home/pi/BCN3DSigmaxLCD"
BCN3DSigmaPath = "/home/pi/BCN3DSigma"
BCN3DPlusPath = "/home/pi/BCN3DSigmax"
BCN3DSigmaScreenPath = "home/pi/BCN3DSigmaLCD"
repoPath = "/home/pi/sd-auto-loader"
codePath = "/home/pi/sd-auto-loader/Code"
#Tuple that holds all the input options of the software
inputOptions = ['sync','help']
#Pin declarations
LED1 = 21
LED2 = 20
LED3 = 16
LED4 = 12
LED5 = 7
LED6 = 8
LED7 = 25
LED8 = 24
LED9 = 23
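# haveInternet(): bring eth0 up, then report connectivity by resolving a
# well-known host and opening a TCP connection to it on port 443.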
def haveInternet():
os.system("sudo ifup eth0")
REMOTE_SERVER = "www.google.com"
	try:
		host = socket.gethostbyname(REMOTE_SERVER)
		s = socket.create_connection((host, 443))
		s.close()
		return True
except:
pass
return False
def syncGithub():
#Update the Repo
if haveInternet():
print (colored.green("=========Internet is ON!==========="))
try:
print "Getting updates from Github"
os.chdir(repoPath)
currentDirectory = os.getcwd()
print "The current directory is: %s" % currentDirectory
os.system("git pull origin master")
except:
print "Something went wrong, check your internet connection"
pass
else:
print (colored.red("=============No internet, no github sync============="))
def manageInputs():
print "Setting the switches to inputs"
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
#Set pull-ups to pins
GPIO.setup(5, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(26, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(19, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(6, GPIO.IN, pull_up_down=GPIO.PUD_UP)
#Read inputs just one time
input_state_5 = GPIO.input(5)
input_state_26 = GPIO.input(26)
input_state_19 = GPIO.input(19)
input_state_13 = GPIO.input(13)
input_state_6 = GPIO.input(6)
GPIO.setmode(GPIO.BCM)
GPIO.setup(LED1, GPIO.OUT)
GPIO.setup(LED2, GPIO.OUT)
GPIO.setup(LED3, GPIO.OUT)
GPIO.setup(LED4, GPIO.OUT)
GPIO.setup(LED5, GPIO.OUT)
GPIO.setup(LED6, GPIO.OUT)
GPIO.setup(LED7, GPIO.OUT)
GPIO.setup(LED8, GPIO.OUT)
GPIO.setup(LED9, GPIO.OUT)
def turnOnLED(pin):
#print "turning on led: %d" % pin
GPIO.output(pin, GPIO.HIGH)
time.sleep(0.1)
def turnOffLED(pin):
#print "turning off led: %d" % pin
GPIO.output(pin, GPIO.LOW)
time.sleep(0.1)
def turnOffAllLEDs():
GPIO.output(21, GPIO.LOW)
GPIO.output(20, GPIO.LOW)
GPIO.output(16, GPIO.LOW)
GPIO.output(12, GPIO.LOW)
GPIO.output(7, GPIO.LOW)
GPIO.output(8, GPIO.LOW)
GPIO.output(25, GPIO.LOW)
GPIO.output(24, GPIO.LOW)
GPIO.output(23, GPIO.LOW)
def turnOnAllLEDs():
GPIO.output(21, GPIO.HIGH)
GPIO.output(20, GPIO.HIGH)
GPIO.output(16, GPIO.HIGH)
GPIO.output(12, GPIO.HIGH)
GPIO.output(7, GPIO.HIGH)
GPIO.output(8, GPIO.HIGH)
GPIO.output(25, GPIO.HIGH)
GPIO.output(24, GPIO.HIGH)
GPIO.output(23, GPIO.HIGH)
def blinkLED(LED):
turnOnLED(LED)
time.sleep(0.25)
turnOffLED(LED)
def startUpLEDS(times):
#Just a sequence of LEDs to know that the system is running the program
print "Lightning some LEDs..."
for x in range(0,times):
print ". . . . ."
turnOnLED(LED1)
turnOnLED(LED2)
turnOnLED(LED3)
turnOnLED(LED4)
turnOnLED(LED5)
turnOnLED(LED6)
turnOnLED(LED7)
turnOnLED(LED8)
turnOnLED(LED9)
time.sleep(0.2)
turnOffLED(LED9)
turnOffLED(LED8)
turnOffLED(LED7)
turnOffLED(LED6)
turnOffLED(LED5)
turnOffLED(LED4)
turnOffLED(LED3)
turnOffLED(LED2)
turnOffLED(LED1)
#GPIO.cleanup()
def loadBCN3DSigmaSD():
os.chdir(codePath)
startUpLEDS(1)
time.sleep(2)
proc = sub.Popen(['./formatAndCopy.sh', 'Sigma'])
while (proc.returncode == None):
turnOnAllLEDs()
time.sleep(0.5)
turnOffAllLEDs()
proc.poll()
if (proc.returncode != 0):
print (colored.red("*************An error ocurred loading SD's***********"))
turnOffAllLEDs()
else:
print (colored.green("----------------SD's Loaded Successfully!-----------------"))
turnOnAllLEDs()
time.sleep(2) #Sleep for 2 seconds
for x in range(0,5):
turnOnAllLEDs()
time.sleep(0.25)
turnOffAllLEDs()
def loadBCN3DSigmaScreenSD():
os.chdir(codePath)
startUpLEDS(1)
time.sleep(2)
proc = sub.Popen(['./formatAndCopy.sh', 'LCD_Sigma'])
while (proc.returncode == None):
turnOnAllLEDs()
time.sleep(0.5)
turnOffAllLEDs()
proc.poll()
if (proc.returncode != 0):
print (colored.red("*************An error ocurred loading SD's***********"))
turnOffAllLEDs()
else:
print (colored.green("----------------SD's Loaded Successfully!-----------------"))
turnOnAllLEDs()
time.sleep(2) #Sleep for 2 seconds
for x in range(0,5):
turnOnAllLEDs()
time.sleep(0.25)
turnOffAllLEDs()
def loadBCN3DSigmaxSD():
os.chdir(codePath)
startUpLEDS(1)
time.sleep(2)
proc = sub.Popen(['./formatAndCopy.sh', 'Sigmax'])
while (proc.returncode == None):
turnOnAllLEDs()
time.sleep(0.5)
turnOffAllLEDs()
proc.poll()
if (proc.returncode != 0):
print (colored.red("*************An error ocurred loading SD's***********"))
turnOffAllLEDs()
else:
print (colored.green("----------------SD's Loaded Successfully!-----------------"))
turnOnAllLEDs()
time.sleep(2) #Sleep for 2 seconds
for x in range(0,5):
turnOnAllLEDs()
time.sleep(0.25)
turnOffAllLEDs()
def loadBCN3DSigmaxLCDSD():
os.chdir(codePath)
startUpLEDS(1)
time.sleep(2)
proc = sub.Popen(['./formatAndCopy.sh', 'LCD_Sigmax'])
while (proc.returncode == None):
turnOnAllLEDs()
time.sleep(0.5)
turnOffAllLEDs()
proc.poll()
if (proc.returncode != 0):
print (colored.red("*************An error ocurred loading SD's***********"))
turnOffAllLEDs()
else:
print (colored.green("----------------SD's Loaded Successfully!-----------------"))
turnOnAllLEDs()
time.sleep(2) #Sleep for 2 seconds
for x in range(0,5):
turnOnAllLEDs()
time.sleep(0.25)
turnOffAllLEDs()
def printButtonStatus():
print "Switch 1 is set to: %d" % GPIO.input(6)
print "Switch 2 is set to: %d" % GPIO.input(13)
print "Switch 3 is set to: %d" % GPIO.input(19)
print "Switch 4 is set to: %d" % GPIO.input(26)
def checkButtons(channel):
#Read the status of the switches and buttons
try:
print "Reading the switch buttons..."
input_state_26 = GPIO.input(26)
input_state_19 = GPIO.input(19)
input_state_13 = GPIO.input(13)
input_state_6 = GPIO.input(6)
printButtonStatus()
if input_state_26 == False and input_state_19 == True and input_state_13 == True and input_state_6 == True:
print 'Loading BCN3D Sigma SD'
loadBCN3DSigmaSD()
time.sleep(2)
if input_state_26 == True and input_state_19 == False and input_state_13 == True and input_state_6 == True:
print 'Loading BCN3D Sigma Display uSD'
loadBCN3DSigmaScreenSD()
time.sleep(2)
if input_state_26 == True and input_state_19 == True and input_state_13 == False and input_state_6 == True:
print 'Loading BCN3DSigmax SD'
loadBCN3DSigmaxSD()
time.sleep(2)
if input_state_26 == True and input_state_19 == True and input_state_13 == True and input_state_6 == False:
print 'Loading BCN3D Sigmax Display uSD'
loadBCN3DSigmaxLCDSD()
time.sleep(2)
if input_state_26 == False and input_state_19 == False and input_state_13 == False and input_state_6 == False:
turnOffAllLEDs()
for x in range(0,5):
turnOnAllLEDs();
time.sleep(0.25);
turnOffAllLEDs();
print "Powering OFF The system"
GPIO.cleanup()
os.system("sudo poweroff")
#if input_state_26 == True and input_state_19 == True and input_state_13 == True and input_state_6 == False:
except KeyboardInterrupt:
#If we press ctrl + c
print "Program closed by user"
GPIO.cleanup()
sys.exit()
except:
print "Other error or exception ocurred!"
GPIO.cleanup()
sys.exit()
def printHelp():
#function that prints the options available as input commands
try:
print "This are the available options: "
print '\n'
i = 0
for option in inputOptions:
print " %d. %s" % (i,option)
i+=1
print '\n'
print "Use: sudo python sdloader.py [OPTION]"
except KeyboardInterrupt:
#If we press ctrl + c
print "Program closed by user"
GPIO.cleanup()
sys.exit()
#------------------------------------MAIN FLOW-----------------------------
def main():
if len(sys.argv) > 1:
if sys.argv[1] == inputOptions[0]:
syncGithub()
#Only sync then quit
sys.exit()
elif sys.argv[1] == inputOptions[1]:
printHelp()
#When a keyboard is detected, exit program
elif len(sys.argv) > 1 and sys.argv[1] not in inputOptions:
#When input arguments are wrong
print "command/s " + str(sys.argv[1:]) + " not recognised. Please type " + sys.argv[0] + " \"help\" to see commands"
time.sleep(3)
sys.exit()
else:
syncGithub()
manageInputs()
startUpLEDS(3)
#Callback function in PIN 5. Whenever a Falling Edge is detected, run checkButtons function
GPIO.add_event_detect(5, GPIO.FALLING, callback=checkButtons, bouncetime=150)
while True:
time.sleep(0.5)
#print "waiting for the load button..."
#Just the regular boilerplate to start the program
if __name__ == '__main__':
main()
| {
"content_hash": "33629b7f962c80b9ea8bdb7ad6504909",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 119,
"avg_line_length": 27.053731343283584,
"alnum_prop": 0.6679907315458458,
"repo_name": "BCN3D/SD-Auto-Loader",
"id": "886af2ef182fd13ab2f76b2a08014785aa15b0f8",
"size": "9490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/sdloader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9490"
},
{
"name": "Shell",
"bytes": "2970"
}
],
"symlink_target": ""
} |
import json
import urllib2
import pymongo
# connect to mongo
connection = pymongo.MongoClient("mongodb://localhost")
# get a handle to the reddit database
db = connection.reddit
stories = db.stories
# get the reddit home page
reddit_page = urllib2.urlopen("http://www.reddit.com/r/technology/.json")
# parse the json
parsed = json.loads(reddit_page.read())
print parsed
# iterate through every news item on the page
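# each child wraps one story; its 'data' dict is stored as a document in the
# 'stories' collection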
for item in parsed['data']['children']:
    # put it in mongo
stories.insert(item['data'])
| {
"content_hash": "af9e7c3ac39a61f4c09077d07c60ac38",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 73,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.7307692307692307,
"repo_name": "fabella/pymongo",
"id": "cad5c6e041d338dbaf266b55e3be8b7b5d52d3c7",
"size": "520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/chapter2/read_reddit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "42476"
}
],
"symlink_target": ""
} |
import argparse
import json
from taxonomyschema.request import Requestor
from taxonomyschema.vocabulary import VocabularyEncoder, VocabularyManifest
from requests.exceptions import HTTPError
def run(models_dir, url):
r = Requestor(url)
data = json.dumps(VocabularyManifest(models_dir), cls=VocabularyEncoder)
try:
resp = r.update_service(data)
    except HTTPError as err:
        # 'resp' is never bound when update_service() raises, so read the
        # failed response off the exception object instead
        resp_json = err.response.json()
if 'error' in resp_json:
print('[ERROR]: API update failed')
for e in resp_json['error']:
                print('code: {}'.format(e.get('code', '')))
                print('message: {}'.format(e.get('message', '')))
print('')
def main():
parser = argparse.ArgumentParser(
description=(
'Updates Taxonomy Service datamodels via API call'
)
)
parser.add_argument(
'models',
type=str,
default='datamodels',
help='path to models directory'
)
parser.add_argument(
'url',
type=str,
help='url of API to POST models to'
)
args = parser.parse_args()
run(models_dir=args.models, url=args.url)
if __name__ == '__main__':
main()
| {
"content_hash": "3f9c0a2743fc4d86d572a93e651aad32",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 25.458333333333332,
"alnum_prop": 0.5900163666121113,
"repo_name": "JiscRDSS/taxonomyschema",
"id": "e0ae9b28891a98b45600f3e854b50823c0f4dcd5",
"size": "1245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taxonomyschema/taxonomyschema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "944"
},
{
"name": "Python",
"bytes": "10335"
},
{
"name": "Shell",
"bytes": "2428"
}
],
"symlink_target": ""
} |
"""A very simple MNIST classifier.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
FLAGS = None
def main(_):
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
# The raw formulation of cross-entropy,
#
# tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
# reduction_indices=[1]))
#
# can be numerically unstable.
#
# So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
# outputs of 'y', and then average across the batch.
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Train
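  # Run 1000 SGD steps, each on a random minibatch of 100 training images.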
for _ in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images,
y_: mnist.test.labels}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',
help='Directory for storing input data')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | {
"content_hash": "8b6fa03e843d6051d73ad6f46f446285",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 89,
"avg_line_length": 32.53968253968254,
"alnum_prop": 0.6639024390243903,
"repo_name": "Becavalier/MachineLearning",
"id": "ff560f89c87edc0d21e266cececb4a9b6dd69957",
"size": "2740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TENSORFLOW/MNIST/mnist_softmax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70133"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
from past.builtins import basestring
from past.utils import old_div
import copy
from datetime import datetime, timedelta
import dateutil.parser
from functools import wraps
import inspect
from itertools import chain, product
import json
import logging
import os
import socket
import sys
import time
import traceback
from flask._compat import PY2
from flask import (
Flask, url_for, Markup, Blueprint, redirect,
flash, Response, render_template)
from flask.ext.admin import Admin, BaseView, expose, AdminIndexView
from flask.ext.admin.form import DateTimePickerWidget
from flask.ext.admin import base
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.cache import Cache
from flask import request
import sqlalchemy as sqla
from wtforms import (
widgets,
Form, DateTimeField, SelectField, TextAreaField, PasswordField, StringField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import chartkick
import jinja2
import markdown
from sqlalchemy import or_
import airflow
from airflow import jobs, login, models, settings, utils
from airflow.configuration import conf
from airflow.models import State
from airflow.settings import Session
from airflow.utils import AirflowException
from airflow.www import utils as wwwutils
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
from airflow import default_login as login
if conf.getboolean('webserver', 'AUTHENTICATE'):
try:
# Environment specific login
import airflow_login as login
except ImportError as e:
logging.error(
"authenticate is set to True in airflow.cfg, "
"but airflow_login failed to import %s" % e)
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
AUTHENTICATE = conf.getboolean('webserver', 'AUTHENTICATE')
if AUTHENTICATE is False:
login_required = lambda x: x
FILTER_BY_OWNER = False
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = AUTHENTICATE
class VisiblePasswordInput(widgets.PasswordInput):
def __init__(self, hide_value=False):
self.hide_value = hide_value
class VisiblePasswordField(PasswordField):
widget = VisiblePasswordInput()
def superuser_required(f):
'''
Decorator for views requiring superuser access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.is_superuser())
):
return f(*args, **kwargs)
else:
flash("This page requires superuser privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
utils.pessimistic_connection_handling()
app = Flask(__name__)
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
app.secret_key = conf.get('webserver', 'SECRET_KEY')
login.login_manager.init_app(app)
cache = Cache(
app=app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '/tmp'})
# Init for chartkick, the python wrapper for highcharts
ck = Blueprint(
'ck_page', __name__,
static_folder=chartkick.js(), static_url_path='/static')
app.register_blueprint(ck, url_prefix='/ck')
app.jinja_env.add_extension("chartkick.ext.charts")
@app.context_processor
def jinja_globals():
return {
'hostname': socket.gethostname(),
}
class DateTimeForm(Form):
# Date filter form needed for gantt and graph view
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
class GraphForm(Form):
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
class TreeForm(Form):
base_date = DateTimeField(
"Anchor date", widget=DateTimePickerWidget(), default=datetime.now())
num_runs = SelectField("Number of runs", default=25, choices=(
(5, "5"),
(25, "25"),
(50, "50"),
(100, "100"),
(365, "365"),
))
@app.route('/')
def index():
return redirect(url_for('admin.index'))
@app.route('/health')
def health():
""" We can add an array of tests here to check the server's health """
content = Markup(markdown.markdown("The server is healthy!"))
return content
@app.teardown_appcontext
def shutdown_session(exception=None):
settings.Session.remove()
def dag_link(v, c, m, p):
url = url_for(
'airflow.graph',
dag_id=m.dag_id)
return Markup(
'<a href="{url}">{m.dag_id}</a>'.format(**locals()))
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
        Default filters for the model: top-level DAGs that are active or paused.
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
        Default filters for the model: top-level DAGs that are active.
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = None
# filter the dags if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
if do_filter:
qry = (
session.query(DM)
.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.username)
.all()
)
else:
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
orm_dags = {dag.dag_id: dag for dag in qry}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
session.expunge_all()
session.commit()
session.close()
dags = dagbag.dags.values()
if do_filter:
dags = {
dag.dag_id: dag
for dag in dags
if (
dag.owner == current_user.username and (not dag.parent_dag)
)
}
else:
dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
return self.render(
'airflow/dags.html',
dags=dags,
orm_dags=orm_dags,
all_dag_ids=all_dag_ids)
admin = Admin(
app,
name="Airflow",
index_view=HomeView(name="DAGs"),
template_mode='bootstrap3')
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = eval(chart.default_params)
            if type(args) is not dict:
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
from airflow import macros
args.update(request_dict)
args['macros'] = macros
sql = jinja2.Template(chart.sql).render(**args)
label = jinja2.Template(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
import pandas as pd
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
                chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart_type == "datatable":
chart.show_datatable = True
if chart.show_datatable:
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
except Exception as e:
raise AirflowException(str(e))
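                # Highcharts expects datetime x values as epoch timestamps in
                # milliseconds, hence strftime("%s") * 1000 below.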
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
series = []
colorAxis = None
if chart_type == 'datatable':
payload['data'] = data
payload['state'] = 'SUCCESS'
return Response(
response=json.dumps(
payload, indent=4, cls=utils.AirflowJsonEncoder),
status=200,
mimetype="application/json")
elif chart_type == 'para':
df.rename(columns={
df.columns[0]: 'name',
df.columns[1]: 'group',
}, inplace=True)
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
elif chart_type == 'heatmap':
color_perc_lbound = float(
request.args.get('color_perc_lbound', 0))
color_perc_rbound = float(
request.args.get('color_perc_rbound', 1))
color_scheme = request.args.get('color_scheme', 'blue_red')
if color_scheme == 'blue_red':
stops = [
[color_perc_lbound, '#00D1C1'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#FFFFCC'
],
[color_perc_rbound, '#FF5A5F']
]
elif color_scheme == 'blue_scale':
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_rbound, '#2222FF']
]
elif color_scheme == 'fire':
diff = float(color_perc_rbound - color_perc_lbound)
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_lbound + 0.33*diff, '#FFFF00'],
[color_perc_lbound + 0.66*diff, '#FF0000'],
[color_perc_rbound, '#000000']
]
else:
stops = [
[color_perc_lbound, '#FFFFFF'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#888888'
],
[color_perc_rbound, '#000000'],
]
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
data = []
for row in df.itertuples():
data.append({
'x': row[2],
'y': row[3],
'value': row[4],
})
x_format = '{point.x:%Y-%m-%d}' \
if chart.x_is_date else '{point.x}'
series.append({
'data': data,
'borderWidth': 0,
'colsize': 24 * 36e5,
'turboThreshold': sys.float_info.max,
'tooltip': {
'headerFormat': '',
'pointFormat': (
df.columns[1] + ': ' + x_format + '<br/>' +
df.columns[2] + ': {point.y}<br/>' +
df.columns[3] + ': <b>{point.value}</b>'
),
},
})
colorAxis = {
'stops': stops,
'minColor': '#FFFFFF',
'maxColor': '#000000',
'min': 50,
'max': 2200,
}
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
for col in df.columns:
series.append({
'name': col,
'data': [
(k, df[col][k])
for k in df[col].keys()
if not np.isnan(df[col][k])]
})
series = [serie for serie in sorted(
series, key=lambda s: s['data'][0][1], reverse=True)]
if chart_type == "stacked_area":
stacking = "normal"
chart_type = 'area'
elif chart_type == "percent_area":
stacking = "percent"
chart_type = 'area'
else:
stacking = None
hc = {
'chart': {
'type': chart_type
},
'plotOptions': {
'series': {
'marker': {
'enabled': False
}
},
'area': {'stacking': stacking},
},
'title': {'text': ''},
'xAxis': {
'title': {'text': xaxis_label},
'type': 'datetime' if chart.x_is_date else None,
},
'yAxis': {
'title': {'text': yaxis_label},
},
'colorAxis': colorAxis,
'tooltip': {
'useHTML': True,
'backgroundColor': None,
'borderWidth': 0,
},
'series': series,
}
if chart.y_log_scale:
hc['yAxis']['type'] = 'logarithmic'
hc['yAxis']['minorTickInterval'] = 0.1
if 'min' in hc['yAxis']:
del hc['yAxis']['min']
payload['state'] = 'SUCCESS'
payload['hc'] = hc
payload['data'] = data
payload['request_dict'] = request_dict
return Response(
response=json.dumps(
payload, indent=4, cls=utils.AirflowJsonEncoder),
status=200,
mimetype="application/json")
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
session.expunge_all()
session.commit()
session.close()
if chart.chart_type == 'para':
return self.render('airflow/para/para.html', chart=chart)
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/highchart.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
@login_required
def dag_stats(self):
states = [
State.SUCCESS,
State.RUNNING,
State.FAILED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
State.QUEUED,
]
task_ids = []
dag_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
if not dag.is_subdag:
dag_ids.append(dag.dag_id)
TI = models.TaskInstance
session = Session()
qry = (
session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
.filter(TI.task_id.in_(task_ids))
.filter(TI.dag_id.in_(dag_ids))
.group_by(TI.dag_id, TI.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return Response(
response=json.dumps(payload, indent=4),
status=200, mimetype="application/json")
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
code = "".join(open(dag.full_filepath, 'r').readlines())
title = dag.filepath
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=socket.gethostname()), 404
@app.errorhandler(500)
def show_traceback(self):
return render_template(
'airflow/traceback.html', info=traceback.format_exc()), 500
@expose('/sandbox')
@login_required
def sandbox(self):
from airflow import configuration
title = "Sandbox Suggested Configuration"
cfg_loc = configuration.AIRFLOW_CONFIG + '.sandbox'
        with open(cfg_loc, 'r') as f:
            config = f.read()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {k: v for k, v in request.headers}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
return Response(
response=json.dumps(d, indent=4),
status=200, mimetype="application/json")
@expose('/login', methods=['GET', 'POST'])
def login(self):
return login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "/{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = BASE_LOG_FOLDER + log_relative
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
if socket.gethostname() == host:
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
except:
log = "Log file isn't where expected.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = (
"http://{host}:{WORKER_LOG_SERVER_PORT}/log"
"{log_relative}").format(**locals())
log += "Log file isn't local.\n"
log += "Fetching here: {url}\n".format(**locals())
try:
import requests
log += requests.get(url).text
except:
log += "Failed to fetch log file.".format(**locals())
session.commit()
session.close()
log = log.decode('utf-8') if PY2 else log
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
def task(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = dag.get_task(task_id)
task = copy.copy(task)
task.resolve_template_files()
attributes = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
attributes.append((attr_name, str(attr)))
title = "Task Details"
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
return self.render(
'airflow/task.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/action')
@login_required
def action(self):
action = request.args.get('action')
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
if action == "run":
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
force = request.args.get('force') == "true"
deps = request.args.get('deps') == "true"
ti = models.TaskInstance(task=task, execution_date=execution_date)
executor.start()
executor.queue_task_instance(
ti, force=force, ignore_dependencies=deps)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
elif action == 'clear':
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
elif action == 'success':
MAX_PERIODS = 1000
# Flagging tasks as successful
session = settings.Session()
task_ids = [task_id]
end_date = ((dag.latest_execution_date or datetime.now())
if future else execution_date)
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
if execution_date < start_date or end_date < start_date:
flash("Selected date before DAG start date", 'error')
return redirect(origin)
start_date = execution_date if not past else start_date
if downstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=False)]
if upstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=True)]
TI = models.TaskInstance
dates = utils.date_range(start_date, end_date)
tis = session.query(TI).filter(
TI.dag_id == dag_id,
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids)).all()
tis_to_change = session.query(TI).filter(
TI.dag_id == dag_id,
TI.execution_date.in_(dates),
TI.task_id.in_(task_ids),
TI.state != State.SUCCESS).all()
tasks = list(product(task_ids, dates))
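            # Instances that don't exist yet are created directly in the
            # SUCCESS state; existing non-success instances are updated below.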
tis_to_create = list(
set(tasks) -
set([(ti.task_id, ti.execution_date) for ti in tis]))
tis_all_altered = list(chain(
[(ti.task_id, ti.execution_date) for ti in tis_to_change],
tis_to_create))
if len(tis_all_altered) > MAX_PERIODS:
flash("Too many tasks at once (>{0})".format(
MAX_PERIODS), 'error')
return redirect(origin)
if confirmed:
for ti in tis_to_change:
ti.state = State.SUCCESS
session.commit()
for task_id, task_execution_date in tis_to_create:
ti = TI(
task=dag.get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS)
session.add(ti)
session.commit()
session.commit()
session.close()
flash("Marked success on {} task instances".format(
len(tis_all_altered)))
return redirect(origin)
else:
if not tis_all_altered:
flash("No task instances to mark as successful", 'error')
response = redirect(origin)
else:
tis = []
for task_id, task_execution_date in tis_all_altered:
tis.append(TI(
task=dag.get_task(task_id),
execution_date=task_execution_date,
state=State.SUCCESS))
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to mark as successful:"),
details=details,)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
start_date = dag.start_date
if not start_date and 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
if not base_date:
# New DAGs will not have a latest execution date
if dag.latest_execution_date:
base_date = dag.latest_execution_date + 2 * dag.schedule_interval
else:
base_date = datetime.now()
else:
base_date = dateutil.parser.parse(base_date)
start_date = dag.start_date
if not start_date and 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
# if a specific base_date is requested, don't round it
if not request.args.get('base_date'):
if start_date:
base_date = utils.round_time(
base_date, dag.schedule_interval, start_date)
else:
base_date = utils.round_time(base_date, dag.schedule_interval)
form = TreeForm(data={'base_date': base_date, 'num_runs': num_runs})
from_date = (base_date - (num_runs * dag.schedule_interval))
dates = utils.date_range(
from_date, base_date, dag.schedule_interval)
task_instances = {}
for ti in dag.get_task_instances(session, from_date):
task_instances[(ti.task_id, ti.execution_date)] = ti
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
node_limit = 5000 / max(1, len(dag.roots))
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
return {
'name': task.task_id,
'instances': [
utils.alchemy_to_dict(
task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
if len(dag.roots) > 1:
# d3 likes a single root
data = {
'name': 'root',
'instances': [],
'children': [recurse_nodes(t, set()) for t in dag.roots]
}
elif len(dag.roots) == 1:
data = recurse_nodes(dag.roots[0], set())
else:
flash("No tasks found.", "error")
data = []
data = json.dumps(data, indent=4, default=utils.json_ser)
session.commit()
session.close()
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
@wwwutils.gzipped
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
arrange = request.args.get('arrange', "LR")
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
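        # Recursively walk upstream from the DAG's roots to collect the unique
        # edges rendered in the graph view.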
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = GraphForm(data={'execution_date': dttm, 'arrange': arrange})
task_instances = {
ti.task_id: utils.alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)
}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks
}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.duration:
data.append([
ti.execution_date.isoformat(),
float(ti.duration) / (60*60)
])
if data:
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=all_data,
chart_options={'yAxis': {'title': {'text': 'hours'}}},
height="700px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/landing_times')
@login_required
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.end_date:
data.append([
ti.execution_date.isoformat(), old_div((
ti.end_date - (
ti.execution_date + task.schedule_interval)
                        ).total_seconds(), (60 * 60))
])
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=all_data,
height="700px",
chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/paused')
@login_required
def paused(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
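        # Note: an 'is_paused' value of 'false' pauses the DAG and any other
        # value un-pauses it, i.e. the arg reflects the state before toggling.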
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect('/')
@expose('/refresh_all')
@login_required
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti
for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
data = []
for i, ti in enumerate(tis):
end_date = ti.end_date or datetime.now()
tasks += [ti.task_id]
color = State.color(ti.state)
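            # Highcharts' columnrange series wants start/end times as epoch
            # milliseconds.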
data.append({
'x': i,
'low': int(ti.start_date.strftime('%s')) * 1000,
'high': int(end_date.strftime('%s')) * 1000,
'color': color,
})
height = (len(tis) * 25) + 50
session.commit()
session.close()
hc = {
'chart': {
'type': 'columnrange',
'inverted': True,
'height': height,
},
'xAxis': {'categories': tasks},
'yAxis': {'type': 'datetime'},
'title': {
'text': None
},
'plotOptions': {
'series': {
'cursor': 'pointer',
'minPointLength': 4,
},
},
'legend': {
'enabled': False
},
'series': [{
'data': data
}]
}
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
hc=json.dumps(hc, indent=4),
height=height,
demo_mode=demo_mode,
root=root,
)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
admin.add_view(Airflow(name='DAGs'))
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/')
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.args.get('conn_id')
csv = request.args.get('csv') == "true"
sql = request.args.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(
sql, QUERY_LIMIT, conn_type=db.conn_type))
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
admin.add_view(QueryView(name='Ad Hoc Query', category="Data Profiling"))
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
    Modifying the base ModelView class for non-edit, browse-only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
def log_link(v, c, m, p):
url = url_for(
'airflow.log',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
'<a href="{url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
url = url_for(
'airflow.task',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=m.dag_id,
root=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{m.task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_f(v, c, m, p):
color = State.color(m.state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{m.state}</span>'.format(**locals()))
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
mv = JobModelView(jobs.BaseJob, Session, name="Jobs", category="Browse")
admin.add_view(mv)
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
mv = LogModelView(
models.Log, Session, name="Logs", category="Browse")
admin.add_view(mv)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
named_filter_urls = True
column_formatters = dict(
log=log_link, task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('start_date', True)
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'log')
can_delete = True
page_size = 500
mv = TaskInstanceModelView(
models.TaskInstance, Session, name="Task Instances", category="Browse")
admin.add_view(mv)
mv = DagModelView(
models.DagModel, Session, name=None)
admin.add_view(mv)
# Hack to not add this view to the menu
admin._menu = admin._menu[:-1]
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted',)
form_overrides = dict(_password=VisiblePasswordField)
form_widget_args = {
'is_encrypted': {'disabled': True},
}
    # Used to customize the form: the form's elements get rendered and their
    # results are stored in the extra field as JSON. All of these need to be
    # prefixed with extra__ followed by the conn_type, as in
    # extra__{conn_type}__name. You can also hide form elements and rename
    # others from the connection_form.js file.
form_extra_fields = {
'extra__jdbc__drv_path' : StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
}
form_choices = {
'conn_type': [
('ftp', 'FTP',),
('hdfs', 'HDFS',),
('http', 'HTTP',),
('hive_cli', 'Hive Client Wrapper',),
('hive_metastore', 'Hive Metastore Thrift',),
('hiveserver2', 'Hive Server 2 Thrift',),
('jdbc', 'Jdbc Connection',),
('mysql', 'MySQL',),
('postgres', 'Postgres',),
('oracle', 'Oracle',),
('vertica', 'Vertica',),
('presto', 'Presto',),
('s3', 'S3',),
('samba', 'Samba',),
('sqlite', 'Sqlite',),
('mssql', 'Microsoft SQL Server'),
('mesos_framework-id', 'Mesos Framework ID'),
]
}
def on_model_change(self, form, model, is_created):
formdata = form.data
if formdata['conn_type'] in ['jdbc']:
extra = {
key:formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
return not conf.has_option('core', 'fernet_key')
@classmethod
def is_secure(self):
"""
Used to display a message in the Connection list view making it clear
that the passwords can't be encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
except:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception as e:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
mv = ConnectionModelView(
models.Connection, Session,
name="Connections", category="Admin")
admin.add_view(mv)
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
mv = UserModelView(models.User, Session, name="Users", category="Admin")
admin.add_view(mv)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
from airflow import configuration
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = configuration.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(configuration.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
else:
config = (
"# You Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle)
admin.add_view(ConfigurationView(name='Configuration', category="Admin"))
def label_link(v, c, m, p):
try:
default_params = eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('para', 'Parallel Coordinates'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('heatmap', 'Heatmap'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if AUTHENTICATE and not model.user_id and current_user:
model.user_id = current_user.id
model.last_modified = datetime.now()
mv = ChartModelView(
models.Chart, Session,
name="Charts", category="Data Profiling")
admin.add_view(mv)
admin.add_link(
base.MenuLink(
category='Docs',
name='Documentation',
url='http://pythonhosted.org/airflow/'))
admin.add_link(
base.MenuLink(
category='Docs',
name='Github',
url='https://github.com/airbnb/airflow'))
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
mv = KnowEventView(
models.KnownEvent, Session, name="Known Events", category="Data Profiling")
admin.add_view(mv)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
models.KnownEventType,
Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
pass
mv = DagPickleView(
models.DagPickle,
Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
column_list = ('key',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'val': {
'rows': 20,
}
}
mv = VariableView(
models.Variable, Session, name="Variables", category="Admin")
admin.add_view(mv)
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
mv = PoolModelView(models.Pool, Session, name="Pools", category="Admin")
admin.add_view(mv)
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
mv = SlaMissModelView(
models.SlaMiss, Session, name="SLA Misses", category="Browse")
admin.add_view(mv)
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import (
admin_views, flask_blueprints, menu_links)
for v in admin_views:
admin.add_view(v)
for bp in flask_blueprints:
print(bp)
app.register_blueprint(bp)
for ml in menu_links:
admin.add_link(ml)
integrate_plugins()
| {
"content_hash": "e93955228150beccfb4a7d980b64fe14",
"timestamp": "",
"source": "github",
"line_count": 2135,
"max_line_length": 97,
"avg_line_length": 33.78032786885246,
"alnum_prop": 0.5192939643099791,
"repo_name": "jason-z-hang/airflow",
"id": "48126af31a3888d37f0b5b2d190df144273dea7d",
"size": "72121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/www/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36075"
},
{
"name": "HTML",
"bytes": "93272"
},
{
"name": "JavaScript",
"bytes": "895747"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "527428"
},
{
"name": "Shell",
"bytes": "4204"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from rest_framework.response import Response
from django.db.models import Count
from sentry.api.bases import SentryAppsBaseEndpoint
from sentry.models import SentryApp
from sentry.api.permissions import SuperuserPermission
class SentryAppsStatsEndpoint(SentryAppsBaseEndpoint):
permission_classes = (SuperuserPermission,)
def get(self, request):
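        # Count only installations that have not been soft-deleted.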
sentry_apps = (
SentryApp.objects.filter(installations__date_deleted=None)
.annotate(Count("installations"))
.order_by()
)
if "per_page" in request.query_params:
sentry_apps = sentry_apps[: int(request.query_params["per_page"])]
apps = [
{"id": app.id, "slug": app.slug, "name": app.name, "installs": app.installations__count}
for app in sentry_apps
]
return Response(apps)
| {
"content_hash": "d9b044059af8b3e18c233d794d632a8e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 100,
"avg_line_length": 30.862068965517242,
"alnum_prop": 0.664804469273743,
"repo_name": "beeftornado/sentry",
"id": "82d0332c8e357af6860da21c6d409e7e5ba27e94",
"size": "895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/sentry_apps_stats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect
import re
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import is_in_graph_mode
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.tools.docs import doc_controls
# Allow deprecation warnings to be silenced temporarily with a context manager.
_PRINT_DEPRECATION_WARNINGS = True
# Remember which deprecation warnings have been printed already.
_PRINTED_WARNING = {}
class DeprecatedNamesAlreadySet(Exception):
"""Raised when setting deprecated names multiple times for the same symbol."""
pass
def _add_deprecated_function_notice_to_docstring(doc, date, instructions):
"""Adds a deprecation notice to a docstring for deprecated functions."""
main_text = ['THIS FUNCTION IS DEPRECATED. It will be removed %s.' %
('in a future version' if date is None else ('after %s' % date))]
if instructions:
main_text.append('Instructions for updating:')
return decorator_utils.add_notice_to_docstring(
doc, instructions,
'DEPRECATED FUNCTION',
'(deprecated)', main_text)
def _add_deprecated_arg_notice_to_docstring(doc, date, instructions,
deprecated_names):
"""Adds a deprecation notice to a docstring for deprecated arguments."""
deprecation_string = ', '.join(sorted(deprecated_names))
return decorator_utils.add_notice_to_docstring(
doc, instructions, 'DEPRECATED FUNCTION ARGUMENTS',
'(deprecated arguments)', [
'SOME ARGUMENTS ARE DEPRECATED: `(%s)`. '
'They will be removed %s.' %
(deprecation_string, 'in a future version' if date is None else
('after %s' % date)), 'Instructions for updating:'
])
def _add_deprecated_arg_value_notice_to_docstring(doc, date, instructions,
deprecated_name_value_dict):
"""Adds a deprecation notice to a docstring for deprecated arguments."""
deprecation_string = ', '.join(
'%s=%r' % (key, value)
for key, value in sorted(deprecated_name_value_dict.items()))
when = 'in a future version' if date is None else ('after %s' % date)
return decorator_utils.add_notice_to_docstring(
doc, instructions, 'DEPRECATED FUNCTION ARGUMENT VALUES',
'(deprecated argument values)', [
'SOME ARGUMENT VALUES ARE DEPRECATED: `(%s)`. '
'They will be removed %s.' % (deprecation_string, when),
'Instructions for updating:'
])
def _validate_deprecation_args(date, instructions):
if date is not None and not re.match(r'20\d\d-[01]\d-[0123]\d', date):
raise ValueError(f'Date must be in format YYYY-MM-DD. Received: {date}')
if not instructions:
raise ValueError(
'Don\'t deprecate things without conversion instructions! Specify '
'the `instructions` argument.')
def _call_location(outer=False):
"""Returns call location given level up from current call."""
# Two up: <_call_location>, <_call_location's caller>
# tf_inspect is not required here. Please ignore the lint warning by adding
# DISABLE_IMPORT_INSPECT_CHECK=TRUE to your cl description. Using it caused
# test timeouts (b/189384061).
f = inspect.currentframe().f_back.f_back
parent = f.f_back
if outer and parent is not None:
f = parent
return '{}:{}'.format(f.f_code.co_filename, f.f_lineno)
def _safe_eq(a, b):
if a is None or b is None:
return a is None and b is None
return a == b
def _wrap_decorator(wrapped_function):
"""Indicate that one function wraps another.
This decorator wraps a function using `tf_decorator.make_decorator`
so that doc generation scripts can pick up original function
signature.
  It would be better to use the @functools.wraps decorator, but it would
not update function signature to match wrapped function in Python 2.
Args:
wrapped_function: The function that decorated function wraps.
Returns:
Function that accepts wrapper function as an argument and returns
`TFDecorator` instance.
"""
def wrapper(wrapper_func):
return tf_decorator.make_decorator(wrapped_function, wrapper_func)
return wrapper
def deprecated_alias(deprecated_name, name, func_or_class, warn_once=True):
"""Deprecate a symbol in favor of a new name with identical semantics.
This function is meant to be used when defining a backwards-compatibility
alias for a symbol which has been moved. For example:
module1.py:
```python
class NewNameForClass: pass
```
module2.py:
```python
import module1
DeprecatedNameForClass = deprecated_alias(
deprecated_name='module2.DeprecatedNameForClass',
name='module1.NewNameForClass',
func_or_class=module1.NewNameForClass)
```
This function works for classes and functions.
For classes, it creates a new class which is functionally identical (it
inherits from the original, and overrides its constructor), but which prints
a deprecation warning when an instance is created. It also adds a deprecation
notice to the class' docstring.
For functions, it returns a function wrapped by `tf_decorator.make_decorator`.
That function prints a warning when used, and has a deprecation notice in its
docstring. This is more or less equivalent (the deprecation warning has
slightly different text) to writing:
```python
@deprecated
def deprecated_alias(original_args):
real_function(original_args)
```
Args:
deprecated_name: The name of the symbol that is being deprecated, to be used
in the warning message. This should be its fully qualified name to avoid
confusion.
name: The name of the symbol that is to be used instead of the deprecated
name. This should be a fully qualified name to avoid confusion.
func_or_class: The (non-deprecated) class or function for which a deprecated
alias should be created.
warn_once: If True (the default), only print a deprecation warning the first
time this function is used, or the class is instantiated.
Returns:
A wrapped version of `func_or_class` which prints a deprecation warning on
use and has a modified docstring.
"""
if tf_inspect.isclass(func_or_class):
# Make a new class with __init__ wrapped in a warning.
class _NewClass(func_or_class): # pylint: disable=missing-docstring
__doc__ = decorator_utils.add_notice_to_docstring(
func_or_class.__doc__, 'Please use %s instead.' % name,
'DEPRECATED CLASS',
'(deprecated)', ['THIS CLASS IS DEPRECATED. '
'It will be removed in a future version. '])
__name__ = func_or_class.__name__
__module__ = _call_location(outer=True)
@_wrap_decorator(func_or_class.__init__)
def __init__(self, *args, **kwargs):
if hasattr(_NewClass.__init__, '__func__'):
# Python 2
_NewClass.__init__.__func__.__doc__ = func_or_class.__init__.__doc__
else:
# Python 3
_NewClass.__init__.__doc__ = func_or_class.__init__.__doc__
if _PRINT_DEPRECATION_WARNINGS:
# We're making the alias as we speak. The original may have other
# aliases, so we cannot use it to check for whether it's already been
# warned about.
if _NewClass.__init__ not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[_NewClass.__init__] = True
logging.warning(
'From %s: The name %s is deprecated. Please use %s instead.\n',
_call_location(), deprecated_name, name)
super(_NewClass, self).__init__(*args, **kwargs)
return _NewClass
else:
decorator_utils.validate_callable(func_or_class, 'deprecated')
# Make a wrapper for the original
@functools.wraps(func_or_class)
def new_func(*args, **kwargs): # pylint: disable=missing-docstring
if _PRINT_DEPRECATION_WARNINGS:
# We're making the alias as we speak. The original may have other
# aliases, so we cannot use it to check for whether it's already been
# warned about.
if new_func not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[new_func] = True
logging.warning(
'From %s: The name %s is deprecated. Please use %s instead.\n',
_call_location(), deprecated_name, name)
return func_or_class(*args, **kwargs)
return tf_decorator.make_decorator(
func_or_class, new_func, 'deprecated',
_add_deprecated_function_notice_to_docstring(
func_or_class.__doc__, None, 'Please use %s instead.' % name))
def deprecated_endpoints(*args):
"""Decorator for marking endpoints deprecated.
This decorator does not print deprecation messages.
TODO(annarev): eventually start printing deprecation warnings when
@deprecation_endpoints decorator is added.
Args:
*args: Deprecated endpoint names.
Returns:
A function that takes symbol as an argument and adds
_tf_deprecated_api_names to that symbol.
_tf_deprecated_api_names would be set to a list of deprecated
endpoint names for the symbol.
"""
def deprecated_wrapper(func):
# pylint: disable=protected-access
if '_tf_deprecated_api_names' in func.__dict__:
raise DeprecatedNamesAlreadySet(
f'Cannot set deprecated names for {func.__name__} to {args}. '
'Deprecated names are already set to '
f'{func._tf_deprecated_api_names}.')
func._tf_deprecated_api_names = args
# pylint: disable=protected-access
return func
return deprecated_wrapper
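# Illustrative usage sketch (hypothetical endpoint names, not part of this
# module):
#
#   @deprecated_endpoints('tf.old_name', 'tf.other_old_name')
#   def new_name():
#     ...
#
# This only records the names in `_tf_deprecated_api_names`; no warning is
# printed by the decorator itself.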
def deprecated(date, instructions, warn_once=True):
"""Decorator for marking functions or methods deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called. It has the following format:
<function> (from <module>) is deprecated and will be removed after <date>.
Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (deprecated)' is appended
to the first line of the docstring and a deprecation notice is prepended
to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
Must be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
warn_once: Boolean. Set to `True` to warn only the first time the decorated
function is called. Otherwise, every call will log a warning.
Returns:
Decorated function or method.
Raises:
    ValueError: If date is not None and not in ISO 8601 format, or if
      instructions are empty.
"""
_validate_deprecation_args(date, instructions)
def deprecated_wrapper(func_or_class):
"""Deprecation wrapper."""
if isinstance(func_or_class, type):
# If a class is deprecated, you actually want to wrap the constructor.
cls = func_or_class
if cls.__new__ is object.__new__:
func = cls.__init__
constructor_name = '__init__'
else:
func = cls.__new__
constructor_name = '__new__'
else:
cls = None
constructor_name = None
func = func_or_class
decorator_utils.validate_callable(func, 'deprecated')
@functools.wraps(func)
def new_func(*args, **kwargs): # pylint: disable=missing-docstring
if _PRINT_DEPRECATION_WARNINGS:
if func not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[func] = True
logging.warning(
'From %s: %s (from %s) is deprecated and will be removed %s.\n'
'Instructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
doc_controls.set_deprecated(new_func)
new_func = tf_decorator.make_decorator(
func, new_func, 'deprecated',
_add_deprecated_function_notice_to_docstring(func.__doc__, date,
instructions))
if cls is None:
return new_func
else:
# Insert the wrapped function as the constructor
setattr(cls, constructor_name, new_func)
# And update the docstring of the class.
cls.__doc__ = _add_deprecated_function_notice_to_docstring(
cls.__doc__, date, instructions)
return cls
return deprecated_wrapper
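# --- Editor's usage sketch (not part of the original module) -----------------
# Applying @deprecated to a function; the name and instructions below are
# hypothetical. The first call logs a warning (warn_once defaults to True) and
# the docstring gains a deprecation notice. Decorating a class wraps its
# __init__ (or __new__) in the same way.
@deprecated(None, 'Use a hypothetical replacement_fn() instead.')
def _example_old_fn(x):
  """Hypothetical function kept only to show the decorator in action."""
  return x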
DeprecatedArgSpec = collections.namedtuple(
'DeprecatedArgSpec', ['position', 'has_ok_value', 'ok_value'])
def deprecated_args(date, instructions, *deprecated_arg_names_or_tuples,
**kwargs):
"""Decorator for marking specific function arguments as deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called with the deprecated argument. It has the following format:
Calling <function> (from <module>) with <arg> is deprecated and will be
removed after <date>. Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> includes the class name if it is a method.
It also edits the docstring of the function: ' (deprecated arguments)' is
appended to the first line of the docstring and a deprecation notice is
prepended to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
Must be ISO 8601 (YYYY-MM-DD), or None.
instructions: String. Instructions on how to update code using the
deprecated function.
*deprecated_arg_names_or_tuples: String or 2-Tuple (String,
ok_val). The string is the deprecated argument name.
Optionally, an ok-value may be provided. If the user provided
argument equals this value, the warning is suppressed.
**kwargs: If `warn_once=False` is passed, every call with a deprecated
argument will log a warning. The default behavior is to only warn the
first time the function is called with any given deprecated argument.
All other kwargs raise `ValueError`.
Returns:
Decorated function or method.
Raises:
    ValueError: If date is not None and not in ISO 8601 format, instructions
      are empty, the deprecated arguments are not present in the function
      signature, the second element of a deprecated_tuple is not a
      list, or if a kwarg other than `warn_once` is passed.
"""
_validate_deprecation_args(date, instructions)
if not deprecated_arg_names_or_tuples:
raise ValueError('Specify which argument is deprecated.')
if kwargs and list(kwargs.keys()) != ['warn_once']:
kwargs.pop('warn_once', None)
raise ValueError(f'Illegal argument passed to deprecated_args: {kwargs}')
warn_once = kwargs.get('warn_once', True)
def _get_arg_names_to_ok_vals():
"""Returns a dict mapping arg_name to DeprecatedArgSpec w/o position."""
d = {}
for name_or_tuple in deprecated_arg_names_or_tuples:
if isinstance(name_or_tuple, tuple):
d[name_or_tuple[0]] = DeprecatedArgSpec(-1, True, name_or_tuple[1])
else:
d[name_or_tuple] = DeprecatedArgSpec(-1, False, None)
return d
def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
"""Builds a dictionary from deprecated arguments to their spec.
Returned dict is keyed by argument name.
Each value is a DeprecatedArgSpec with the following fields:
position: The zero-based argument position of the argument
within the signature. None if the argument isn't found in
the signature.
ok_values: Values of this argument for which warning will be
suppressed.
Args:
names_to_ok_vals: dict from string arg_name to a list of values,
possibly empty, which should not elicit a warning.
arg_spec: Output from tf_inspect.getfullargspec on the called function.
Returns:
Dictionary from arg_name to DeprecatedArgSpec.
"""
# Extract argument list
arg_space = arg_spec.args + arg_spec.kwonlyargs
arg_name_to_pos = {
name: pos for pos, name in enumerate(arg_space)}
deprecated_positional_args = {}
for arg_name, spec in iter(names_to_ok_vals.items()):
if arg_name in arg_name_to_pos:
pos = arg_name_to_pos[arg_name]
deprecated_positional_args[arg_name] = DeprecatedArgSpec(
pos, spec.has_ok_value, spec.ok_value)
return deprecated_positional_args
deprecated_arg_names = _get_arg_names_to_ok_vals()
def deprecated_wrapper(func):
"""Deprecation decorator."""
decorator_utils.validate_callable(func, 'deprecated_args')
arg_spec = tf_inspect.getfullargspec(func)
deprecated_positions = _get_deprecated_positional_arguments(
deprecated_arg_names, arg_spec)
is_varargs_deprecated = arg_spec.varargs in deprecated_arg_names
is_kwargs_deprecated = arg_spec.varkw in deprecated_arg_names
if (len(deprecated_positions) + is_varargs_deprecated
+ is_kwargs_deprecated
!= len(deprecated_arg_names_or_tuples)):
known_args = (arg_spec.args
+ arg_spec.kwonlyargs
+ [arg_spec.varargs, arg_spec.varkw])
missing_args = [arg_name for arg_name in deprecated_arg_names
if arg_name not in known_args]
raise ValueError('The following deprecated arguments are not present '
f'in the function signature: {missing_args}. '
'Expected arguments from the following list: '
f'{known_args}.')
def _same_value(a, b):
"""A comparison operation that works for multiple object types.
Returns True for two empty lists, two numeric values with the
same value, etc.
Returns False for (pd.DataFrame, None), and other pairs which
should not be considered equivalent.
Args:
a: value one of the comparison.
b: value two of the comparison.
Returns:
A boolean indicating whether the two inputs are the same value
for the purposes of deprecation.
"""
if a is b:
return True
try:
equality = a == b
if isinstance(equality, bool):
return equality
except TypeError:
return False
return False
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Deprecation wrapper."""
# TODO(apassos) figure out a way to have reasonable performance with
# deprecation warnings and eager mode.
if is_in_graph_mode.IS_IN_GRAPH_MODE() and _PRINT_DEPRECATION_WARNINGS:
invalid_args = []
named_args = tf_inspect.getcallargs(func, *args, **kwargs)
for arg_name, spec in iter(deprecated_positions.items()):
if (spec.position < len(args) and
not (spec.has_ok_value and
_same_value(named_args[arg_name], spec.ok_value))):
invalid_args.append(arg_name)
if is_varargs_deprecated and len(args) > len(arg_spec.args):
invalid_args.append(arg_spec.varargs)
if is_kwargs_deprecated and kwargs:
invalid_args.append(arg_spec.varkw)
for arg_name in deprecated_arg_names:
if (arg_name in kwargs and
not (deprecated_positions[arg_name].has_ok_value and
_same_value(named_args[arg_name],
deprecated_positions[arg_name].ok_value))):
invalid_args.append(arg_name)
for arg_name in invalid_args:
if (func, arg_name) not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[(func, arg_name)] = True
logging.warning(
'From %s: calling %s (from %s) with %s is deprecated and will '
'be removed %s.\nInstructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__, arg_name,
'in a future version' if date is None else ('after %s' % date),
instructions)
return func(*args, **kwargs)
doc = _add_deprecated_arg_notice_to_docstring(
func.__doc__, date, instructions, sorted(deprecated_arg_names.keys()))
return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)
return deprecated_wrapper
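# --- Editor's usage sketch (not part of the original module) -----------------
# Marking one argument, plus one argument with an "ok" value, as deprecated;
# all names are hypothetical. Calls passing old_width log a warning; calls
# passing legacy_mode=False do not, because False is its ok value.
@deprecated_args(None, 'Pass width instead.',
                 'old_width', ('legacy_mode', False))
def _example_resize(width=None, old_width=None, legacy_mode=False):
  """Hypothetical function kept only to show the decorator in action."""
  return width if width is not None else old_width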
def deprecated_arg_values(date, instructions, warn_once=True,
**deprecated_kwargs):
"""Decorator for marking specific function argument values as deprecated.
This decorator logs a deprecation warning whenever the decorated function is
called with the deprecated argument values. It has the following format:
Calling <function> (from <module>) with <arg>=<value> is deprecated and
will be removed after <date>. Instructions for updating:
<instructions>
If `date` is None, 'after <date>' is replaced with 'in a future version'.
<function> will include the class name if it is a method.
It also edits the docstring of the function: ' (deprecated arguments)' is
appended to the first line of the docstring and a deprecation notice is
prepended to the rest of the docstring.
Args:
date: String or None. The date the function is scheduled to be removed.
Must be ISO 8601 (YYYY-MM-DD), or None
instructions: String. Instructions on how to update code using the
deprecated function.
warn_once: If `True`, warn only the first time this function is called with
deprecated argument values. Otherwise, every call (with a deprecated
argument value) will log a warning.
**deprecated_kwargs: The deprecated argument values.
Returns:
Decorated function or method.
Raises:
    ValueError: If date is not None and not in ISO 8601 format, or if
      instructions are empty.
"""
_validate_deprecation_args(date, instructions)
if not deprecated_kwargs:
raise ValueError('Specify which argument values are deprecated.')
def deprecated_wrapper(func):
"""Deprecation decorator."""
decorator_utils.validate_callable(func, 'deprecated_arg_values')
@functools.wraps(func)
def new_func(*args, **kwargs):
"""Deprecation wrapper."""
if _PRINT_DEPRECATION_WARNINGS:
named_args = tf_inspect.getcallargs(func, *args, **kwargs)
for arg_name, arg_value in deprecated_kwargs.items():
if arg_name in named_args and _safe_eq(named_args[arg_name],
arg_value):
if (func, arg_name) not in _PRINTED_WARNING:
if warn_once:
_PRINTED_WARNING[(func, arg_name)] = True
logging.warning(
'From %s: calling %s (from %s) with %s=%s is deprecated and '
'will be removed %s.\nInstructions for updating:\n%s',
_call_location(), decorator_utils.get_qualified_name(func),
func.__module__, arg_name, arg_value, 'in a future version'
if date is None else ('after %s' % date), instructions)
return func(*args, **kwargs)
doc = _add_deprecated_arg_value_notice_to_docstring(
func.__doc__, date, instructions, deprecated_kwargs)
return tf_decorator.make_decorator(func, new_func, 'deprecated', doc)
return deprecated_wrapper
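# --- Editor's usage sketch (not part of the original module) -----------------
# Deprecating one specific value of an argument; names are hypothetical.
# Calls with verbose=True log a warning, calls with verbose=False do not.
@deprecated_arg_values(None, 'verbose=True is deprecated; pass verbose=False.',
                       verbose=True)
def _example_run(verbose=False):
  """Hypothetical function kept only to show the decorator in action."""
  return verbose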
def deprecated_argument_lookup(new_name, new_value, old_name, old_value):
"""Looks up deprecated argument name and ensures both are not used.
Args:
new_name: new name of argument
new_value: value of new argument (or None if not used)
old_name: old name of argument
old_value: value of old argument (or None if not used)
Returns:
The effective argument that should be used.
Raises:
ValueError: if new_value and old_value are both non-null
"""
if old_value is not None:
if new_value is not None:
raise ValueError(f"Cannot specify both '{old_name}' and '{new_name}'.")
return old_value
return new_value
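# --- Editor's usage sketch (not part of the original module) -----------------
# Typical pattern inside a function whose argument was renamed; names are
# hypothetical. Supplying both the old and the new name raises ValueError.
def _example_area(height=None, width=None, breadth=None):
  """Hypothetical function: `breadth` was renamed to `width`."""
  width = deprecated_argument_lookup('width', width, 'breadth', breadth)
  return (height or 0) * (width or 0)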
def rewrite_argument_docstring(old_doc, old_argument, new_argument):
return old_doc.replace('`%s`' % old_argument, '`%s`' % new_argument).replace(
'%s:' % old_argument, '%s:' % new_argument)
@tf_contextlib.contextmanager
def silence():
  """Temporarily silence deprecation warnings."""
  global _PRINT_DEPRECATION_WARNINGS
  print_deprecation_warnings = _PRINT_DEPRECATION_WARNINGS
  _PRINT_DEPRECATION_WARNINGS = False
  try:
    yield
  finally:
    # Restore the previous setting even if the wrapped block raises.
    _PRINT_DEPRECATION_WARNINGS = print_deprecation_warnings
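# --- Editor's usage sketch (not part of the original module) -----------------
# Suppressing deprecation warnings around a block of calls; `deprecated_fn`
# is a hypothetical callable supplied by the caller.
def _example_call_quietly(deprecated_fn, *args, **kwargs):
  with silence():
    return deprecated_fn(*args, **kwargs)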
class HiddenTfApiAttribute(property):
"""Hides a class attribute from the public API.
Attributes in public classes can be hidden from the API by having an '_' in
front of the name (e.g. ClassName._variables). This doesn't work when
attributes or methods are inherited from a parent class. To hide inherited
attributes, set their values to be `deprecation.hide_attribute_from_api`.
For example, this is used in V2 Estimator to hide the deprecated
export_savedmodel method:
class EstimatorV2(Estimator):
export_savedmodel = deprecation.hide_attribute_from_api('...')
"""
def __init__(self, deprecation_message):
def raise_error(unused_self):
raise AttributeError(deprecation_message)
super(HiddenTfApiAttribute, self).__init__(raise_error)
hide_attribute_from_api = HiddenTfApiAttribute # pylint: disable=invalid-name
# TODO(kathywu): Remove once cl/246395236 is submitted.
HIDDEN_ATTRIBUTE = HiddenTfApiAttribute('This attribute has been deprecated.')
| {
"content_hash": "d6e7d74b69c6417403813edc04f218dc",
"timestamp": "",
"source": "github",
"line_count": 673,
"max_line_length": 80,
"avg_line_length": 38.690936106983656,
"alnum_prop": 0.6668842889511886,
"repo_name": "frreiss/tensorflow-fred",
"id": "202460272e402685e74f5be4c3a40d5a67d933cd",
"size": "26729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/util/deprecation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from openerp import models, fields, api
class BarcodeEventsMixin(models.AbstractModel):
""" Mixin class for objects reacting when a barcode is scanned in their form views
which contains `<field name="_barcode_scanned" widget="barcode_handler"/>`.
Models using this mixin must implement the method on_barcode_scanned. It works
like an onchange and receives the scanned barcode in parameter.
"""
_name = 'barcodes.barcode_events_mixin'
_barcode_scanned = fields.Char("Barcode Scanned", help="Value of the last barcode scanned.", store=False)
@api.onchange('_barcode_scanned')
def _on_barcode_scanned(self):
barcode = self._barcode_scanned
if barcode:
self._barcode_scanned = ""
return self.on_barcode_scanned(barcode)
def on_barcode_scanned(self, barcode):
raise NotImplementedError("In order to use barcodes.barcode_events_mixin, method on_barcode_scanned must be implemented")
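# --- Editor's usage sketch (hypothetical model, not part of this addon) ------
# A concrete model consumes the mixin by inheriting it (alongside its own
# _name) and implementing on_barcode_scanned; its form view must also contain
# <field name="_barcode_scanned" widget="barcode_handler"/>.
class BarcodeScanExample(models.TransientModel):
    _name = 'barcodes.barcode_scan_example'
    _inherit = 'barcodes.barcode_events_mixin'
    last_barcode = fields.Char("Last Barcode")
    def on_barcode_scanned(self, barcode):
        self.last_barcode = barcode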
| {
"content_hash": "a1d051a25e4d27fa1f1b438703f32544",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 129,
"avg_line_length": 44.63636363636363,
"alnum_prop": 0.6995926680244399,
"repo_name": "vileopratama/vitech",
"id": "383e622c0a1693bfa4254f711d3040d7acd37150",
"size": "1007",
"binary": false,
"copies": "45",
"ref": "refs/heads/master",
"path": "src/addons/barcodes/models/barcode_events_mixin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
"""Tests for the Find flow."""
from grr.client import vfs
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import test_lib
from grr.lib import type_info
from grr.lib import utils
# pylint: disable=unused-import
from grr.lib.flows.general import find
# pylint: enable=unused-import
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import paths as rdf_paths
class TestFindFlow(test_lib.FlowTestsBaseclass):
"""Test the interrogate flow."""
def setUp(self):
super(TestFindFlow, self).setUp()
# Install the mock
vfs_type = rdf_paths.PathSpec.PathType.OS
vfs.VFS_HANDLERS[vfs_type] = test_lib.ClientVFSHandlerFixture
def testInvalidFindSpec(self):
"""Test that its impossible to produce an invalid findspec."""
# The regular expression is not valid.
self.assertRaises(type_info.TypeValueError, rdf_client.FindSpec,
path_regex="[")
def testFindFiles(self):
"""Test that the Find flow works with files."""
client_mock = action_mocks.ActionMock("Find")
output_path = "analysis/FindFlowTest1"
# Prepare a findspec.
findspec = rdf_client.FindSpec(
path_regex="bash",
pathspec=rdf_paths.PathSpec(
path="/", pathtype=rdf_paths.PathSpec.PathType.OS))
for _ in test_lib.TestFlowHelper(
"FindFiles", client_mock, client_id=self.client_id,
token=self.token, output=output_path, findspec=findspec):
pass
# Check the output file is created
fd = aff4.FACTORY.Open(self.client_id.Add(output_path), token=self.token)
    # Should match ["bash", "rbash"].
matches = set([x.aff4path.Basename() for x in fd])
self.assertEqual(sorted(matches), ["bash", "rbash"])
self.assertEqual(len(fd), 4)
for child in fd:
path = utils.SmartStr(child.aff4path)
self.assertTrue(path.endswith("bash"))
self.assertEqual(child.__class__.__name__, "StatEntry")
def testFindFilesWithGlob(self):
"""Test that the Find flow works with glob."""
client_mock = action_mocks.ActionMock("Find")
output_path = "analysis/FindFlowTest1"
# Prepare a findspec.
findspec = rdf_client.FindSpec(
path_glob="bash*",
pathspec=rdf_paths.PathSpec(
path="/", pathtype=rdf_paths.PathSpec.PathType.OS))
for _ in test_lib.TestFlowHelper(
"FindFiles", client_mock, client_id=self.client_id,
token=self.token, output=output_path, findspec=findspec):
pass
# Check the output file is created
fd = aff4.FACTORY.Open(self.client_id.Add(output_path), token=self.token)
# Make sure that bash is a file.
matches = set([x.aff4path.Basename() for x in fd])
self.assertEqual(sorted(matches), ["bash"])
self.assertEqual(len(fd), 2)
for child in fd:
path = utils.SmartStr(child.aff4path)
self.assertTrue(path.endswith("bash"))
self.assertEqual(child.__class__.__name__, "StatEntry")
def testFindDirectories(self):
"""Test that the Find flow works with directories."""
client_mock = action_mocks.ActionMock("Find")
output_path = "analysis/FindFlowTest2"
# Prepare a findspec.
findspec = rdf_client.FindSpec(
path_regex="bin",
pathspec=rdf_paths.PathSpec(path="/",
pathtype=rdf_paths.PathSpec.PathType.OS))
for _ in test_lib.TestFlowHelper(
"FindFiles", client_mock, client_id=self.client_id,
token=self.token, output=output_path, findspec=findspec):
pass
# Check the output file is created
fd = aff4.FACTORY.Open(self.client_id.Add(output_path), token=self.token)
# Make sure that bin is a directory
self.assertEqual(len(fd), 2)
for child in fd:
path = utils.SmartStr(child.aff4path)
self.assertTrue("bin" in path)
self.assertEqual(child.__class__.__name__, "StatEntry")
def testFindWithMaxFiles(self):
"""Test that the Find flow works when specifying proto directly."""
client_mock = action_mocks.ActionMock("Find")
output_path = "analysis/FindFlowTest4"
# Prepare a findspec.
findspec = rdf_client.FindSpec(
path_regex=".*",
pathspec=rdf_paths.PathSpec(path="/",
pathtype=rdf_paths.PathSpec.PathType.OS))
for _ in test_lib.TestFlowHelper(
"FindFiles", client_mock, client_id=self.client_id, token=self.token,
findspec=findspec, iteration_count=3, output=output_path,
max_results=7):
pass
# Check the output file is created
fd = aff4.FACTORY.Open(self.client_id.Add(output_path), token=self.token)
# Make sure we got the right number of results.
self.assertEqual(len(fd), 7)
def testCollectionOverwriting(self):
"""Test we overwrite the collection every time the flow is executed."""
client_mock = action_mocks.ActionMock("Find")
output_path = "analysis/FindFlowTest5"
# Prepare a findspec.
findspec = rdf_client.FindSpec()
findspec.path_regex = "bin"
findspec.pathspec.path = "/"
findspec.pathspec.pathtype = rdf_paths.PathSpec.PathType.OS
for _ in test_lib.TestFlowHelper(
"FindFiles", client_mock, client_id=self.client_id, token=self.token,
findspec=findspec, output=output_path):
pass
# Check the output file with the right number of results.
fd = aff4.FACTORY.Open(self.client_id.Add(output_path),
token=self.token)
self.assertEqual(len(fd), 2)
# Now find a new result, should overwrite the collection
findspec.path_regex = "dd"
for _ in test_lib.TestFlowHelper(
"FindFiles", client_mock, client_id=self.client_id, token=self.token,
findspec=findspec, output=output_path, max_results=1):
pass
fd = aff4.FACTORY.Open(self.client_id.Add(output_path),
token=self.token)
self.assertEqual(len(fd), 1)
def main(argv):
# Run the full test suite
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| {
"content_hash": "5cb57abe4d2cd08bc540cbffd5d54699",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 77,
"avg_line_length": 33.85,
"alnum_prop": 0.6617429837518464,
"repo_name": "pchaigno/grr",
"id": "70cee94fe6fed38f23267187e9dbd9682957945e",
"size": "6156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/flows/general/find_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "14896"
},
{
"name": "C",
"bytes": "10598"
},
{
"name": "C++",
"bytes": "276081"
},
{
"name": "CMake",
"bytes": "3044"
},
{
"name": "CSS",
"bytes": "12677"
},
{
"name": "Groff",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "71587"
},
{
"name": "JavaScript",
"bytes": "228300"
},
{
"name": "Makefile",
"bytes": "6232"
},
{
"name": "Protocol Buffer",
"bytes": "197889"
},
{
"name": "Python",
"bytes": "5172085"
},
{
"name": "Ruby",
"bytes": "5103"
},
{
"name": "Shell",
"bytes": "43112"
},
{
"name": "Standard ML",
"bytes": "8172"
}
],
"symlink_target": ""
} |
from approvaltests import verify
from approval_utilities.utilities.markdown_table import MarkdownTable
def test_markdown_table():
# begin-snippet: markdown_table_example
inputs = ["verify json", "verify all", "verify parameters", "verify as json"]
table = MarkdownTable.with_headers(
"Input", "Camel Case", "Snake Case", "Kebab Case"
)
table.add_rows_for_inputs(inputs, to_camel_case, to_snake_case, to_kebab_case)
verify(table)
# end-snippet
def to_camel_case(text: str) -> str:
words = text.split()
output = ""
for w in words:
output += w[0].capitalize() + w[1:]
return output[0].lower() + output[1:]
def to_snake_case(text: str) -> str:
return text.lower().replace(" ", "_")
def to_kebab_case(text: str) -> str:
return text.lower().replace(" ", "-")
| {
"content_hash": "b654b3d6844b61bd7f934e3e3a9c2681",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 82,
"avg_line_length": 28.586206896551722,
"alnum_prop": 0.6393244873341375,
"repo_name": "approvals/ApprovalTests.Python",
"id": "81df0631ba363b0fbe01e0e37e05a8eb79f4f341",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/utilities/test_markdown_table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "353"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "PowerShell",
"bytes": "1309"
},
{
"name": "Python",
"bytes": "173218"
},
{
"name": "Shell",
"bytes": "34"
}
],
"symlink_target": ""
} |
'''
Copyright 2015 University of Auckland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from PySide import QtCore, QtGui
from numbers import Number
from opencmiss.zinc.field import Field
from opencmiss.zinc.element import Element
from opencmiss.zinc.glyph import Glyph
from opencmiss.zinc.graphics import Graphics, GraphicsStreamlines, Graphicslineattributes
from opencmiss.zinc.scenecoordinatesystem import SCENECOORDINATESYSTEM_LOCAL
from opencmiss.zinc.spectrum import Spectrum
from opencmiss.zinc.status import OK as ZINC_OK
from opencmiss.neon.core.neonlogger import NeonLogger
from opencmiss.neon.ui.zincwidgets.fieldconditions import *
from opencmiss.neon.ui.zincwidgets.ui_graphicseditorwidget import Ui_GraphicsEditorWidget
STRING_FLOAT_FORMAT = '{:.5g}'
class GraphicsEditorWidget(QtGui.QWidget):
def __init__(self, parent=None):
'''
Call the super class init functions
'''
QtGui.QWidget.__init__(self, parent)
self._graphics = None
# Using composition to include the visual element of the GUI.
self.ui = Ui_GraphicsEditorWidget()
self.ui.setupUi(self)
# base graphics attributes
self.ui.coordinate_field_chooser.setNullObjectName('-')
self.ui.coordinate_field_chooser.setConditional(FieldIsCoordinateCapable)
self.ui.data_field_chooser.setNullObjectName('-')
self.ui.data_field_chooser.setConditional(FieldIsRealValued)
self.ui.spectrum_chooser.setNullObjectName('-')
# contours
self.ui.isoscalar_field_chooser.setNullObjectName('- choose -')
self.ui.isoscalar_field_chooser.setConditional(FieldIsScalar)
# streamlines
self.ui.stream_vector_field_chooser.setNullObjectName('- choose -')
self.ui.stream_vector_field_chooser.setConditional(FieldIsStreamVectorCapable)
# line attributes
self.ui.line_orientation_scale_field_chooser.setNullObjectName('-')
self.ui.line_orientation_scale_field_chooser.setConditional(FieldIsScalar)
# point attributes
self.ui.glyph_chooser.setNullObjectName('-')
self.ui.point_orientation_scale_field_chooser.setNullObjectName('-')
self.ui.point_orientation_scale_field_chooser.setConditional(FieldIsOrientationScaleCapable)
self.ui.label_field_chooser.setNullObjectName('-')
def _updateWidgets(self):
# base graphics attributes
coordinateField = None
material = None
dataField = None
spectrum = None
tessellation = None
isExterior = False
isWireframe = False
pointattributes = None
lineattributes = None
samplingattributes = None
contours = None
streamlines = None
if self._graphics:
coordinateField = self._graphics.getCoordinateField()
material = self._graphics.getMaterial()
dataField = self._graphics.getDataField()
spectrum = self._graphics.getSpectrum()
tessellation = self._graphics.getTessellation()
isExterior = self._graphics.isExterior()
isWireframe = self._graphics.getRenderPolygonMode() == Graphics.RENDER_POLYGON_MODE_WIREFRAME
contours = self._graphics.castContours()
streamlines = self._graphics.castStreamlines()
pointattributes = self._graphics.getGraphicspointattributes()
lineattributes = self._graphics.getGraphicslineattributes()
samplingattributes = self._graphics.getGraphicssamplingattributes()
self.ui.general_groupbox.show()
else:
self.ui.general_groupbox.hide()
self.ui.coordinate_field_chooser.setField(coordinateField)
self._scenecoordinatesystemDisplay()
self.ui.material_chooser.setMaterial(material)
self.ui.data_field_chooser.setField(dataField)
self.ui.spectrum_chooser.setSpectrum(spectrum)
self.ui.tessellation_chooser.setTessellation(tessellation)
self.ui.exterior_checkbox.setCheckState(QtCore.Qt.Checked if isExterior else QtCore.Qt.Unchecked)
self._faceDisplay()
self.ui.wireframe_checkbox.setCheckState(QtCore.Qt.Checked if isWireframe else QtCore.Qt.Unchecked)
# contours
isoscalarField = None
if contours and contours.isValid():
isoscalarField = contours.getIsoscalarField()
self.ui.contours_groupbox.show()
else:
self.ui.contours_groupbox.hide()
self.ui.isoscalar_field_chooser.setField(isoscalarField)
self._isovaluesDisplay()
# streamlines
streamVectorField = None
if streamlines and streamlines.isValid():
streamVectorField = streamlines.getStreamVectorField()
self.ui.streamlines_groupbox.show()
else:
self.ui.streamlines_groupbox.hide()
self.ui.stream_vector_field_chooser.setField(streamVectorField)
self._streamlinesTrackLengthDisplay()
self._streamlinesTrackDirectionDisplay()
self._streamlinesColourDataTypeDisplay()
# line attributes
lineOrientationScaleField = None
if lineattributes and lineattributes.isValid():
lineOrientationScaleField = lineattributes.getOrientationScaleField()
self.ui.lines_groupbox.show()
else:
self.ui.lines_groupbox.hide()
self._lineShapeDisplay()
self._lineBaseSizeDisplay()
self.ui.line_orientation_scale_field_chooser.setField(lineOrientationScaleField)
self._lineScaleFactorsDisplay()
        isStreamline = (streamlines is not None) and streamlines.isValid()
model = self.ui.line_shape_combobox.model()
model.item(1, 0).setEnabled(isStreamline)
model.item(3, 0).setEnabled(isStreamline)
self.ui.line_orientation_scale_field_label.setEnabled(not isStreamline)
self.ui.line_orientation_scale_field_chooser.setEnabled(not isStreamline)
self.ui.line_scale_factors_label.setEnabled(not isStreamline)
self.ui.line_scale_factors_lineedit.setEnabled(not isStreamline)
# point attributes
glyph = None
pointOrientationScaleField = None
labelField = None
if pointattributes and pointattributes.isValid():
glyph = pointattributes.getGlyph()
pointOrientationScaleField = pointattributes.getOrientationScaleField()
labelField = pointattributes.getLabelField()
self.ui.points_groupbox.show()
else:
self.ui.points_groupbox.hide()
self.ui.glyph_chooser.setGlyph(glyph)
self._pointBaseSizeDisplay()
self.ui.point_orientation_scale_field_chooser.setField(pointOrientationScaleField)
self._pointScaleFactorsDisplay()
self.ui.label_field_chooser.setField(labelField)
# sampling attributes
if samplingattributes and samplingattributes.isValid():
self.ui.sampling_groupbox.show()
else:
self.ui.sampling_groupbox.hide()
self._samplingModeDisplay()
def setScene(self, scene):
'''
        Called when the scene changes, to initialise widgets dependent on the scene
'''
self.ui.material_chooser.setMaterialmodule(scene.getMaterialmodule())
self.ui.glyph_chooser.setGlyphmodule(scene.getGlyphmodule())
self.ui.spectrum_chooser.setSpectrummodule(scene.getSpectrummodule())
self.ui.tessellation_chooser.setTessellationmodule(scene.getTessellationmodule())
region = scene.getRegion()
self.ui.coordinate_field_chooser.setRegion(region)
self.ui.data_field_chooser.setRegion(region)
self.ui.isoscalar_field_chooser.setRegion(region)
self.ui.stream_vector_field_chooser.setRegion(region)
self.ui.point_orientation_scale_field_chooser.setRegion(region)
self.ui.label_field_chooser.setRegion(region)
self.ui.line_orientation_scale_field_chooser.setRegion(region)
def getGraphics(self):
'''
Get the graphics currently in the editor
'''
return self._graphics
def setGraphics(self, graphics):
'''
Set the graphics to be edited
'''
if graphics and graphics.isValid():
self._graphics = graphics
else:
self._graphics = None
self._updateWidgets()
def _displayReal(self, widget, value):
'''
Display real value in a widget
'''
newText = STRING_FLOAT_FORMAT.format(value)
widget.setText(newText)
def _displayScale(self, widget, values, numberFormat=STRING_FLOAT_FORMAT):
'''
Display vector values in a widget, separated by '*'
'''
newText = "*".join(numberFormat.format(value) for value in values)
widget.setText(newText)
def _parseScale(self, widget):
'''
        Return real vector from '*'-separated text in line edit widget
'''
text = widget.text()
values = [float(value) for value in text.split('*')]
return values
def _parseScaleInteger(self, widget):
'''
        Return integer vector from '*'-separated text in line edit widget
'''
text = widget.text()
values = [int(value) for value in text.split('*')]
return values
def _displayVector(self, widget, values, numberFormat=STRING_FLOAT_FORMAT):
'''
Display real vector values in a widget. Also handle scalar
'''
if isinstance(values, Number):
newText = STRING_FLOAT_FORMAT.format(values)
else:
newText = ", ".join(numberFormat.format(value) for value in values)
widget.setText(newText)
def _parseVector(self, widget):
'''
Return real vector from comma separated text in line edit widget
'''
text = widget.text()
values = [float(value) for value in text.split(',')]
return values
def coordinateFieldChanged(self, index):
'''
An item was selected at index in coordinate field chooser widget
'''
if self._graphics:
coordinateField = self.ui.coordinate_field_chooser.getField()
if coordinateField:
self._graphics.setCoordinateField(coordinateField)
else:
self._graphics.setCoordinateField(Field())
def _scenecoordinatesystemDisplay(self):
'''
Show the current state of the scenecoordinatesystem combo box
'''
scenecoordinatesystem = SCENECOORDINATESYSTEM_LOCAL
if self._graphics:
scenecoordinatesystem = self._graphics.getScenecoordinatesystem()
self.ui.scenecoordinatesystem_combobox.blockSignals(True)
self.ui.scenecoordinatesystem_combobox.setCurrentIndex(scenecoordinatesystem - SCENECOORDINATESYSTEM_LOCAL)
self.ui.scenecoordinatesystem_combobox.blockSignals(False)
def scenecoordinatesystemChanged(self, index):
if self._graphics:
self._graphics.setScenecoordinatesystem(index + SCENECOORDINATESYSTEM_LOCAL)
def dataFieldChanged(self, index):
'''
An item was selected at index in data field chooser widget
'''
if self._graphics:
dataField = self.ui.data_field_chooser.getField()
if dataField:
scene = self._graphics.getScene()
scene.beginChange()
spectrum = self._graphics.getSpectrum()
if not spectrum.isValid():
spectrummodule = scene.getSpectrummodule()
spectrum = spectrummodule.getDefaultSpectrum()
self._graphics.setSpectrum(spectrum)
self.ui.spectrum_chooser.setSpectrum(spectrum)
self._graphics.setDataField(dataField)
scene.endChange()
else:
self._graphics.setDataField(Field())
def spectrumChanged(self, index):
if self._graphics:
spectrum = self.ui.spectrum_chooser.getSpectrum()
if spectrum:
self._graphics.setSpectrum(spectrum)
else:
self._graphics.setSpectrum(Spectrum())
def tessellationChanged(self, index):
'''
An item was selected at index in tessellation chooser widget
'''
if self._graphics:
tessellation = self.ui.tessellation_chooser.getTessellation()
self._graphics.setTessellation(tessellation)
def exteriorClicked(self, isChecked):
'''
The exterior radiobutton was clicked
'''
if self._graphics:
self._graphics.setExterior(isChecked)
def _faceDisplay(self):
'''
Show the current state of the face combo box
'''
faceType = Element.FACE_TYPE_ALL
if self._graphics:
faceType = self._graphics.getElementFaceType()
self.ui.face_combobox.blockSignals(True)
self.ui.face_combobox.setCurrentIndex(faceType - Element.FACE_TYPE_ALL)
self.ui.face_combobox.blockSignals(False)
def faceChanged(self, index):
'''
Element face combo box changed
'''
if self._graphics:
self._graphics.setElementFaceType(index + Element.FACE_TYPE_ALL)
def wireframeClicked(self, isChecked):
'''
The wireframe surface render radiobutton was clicked
'''
if self._graphics:
self._graphics.setRenderPolygonMode(Graphics.RENDER_POLYGON_MODE_WIREFRAME if isChecked else Graphics.RENDER_POLYGON_MODE_SHADED)
def glyphChanged(self, index):
'''
An item was selected at index in glyph chooser widget
'''
if self._graphics:
pointattributes = self._graphics.getGraphicspointattributes()
if (pointattributes.isValid()):
glyph = self.ui.glyph_chooser.getGlyph()
if glyph:
pointattributes.setGlyph(glyph)
else:
pointattributes.setGlyph(Glyph())
def materialChanged(self, index):
'''
An item was selected at index in material chooser widget
'''
if self._graphics:
material = self.ui.material_chooser.getMaterial()
self._graphics.setMaterial(material)
def isoscalarFieldChanged(self, index):
if self._graphics:
contours = self._graphics.castContours()
if contours.isValid():
isoscalarField = self.ui.isoscalar_field_chooser.getField()
if not isoscalarField:
isoscalarField = Field()
contours.setIsoscalarField(isoscalarField)
def _isovaluesDisplay(self):
'''
Display the current iso values list
'''
if self._graphics:
contours = self._graphics.castContours()
if contours.isValid():
count, isovalues = contours.getListIsovalues(1)
if count > 1:
count, isovalues = contours.getListIsovalues(count)
if count > 0:
self._displayVector(self.ui.isovalues_lineedit, isovalues)
return
self.ui.isovalues_lineedit.setText('')
def isovaluesEntered(self):
'''
Set iso values list from text in widget
'''
try:
isovalues = self._parseVector(self.ui.isovalues_lineedit)
contours = self._graphics.castContours()
if contours.isValid():
if contours.setListIsovalues(isovalues) != ZINC_OK:
raise
except:
NeonLogger.getLogger().error("Invalid isovalues")
self._isovaluesDisplay()
def streamVectorFieldChanged(self, index):
if self._graphics:
streamlines = self._graphics.castStreamlines()
if streamlines.isValid():
streamVectorField = self.ui.stream_vector_field_chooser.getField()
if not streamVectorField:
streamVectorField = Field()
streamlines.setStreamVectorField(streamVectorField)
def _streamlinesTrackLengthDisplay(self):
'''
Display the current streamlines length
'''
if self._graphics:
streamlines = self._graphics.castStreamlines()
if streamlines.isValid():
trackLength = streamlines.getTrackLength()
self._displayReal(self.ui.streamlines_track_length_lineedit, trackLength)
return
self.ui.streamlines_track_length_lineedit.setText('')
def streamlinesTrackLengthEntered(self):
'''
        Set streamlines track length from text in widget
'''
streamlinesLengthText = self.ui.streamlines_track_length_lineedit.text()
try:
trackLength = float(streamlinesLengthText)
streamlines = self._graphics.castStreamlines()
if streamlines.isValid():
if streamlines.setTrackLength(trackLength) != ZINC_OK:
raise
except:
print("Invalid streamlines track length", streamlinesLengthText)
self._streamlinesTrackLengthDisplay()
def _streamlinesTrackDirectionDisplay(self):
'''
Show the current state of the streamlines track direction combo box
'''
streamlinesTrackDirection = GraphicsStreamlines.TRACK_DIRECTION_FORWARD
if self._graphics:
streamlines = self._graphics.castStreamlines()
if streamlines.isValid():
streamlinesTrackDirection = streamlines.getTrackDirection()
self.ui.streamlines_track_direction_combobox.blockSignals(True)
self.ui.streamlines_track_direction_combobox.setCurrentIndex(streamlinesTrackDirection - GraphicsStreamlines.TRACK_DIRECTION_FORWARD)
self.ui.streamlines_track_direction_combobox.blockSignals(False)
def streamlinesTrackDirectionChanged(self, index):
'''
Element streamlines track direction combo box changed
'''
if self._graphics:
streamlines = self._graphics.castStreamlines()
if streamlines.isValid():
streamlines.setTrackDirection(index + GraphicsStreamlines.TRACK_DIRECTION_FORWARD)
def _streamlinesColourDataTypeDisplay(self):
'''
Show the current state of the streamlines colour data type combo box
'''
streamlinesColourDataType = GraphicsStreamlines.COLOUR_DATA_TYPE_FIELD
if self._graphics:
streamlines = self._graphics.castStreamlines()
if streamlines.isValid():
streamlinesColourDataType = streamlines.getColourDataType()
self.ui.streamlines_colour_data_type_combobox.blockSignals(True)
self.ui.streamlines_colour_data_type_combobox.setCurrentIndex(streamlinesColourDataType - GraphicsStreamlines.COLOUR_DATA_TYPE_FIELD)
self.ui.streamlines_colour_data_type_combobox.blockSignals(False)
def streamlinesColourDataTypeChanged(self, index):
'''
Element streamlines colour data type combo box changed
'''
if self._graphics:
streamlines = self._graphics.castStreamlines()
if streamlines.isValid():
scene = self._graphics.getScene()
scene.beginChange()
spectrum = self._graphics.getSpectrum()
if not spectrum.isValid():
spectrummodule = scene.getSpectrummodule()
spectrum = spectrummodule.getDefaultSpectrum()
self._graphics.setSpectrum(spectrum)
streamlines.setColourDataType(index + GraphicsStreamlines.COLOUR_DATA_TYPE_FIELD)
scene.endChange()
def _lineShapeDisplay(self):
'''
Show the current state of the lineShape combo box
'''
lineShapeType = Graphicslineattributes.SHAPE_TYPE_LINE
if self._graphics:
lineattributes = self._graphics.getGraphicslineattributes()
if lineattributes.isValid():
lineShapeType = lineattributes.getShapeType()
self.ui.line_shape_combobox.blockSignals(True)
self.ui.line_shape_combobox.setCurrentIndex(lineShapeType - Graphicslineattributes.SHAPE_TYPE_LINE)
self.ui.line_shape_combobox.blockSignals(False)
def lineShapeChanged(self, index):
'''
Element lineShape combo box changed
'''
if self._graphics:
lineattributes = self._graphics.getGraphicslineattributes()
if lineattributes.isValid():
lineattributes.setShapeType(index + Graphicslineattributes.SHAPE_TYPE_LINE)
def _lineBaseSizeDisplay(self):
'''
Display the current line base size
'''
if self._graphics:
lineattributes = self._graphics.getGraphicslineattributes()
if lineattributes.isValid():
_, baseSize = lineattributes.getBaseSize(2)
self._displayScale(self.ui.line_base_size_lineedit, baseSize)
return
self.ui.line_base_size_lineedit.setText('0')
def lineBaseSizeEntered(self):
'''
Set line base size from text in widget
'''
try:
baseSize = self._parseScale(self.ui.line_base_size_lineedit)
lineattributes = self._graphics.getGraphicslineattributes()
if lineattributes.setBaseSize(baseSize) != ZINC_OK:
raise
except:
print("Invalid line base size")
self._lineBaseSizeDisplay()
def lineOrientationScaleFieldChanged(self, index):
if self._graphics:
lineattributes = self._graphics.getGraphicslineattributes()
if lineattributes.isValid():
orientationScaleField = self.ui.line_orientation_scale_field_chooser.getField()
if not orientationScaleField:
orientationScaleField = Field()
lineattributes.setOrientationScaleField(orientationScaleField)
def _lineScaleFactorsDisplay(self):
'''
Display the current line scale factors
'''
if self._graphics:
lineattributes = self._graphics.getGraphicslineattributes()
if lineattributes.isValid():
_, scaleFactors = lineattributes.getScaleFactors(2)
self._displayScale(self.ui.line_scale_factors_lineedit, scaleFactors)
return
self.ui.line_scale_factors_lineedit.setText('0')
def lineScaleFactorsEntered(self):
'''
Set line scale factors from text in widget
'''
try:
scaleFactors = self._parseScale(self.ui.line_scale_factors_lineedit)
lineattributes = self._graphics.getGraphicslineattributes()
if lineattributes.setScaleFactors(scaleFactors) != ZINC_OK:
raise
except:
print("Invalid line scale factors")
self._lineScaleFactorsDisplay()
def _pointBaseSizeDisplay(self):
'''
Display the current point base size
'''
if self._graphics:
pointattributes = self._graphics.getGraphicspointattributes()
if pointattributes.isValid():
_, baseSize = pointattributes.getBaseSize(3)
self._displayScale(self.ui.point_base_size_lineedit, baseSize)
return
self.ui.point_base_size_lineedit.setText('0')
def pointBaseSizeEntered(self):
'''
Set point base size from text in widget
'''
try:
baseSize = self._parseScale(self.ui.point_base_size_lineedit)
pointattributes = self._graphics.getGraphicspointattributes()
if pointattributes.setBaseSize(baseSize) != ZINC_OK:
raise
except:
print("Invalid point base size")
self._pointBaseSizeDisplay()
def pointOrientationScaleFieldChanged(self, index):
if self._graphics:
pointattributes = self._graphics.getGraphicspointattributes()
if pointattributes.isValid():
orientationScaleField = self.ui.point_orientation_scale_field_chooser.getField()
if not orientationScaleField:
orientationScaleField = Field()
pointattributes.setOrientationScaleField(orientationScaleField)
def _pointScaleFactorsDisplay(self):
'''
Display the current point scale factors
'''
if self._graphics:
pointattributes = self._graphics.getGraphicspointattributes()
if pointattributes.isValid():
_, scaleFactors = pointattributes.getScaleFactors(3)
self._displayScale(self.ui.point_scale_factors_lineedit, scaleFactors)
return
self.ui.point_scale_factors_lineedit.setText('0')
def pointScaleFactorsEntered(self):
'''
Set point scale factors from text in widget
'''
try:
scaleFactors = self._parseScale(self.ui.point_scale_factors_lineedit)
pointattributes = self._graphics.getGraphicspointattributes()
if pointattributes.setScaleFactors(scaleFactors) != ZINC_OK:
raise
except:
print("Invalid point scale factors")
self._pointScaleFactorsDisplay()
def labelFieldChanged(self, index):
if self._graphics:
pointattributes = self._graphics.getGraphicspointattributes()
if pointattributes.isValid():
labelField = self.ui.label_field_chooser.getField()
if not labelField:
labelField = Field()
pointattributes.setLabelField(labelField)
def _samplingModeDisplay(self):
'''
Show the current state of the sampling mode combo box
'''
samplingMode = Element.POINT_SAMPLING_MODE_CELL_CENTRES
if self._graphics:
samplingattributes = self._graphics.getGraphicssamplingattributes()
if samplingattributes.isValid():
samplingMode = samplingattributes.getElementPointSamplingMode()
self.ui.sampling_mode_combobox.blockSignals(True)
self.ui.sampling_mode_combobox.setCurrentIndex(samplingMode - Element.POINT_SAMPLING_MODE_CELL_CENTRES)
self.ui.sampling_mode_combobox.blockSignals(False)
def samplingModeChanged(self, index):
'''
Sampling mode combo box changed
'''
if self._graphics:
samplingattributes = self._graphics.getGraphicssamplingattributes()
if samplingattributes.isValid():
samplingattributes.setElementPointSamplingMode(index + Element.POINT_SAMPLING_MODE_CELL_CENTRES)
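# --- Editor's usage sketch (not part of the original module) -----------------
# Typical wiring from a host window: give the editor the Zinc scene first so
# its choosers are populated, then the graphics object to edit. `parent`,
# `scene` and `graphics` are hypothetical objects supplied by the caller.
def _example_wire_editor(parent, scene, graphics):
    editor = GraphicsEditorWidget(parent)
    editor.setScene(scene)
    editor.setGraphics(graphics)
    return editor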
| {
"content_hash": "bd3f255949889a9fb3d9ee68e8a6d09a",
"timestamp": "",
"source": "github",
"line_count": 660,
"max_line_length": 141,
"avg_line_length": 41.7030303030303,
"alnum_prop": 0.6407862229327133,
"repo_name": "alan-wu/neon",
"id": "d7793882e0b96283e5adeb502944f5e3751ed490",
"size": "27524",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/opencmiss/neon/ui/zincwidgets/graphicseditorwidget.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "NSIS",
"bytes": "29534"
},
{
"name": "Python",
"bytes": "1449198"
}
],
"symlink_target": ""
} |
import pygame, sys, math
class Meme(pygame.sprite.Sprite):
def __init__(self, screensize, levelNumber, speed=[0,0], pos=[0,0], size=None):
#print screensize, levelNumber
pygame.sprite.Sprite.__init__(self, self.containers)
self.image = pygame.image.load("rsc/levelmat" + str(levelNumber) + "/meme/meme.png")
if size:
self.image = pygame.transform.scale(self.image, [size,size])
self.rect = self.image.get_rect(center = pos)
self.speedx = speed[0]
self.speedy = speed[1]
self.speed = [self.speedx, self.speedy]
self.radius = self.rect.width/2 -1
self.didBounceX = False
self.didBounceY = False
self.screenHeight = screensize[1]
self.damage = 50
def shiftX(self, amount):
self.rect.x += amount
def getPos(self, key="center"):
if key == "center":
return self.rect.center
def update(self, walls):
self.calc_grav()
self.rect.x += self.speedx
block_hit_list = pygame.sprite.spritecollide(self, walls, False)
for block in block_hit_list:
if self.speedx > 0:
self.rect.right = block.rect.left
self.jump(walls)
elif self.speedx < 0:
self.rect.left = block.rect.right
self.jump(walls)
self.rect.y += self.speedy
block_hit_list = pygame.sprite.spritecollide(self, walls, False)
for block in block_hit_list:
if self.speedy > 0:
self.rect.bottom = block.rect.top
elif self.speedy < 0:
self.rect.top = block.rect.bottom
self.speedy = 0
def jump(self, walls):
self.rect.y += 2
platform_hit_list = pygame.sprite.spritecollide(self, walls, False)
self.rect.y -= 2
#print len(platform_hit_list)
if len(platform_hit_list) > 0 or self.rect.bottom >= self.screenHeight:
self.speedy = -10
def calc_grav(self):
if self.speedy == 0:
self.speedy = 1
else:
self.speedy += .35
if (self.rect.y >= (self.screenHeight - self.rect.height)) and (self.speedy >= 0):
self.speedy = 0
self.rect.y = self.screenHeight - self.rect.height
def animate(self):
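        # NOTE (editor): relies on animation attributes (self.images,
        # self.frame, self.maxFrame, self.animationTimer,
        # self.animationTimerMax) that are not set in __init__; they must be
        # defined by a subclass or caller before animate() is used.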
if self.animationTimer < self.animationTimerMax:
self.animationTimer += 1
else:
self.animationTimer = 0
if self.frame < self.maxFrame:
self.frame += 1
else:
self.frame = 0
self.image = self.images[self.frame]
def move(self):
self.didBounceX = False
self.didBounceY = False
self.speed = [self.speedx, self.speedy]
self.rect = self.rect.move(self.speed)
def bounceScreen(self, size):
width = size[0]
height = size[1]
if self.rect.left < 0 or self.rect.right > width:
self.speedx = -self.speedx
self.didBounceX = True
if self.rect.top < 0 or self.rect.bottom > height:
self.speedy = -self.speedy
self.didBounceY = True
def bounceMeme(self, other):
if not self.didBounceX:
self.speedx = -self.speedx
if not self.didBounceY:
self.speedy = -self.speedy
def dist(self, pt):
x1 = self.rect.center[0]
y1 = self.rect.center[1]
x2 = pt[0]
y2 = pt[1]
xDiff = x1 - x2
yDiff = y1 - y2
return math.sqrt(xDiff**2 + yDiff**2)
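# --- Editor's usage sketch (not part of the original file) -------------------
# Sprites in this project expect a class-level `containers` attribute (one or
# more sprite groups) to be assigned before the first instantiation; the names
# below are hypothetical.
def _example_spawn_meme(screen_size, level_number):
    all_memes = pygame.sprite.Group()
    Meme.containers = all_memes
    return Meme(screen_size, level_number, speed=[2, 0], pos=[100, 100])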
| {
"content_hash": "0f6b9434090fcdf832ba999d50b6a0fb",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 92,
"avg_line_length": 31.330578512396695,
"alnum_prop": 0.522025850699024,
"repo_name": "KRHS-GameProgramming-2016/Memefinity",
"id": "e538cba82a98de1133dc6d44b6f4816af510826d",
"size": "3791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Meme.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53580"
}
],
"symlink_target": ""
} |
from kde import KDE
from smoothers_lowess import lowess
import bandwidths
| {
"content_hash": "e89856863e44b2b99fde91da5dc21980",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 35,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.8513513513513513,
"repo_name": "pprett/statsmodels",
"id": "5c0a13a071297a64f17b775a20ba5d0313ab5bc7",
"size": "74",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statsmodels/nonparametric/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "11707"
},
{
"name": "JavaScript",
"bytes": "11143"
},
{
"name": "Python",
"bytes": "4135946"
},
{
"name": "R",
"bytes": "5412"
}
],
"symlink_target": ""
} |
import sys
from lxml import etree
dtd = etree.DTD(open("dotscene.dtd"))
root = etree.parse(sys.argv[1])
if dtd.validate(root):
print("validation successful")
else:
print(dtd.error_log.filter_from_errors()) | {
"content_hash": "157bda5878d6bc7ebd3187f6da4ae578",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 45,
"avg_line_length": 21.5,
"alnum_prop": 0.7162790697674418,
"repo_name": "OGRECave/DotSceneFormat",
"id": "e3ba83d591b3748c43aba65e42d0876add6ee102",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "validate.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "33061"
},
{
"name": "CMake",
"bytes": "828"
},
{
"name": "Python",
"bytes": "235"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Histogram2dContour(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "histogram2dcontour"
_valid_props = {
"autobinx",
"autobiny",
"autocolorscale",
"autocontour",
"bingroup",
"coloraxis",
"colorbar",
"colorscale",
"contours",
"customdata",
"customdatasrc",
"histfunc",
"histnorm",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"ids",
"idssrc",
"legendgroup",
"line",
"marker",
"meta",
"metasrc",
"name",
"nbinsx",
"nbinsy",
"ncontours",
"opacity",
"reversescale",
"showlegend",
"showscale",
"stream",
"type",
"uid",
"uirevision",
"visible",
"x",
"xaxis",
"xbingroup",
"xbins",
"xcalendar",
"xsrc",
"y",
"yaxis",
"ybingroup",
"ybins",
"ycalendar",
"ysrc",
"z",
"zauto",
"zhoverformat",
"zmax",
"zmid",
"zmin",
"zsrc",
}
# autobinx
# --------
@property
def autobinx(self):
"""
Obsolete: since v1.42 each bin attribute is auto-determined
separately and `autobinx` is not needed. However, we accept
`autobinx: true` or `false` and will update `xbins` accordingly
before deleting `autobinx` from the trace.
The 'autobinx' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autobinx"]
@autobinx.setter
def autobinx(self, val):
self["autobinx"] = val
# autobiny
# --------
@property
def autobiny(self):
"""
Obsolete: since v1.42 each bin attribute is auto-determined
separately and `autobiny` is not needed. However, we accept
`autobiny: true` or `false` and will update `ybins` accordingly
before deleting `autobiny` from the trace.
The 'autobiny' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autobiny"]
@autobiny.setter
def autobiny(self, val):
self["autobiny"] = val
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# autocontour
# -----------
@property
def autocontour(self):
"""
Determines whether or not the contour level attributes are
picked by an algorithm. If True, the number of contour levels
can be set in `ncontours`. If False, set the contour level
attributes in `contours`.
The 'autocontour' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocontour"]
@autocontour.setter
def autocontour(self, val):
self["autocontour"] = val
# bingroup
# --------
@property
def bingroup(self):
"""
Set the `xbingroup` and `ybingroup` default prefix For example,
setting a `bingroup` of 1 on two histogram2d traces will make
them their x-bins and y-bins match separately.
The 'bingroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["bingroup"]
@bingroup.setter
def bingroup(self, val):
self["bingroup"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                    Sets the length of the color bar. This measure
                    excludes the padding of both ends. That is, the
                    color bar length is this length minus the
                    padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                    or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                    Sets the thickness of the color bar. This
                    measure excludes the size of the padding, ticks
                    and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.histogr
am2dcontour.colorbar.Tickformatstop` instances
or dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.histogram2dcontour.colorbar.tickformatstopdef
aults), sets the default property values to use
for elements of
histogram2dcontour.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.histogram2dcontour
.colorbar.Title` instance or dict with
compatible properties
titlefont
Deprecated: Please use
histogram2dcontour.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
histogram2dcontour.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.histogram2dcontour.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
        bounds of the colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Vi
ridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# contours
# --------
@property
def contours(self):
"""
The 'contours' property is an instance of Contours
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Contours`
- A dict of string/value properties that will be passed
to the Contours constructor
Supported dict properties:
coloring
Determines the coloring method showing the
contour values. If "fill", coloring is done
evenly between each contour level If "heatmap",
a heatmap gradient coloring is applied between
each contour level. If "lines", coloring is
done on the contour lines. If "none", no
coloring is applied on this trace.
end
Sets the end contour level value. Must be more
than `contours.start`
labelfont
Sets the font used for labeling the contour
levels. The default color comes from the lines,
if shown. The default family and size come from
`layout.font`.
labelformat
Sets the contour label formatting rule using d3
formatting mini-language which is very similar
to Python, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
operation
Sets the constraint operation. "=" keeps
regions equal to `value` "<" and "<=" keep
regions less than `value` ">" and ">=" keep
regions greater than `value` "[]", "()", "[)",
and "(]" keep regions inside `value[0]` to
`value[1]` "][", ")(", "](", ")[" keep regions
                    outside `value[0]` to `value[1]`. Open vs. closed
intervals make no difference to constraint
display, but all versions are allowed for
consistency with filter transforms.
showlabels
Determines whether to label the contour lines
with their values.
showlines
Determines whether or not the contour lines are
drawn. Has an effect only if
`contours.coloring` is set to "fill".
size
Sets the step between each contour level. Must
be positive.
start
Sets the starting contour level value. Must be
less than `contours.end`
type
If `levels`, the data is represented as a
contour plot with multiple levels displayed. If
`constraint`, the data is represented as
constraints with the invalid region shaded as
specified by the `operation` and `value`
parameters.
value
Sets the value or values of the constraint
boundary. When `operation` is set to one of the
comparison values (=,<,>=,>,<=) "value" is
expected to be a number. When `operation` is
set to one of the interval values
([],(),[),(],][,)(,](,)[) "value" is expected
to be an array of two numbers where the first
is the lower bound and the second is the upper
bound.
Returns
-------
plotly.graph_objs.histogram2dcontour.Contours
"""
return self["contours"]
@contours.setter
def contours(self, val):
self["contours"] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items in the markers
        DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# histfunc
# --------
@property
def histfunc(self):
"""
Specifies the binning function used for this histogram trace.
If "count", the histogram values are computed by counting the
number of values lying inside each bin. If "sum", "avg", "min",
"max", the histogram values are computed using the sum, the
average, the minimum or the maximum of the values lying inside
each bin respectively.
The 'histfunc' property is an enumeration that may be specified as:
- One of the following enumeration values:
['count', 'sum', 'avg', 'min', 'max']
Returns
-------
Any
"""
return self["histfunc"]
@histfunc.setter
def histfunc(self, val):
self["histfunc"] = val
# histnorm
# --------
@property
def histnorm(self):
"""
Specifies the type of normalization used for this histogram
trace. If "", the span of each bar corresponds to the number of
occurrences (i.e. the number of data points lying inside the
bins). If "percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences with
respect to the total number of sample points (here, the sum of
all bin HEIGHTS equals 100% / 1). If "density", the span of
each bar corresponds to the number of occurrences in a bin
divided by the size of the bin interval (here, the sum of all
bin AREAS equals the total number of sample points). If
*probability density*, the area of each bar corresponds to the
probability that an event will fall into the corresponding bin
(here, the sum of all bin AREAS equals 1).
The 'histnorm' property is an enumeration that may be specified as:
- One of the following enumeration values:
['', 'percent', 'probability', 'density', 'probability
density']
Returns
-------
Any
"""
return self["histnorm"]
@histnorm.setter
def histnorm(self, val):
self["histnorm"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
        Determines which trace information appears on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
                    Sets the horizontal alignment of the text
                    content within the hover label box. Has an
                    effect only if the hover label text spans two
                    or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.histogram2dcontour.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for details on
the date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
        data. Additionally, every attribute that can be specified per-
        point (the ones that are `arrayOk: true`) is available, as is
        the variable `z`. Anything contained in the tag `<extra>` is
        displayed in the secondary box, for example
        "<extra>{fullData.name}</extra>". To hide the secondary box
        completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids are used for object
        constancy of data points during animation. Should be an array
        of strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the color of the contour level. Has no
effect if `contours.coloring` is set to
"lines".
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
smoothing
Sets the amount of smoothing for the contour
lines, where 0 corresponds to no smoothing.
width
                    Sets the contour line width (in px).
Returns
-------
plotly.graph_objs.histogram2dcontour.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
color
Sets the aggregation data.
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
Returns
-------
plotly.graph_objs.histogram2dcontour.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# meta
# ----
@property
def meta(self):
"""
        Assigns extra meta information associated with this trace that
        can be used in various text attributes. Attributes such as
        trace `name`, graph, axis and colorbar `title.text`, annotation
        `text`, `rangeselector`, `updatemenus` and `sliders` `label`
        text all support `meta`. To access the trace `meta` values in
        an attribute in the same trace, simply use `%{meta[i]}` where
        `i` is the index or key of the `meta` item in question. To
        access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
        `meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
        Sets the trace name. The trace name appears as the legend item
        and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# nbinsx
# ------
@property
def nbinsx(self):
"""
Specifies the maximum number of desired bins. This value will
be used in an algorithm that will decide the optimal bin size
such that the histogram best visualizes the distribution of the
data. Ignored if `xbins.size` is provided.
        The 'nbinsx' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nbinsx"]
@nbinsx.setter
def nbinsx(self, val):
self["nbinsx"] = val
# nbinsy
# ------
@property
def nbinsy(self):
"""
Specifies the maximum number of desired bins. This value will
be used in an algorithm that will decide the optimal bin size
such that the histogram best visualizes the distribution of the
data. Ignored if `ybins.size` is provided.
        The 'nbinsy' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nbinsy"]
@nbinsy.setter
def nbinsy(self, val):
self["nbinsy"] = val
# ncontours
# ---------
@property
def ncontours(self):
"""
Sets the maximum number of contour levels. The actual number of
contours will be chosen automatically to be less than or equal
to the value of `ncontours`. Has an effect only if
`autocontour` is True or if `contours.size` is missing.
        The 'ncontours' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ncontours"]
@ncontours.setter
def ncontours(self, val):
self["ncontours"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `zmin` will
correspond to the last color in the array and `zmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.histogram2dcontour.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
        constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# x
# -
@property
def x(self):
"""
Sets the sample data to be binned on the x axis.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xaxis
# -----
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
# xbingroup
# ---------
@property
def xbingroup(self):
"""
Set a group of histogram traces which will have compatible
x-bin settings. Using `xbingroup`, histogram2d and
histogram2dcontour traces (on axes of the same axis type) can
have compatible x-bin settings. Note that the same `xbingroup`
value can be used to set (1D) histogram `bingroup`
The 'xbingroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["xbingroup"]
@xbingroup.setter
def xbingroup(self, val):
self["xbingroup"] = val
# xbins
# -----
@property
def xbins(self):
"""
The 'xbins' property is an instance of XBins
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.XBins`
- A dict of string/value properties that will be passed
to the XBins constructor
Supported dict properties:
end
Sets the end value for the x axis bins. The
last bin may not end exactly at this value, we
increment the bin edge by `size` from `start`
until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use
a date string, and for category data `end` is
based on the category serial numbers.
size
Sets the size of each x axis bin. Default
behavior: If `nbinsx` is 0 or omitted, we
choose a nice round bin size such that the
number of bins is about the same as the typical
number of samples in each bin. If `nbinsx` is
provided, we choose a nice round bin size
giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as
in `axis.dtick`. For category data, the number
of categories to bin together (always defaults
to 1).
start
Sets the starting value for the x axis bins.
Defaults to the minimum data value, shifted
down if necessary to make nice round values and
to remove ambiguous bin edges. For example, if
most of the data is integers we shift the bin
edges 0.5 down, so a `size` of 5 would have a
default `start` of -0.5, so it is clear that
0-4 are in the first bin, 5-9 in the second,
but continuous data gets a start of 0 and bins
[0,5), [5,10) etc. Dates behave similarly, and
`start` should be a date string. For category
data, `start` is based on the category serial
numbers, and defaults to -0.5.
Returns
-------
plotly.graph_objs.histogram2dcontour.XBins
"""
return self["xbins"]
@xbins.setter
def xbins(self, val):
self["xbins"] = val
# xcalendar
# ---------
@property
def xcalendar(self):
"""
Sets the calendar system to use with `x` date data.
The 'xcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["xcalendar"]
@xcalendar.setter
def xcalendar(self, val):
self["xcalendar"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for x .
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# y
# -
@property
def y(self):
"""
Sets the sample data to be binned on the y axis.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yaxis
# -----
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
# ybingroup
# ---------
@property
def ybingroup(self):
"""
Set a group of histogram traces which will have compatible
y-bin settings. Using `ybingroup`, histogram2d and
histogram2dcontour traces (on axes of the same axis type) can
have compatible y-bin settings. Note that the same `ybingroup`
value can be used to set (1D) histogram `bingroup`
The 'ybingroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ybingroup"]
@ybingroup.setter
def ybingroup(self, val):
self["ybingroup"] = val
# ybins
# -----
@property
def ybins(self):
"""
The 'ybins' property is an instance of YBins
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram2dcontour.YBins`
- A dict of string/value properties that will be passed
to the YBins constructor
Supported dict properties:
end
Sets the end value for the y axis bins. The
last bin may not end exactly at this value, we
increment the bin edge by `size` from `start`
until we reach or exceed `end`. Defaults to the
maximum data value. Like `start`, for dates use
a date string, and for category data `end` is
based on the category serial numbers.
size
Sets the size of each y axis bin. Default
behavior: If `nbinsy` is 0 or omitted, we
choose a nice round bin size such that the
number of bins is about the same as the typical
number of samples in each bin. If `nbinsy` is
provided, we choose a nice round bin size
giving no more than that many bins. For date
data, use milliseconds or "M<n>" for months, as
in `axis.dtick`. For category data, the number
of categories to bin together (always defaults
to 1).
start
Sets the starting value for the y axis bins.
Defaults to the minimum data value, shifted
down if necessary to make nice round values and
to remove ambiguous bin edges. For example, if
most of the data is integers we shift the bin
edges 0.5 down, so a `size` of 5 would have a
default `start` of -0.5, so it is clear that
0-4 are in the first bin, 5-9 in the second,
but continuous data gets a start of 0 and bins
[0,5), [5,10) etc. Dates behave similarly, and
`start` should be a date string. For category
data, `start` is based on the category serial
numbers, and defaults to -0.5.
Returns
-------
plotly.graph_objs.histogram2dcontour.YBins
"""
return self["ybins"]
@ybins.setter
def ybins(self, val):
self["ybins"] = val
# ycalendar
# ---------
@property
def ycalendar(self):
"""
Sets the calendar system to use with `y` date data.
The 'ycalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["ycalendar"]
@ycalendar.setter
def ycalendar(self, val):
self["ycalendar"] = val
# ysrc
# ----
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for y .
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
# z
# -
@property
def z(self):
"""
Sets the aggregation data.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# zauto
# -----
@property
def zauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `z`) or the bounds set in
        `zmin` and `zmax`. Defaults to `false` when `zmin` and `zmax`
are set by the user.
The 'zauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zauto"]
@zauto.setter
def zauto(self, val):
self["zauto"] = val
# zhoverformat
# ------------
@property
def zhoverformat(self):
"""
Sets the hover text formatting rule using d3 formatting mini-
languages which are very similar to those in Python. See:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
The 'zhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["zhoverformat"]
@zhoverformat.setter
def zhoverformat(self, val):
self["zhoverformat"] = val
# zmax
# ----
@property
def zmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as in `z` and if set, `zmin` must be set as well.
The 'zmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmax"]
@zmax.setter
def zmax(self, val):
self["zmax"] = val
# zmid
# ----
@property
def zmid(self):
"""
Sets the mid-point of the color domain by scaling `zmin` and/or
`zmax` to be equidistant to this point. Value should have the
same units as in `z`. Has no effect when `zauto` is `false`.
The 'zmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmid"]
@zmid.setter
def zmid(self, val):
self["zmid"] = val
# zmin
# ----
@property
def zmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as in `z` and if set, `zmax` must be set as well.
The 'zmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmin"]
@zmin.setter
def zmin(self, val):
self["zmin"] = val
# zsrc
# ----
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for z .
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autobinx
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobinx` is not needed.
However, we accept `autobinx: true` or `false` and will
update `xbins` accordingly before deleting `autobinx`
from the trace.
autobiny
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobiny` is not needed.
However, we accept `autobiny: true` or `false` and will
update `ybins` accordingly before deleting `autobiny`
from the trace.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
autocontour
Determines whether or not the contour level attributes
are picked by an algorithm. If True, the number of
contour levels can be set in `ncontours`. If False, set
the contour level attributes in `contours`.
bingroup
            Set the `xbingroup` and `ybingroup` default prefix. For
            example, setting a `bingroup` of 1 on two histogram2d
            traces will make their x-bins and y-bins match
            separately.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.histogram2dcontour.ColorBa
r` instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
contours
:class:`plotly.graph_objects.histogram2dcontour.Contour
s` instance or dict with compatible properties
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
histfunc
Specifies the binning function used for this histogram
trace. If "count", the histogram values are computed by
counting the number of values lying inside each bin. If
"sum", "avg", "min", "max", the histogram values are
computed using the sum, the average, the minimum or the
maximum of the values lying inside each bin
respectively.
histnorm
Specifies the type of normalization used for this
histogram trace. If "", the span of each bar
corresponds to the number of occurrences (i.e. the
number of data points lying inside the bins). If
"percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences
with respect to the total number of sample points
(here, the sum of all bin HEIGHTS equals 100% / 1). If
"density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of
the bin interval (here, the sum of all bin AREAS equals
the total number of sample points). If *probability
density*, the area of each bar corresponds to the
probability that an event will fall into the
corresponding bin (here, the sum of all bin AREAS
equals 1).
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.histogram2dcontour.Hoverla
bel` instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available, as is the variable `z`. Anything contained
            in the tag `<extra>` is displayed in the secondary box, for
example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should
            be an array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
:class:`plotly.graph_objects.histogram2dcontour.Line`
instance or dict with compatible properties
marker
:class:`plotly.graph_objects.histogram2dcontour.Marker`
instance or dict with compatible properties
meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
nbinsx
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`xbins.size` is provided.
nbinsy
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`ybins.size` is provided.
ncontours
Sets the maximum number of contour levels. The actual
number of contours will be chosen automatically to be
less than or equal to the value of `ncontours`. Has an
effect only if `autocontour` is True or if
`contours.size` is missing.
opacity
Sets the opacity of the trace.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.histogram2dcontour.Stream`
instance or dict with compatible properties
uid
            Assign an id to this trace. Use this to provide object
            constancy between traces during animations and
            transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the sample data to be binned on the x axis.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xbingroup
Set a group of histogram traces which will have
compatible x-bin settings. Using `xbingroup`,
histogram2d and histogram2dcontour traces (on axes of
the same axis type) can have compatible x-bin settings.
Note that the same `xbingroup` value can be used to set
(1D) histogram `bingroup`
xbins
:class:`plotly.graph_objects.histogram2dcontour.XBins`
instance or dict with compatible properties
xcalendar
Sets the calendar system to use with `x` date data.
xsrc
Sets the source reference on Chart Studio Cloud for x
.
y
Sets the sample data to be binned on the y axis.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ybingroup
Set a group of histogram traces which will have
compatible y-bin settings. Using `ybingroup`,
histogram2d and histogram2dcontour traces (on axes of
the same axis type) can have compatible y-bin settings.
Note that the same `ybingroup` value can be used to set
(1D) histogram `bingroup`
ybins
:class:`plotly.graph_objects.histogram2dcontour.YBins`
instance or dict with compatible properties
ycalendar
Sets the calendar system to use with `y` date data.
ysrc
Sets the source reference on Chart Studio Cloud for y
.
z
Sets the aggregation data.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
            bounds set in `zmin` and `zmax`. Defaults to `false`
when `zmin` and `zmax` are set by the user.
zhoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. See: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for z
.
"""
def __init__(
self,
arg=None,
autobinx=None,
autobiny=None,
autocolorscale=None,
autocontour=None,
bingroup=None,
coloraxis=None,
colorbar=None,
colorscale=None,
contours=None,
customdata=None,
customdatasrc=None,
histfunc=None,
histnorm=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
ids=None,
idssrc=None,
legendgroup=None,
line=None,
marker=None,
meta=None,
metasrc=None,
name=None,
nbinsx=None,
nbinsy=None,
ncontours=None,
opacity=None,
reversescale=None,
showlegend=None,
showscale=None,
stream=None,
uid=None,
uirevision=None,
visible=None,
x=None,
xaxis=None,
xbingroup=None,
xbins=None,
xcalendar=None,
xsrc=None,
y=None,
yaxis=None,
ybingroup=None,
ybins=None,
ycalendar=None,
ysrc=None,
z=None,
zauto=None,
zhoverformat=None,
zmax=None,
zmid=None,
zmin=None,
zsrc=None,
**kwargs
):
"""
Construct a new Histogram2dContour object
        The sample data from which statistics are computed is set in
        `x` and `y` (where `x` and `y` represent marginal
        distributions, binning is set in `xbins` and `ybins` in this
        case) or `z` (where `z` represents the 2D distribution and
        binning is set by `x` and `y` in this case). The resulting
        distribution is visualized as a contour plot.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.Histogram2dContour`
autobinx
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobinx` is not needed.
However, we accept `autobinx: true` or `false` and will
update `xbins` accordingly before deleting `autobinx`
from the trace.
autobiny
Obsolete: since v1.42 each bin attribute is auto-
determined separately and `autobiny` is not needed.
However, we accept `autobiny: true` or `false` and will
update `ybins` accordingly before deleting `autobiny`
from the trace.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
autocontour
Determines whether or not the contour level attributes
are picked by an algorithm. If True, the number of
contour levels can be set in `ncontours`. If False, set
the contour level attributes in `contours`.
bingroup
            Set the `xbingroup` and `ybingroup` default prefix. For
            example, setting a `bingroup` of 1 on two histogram2d
            traces will make their x-bins and y-bins match
            separately.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.histogram2dcontour.ColorBa
r` instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use `zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
contours
:class:`plotly.graph_objects.histogram2dcontour.Contour
s` instance or dict with compatible properties
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items in
            the markers DOM elements.
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
histfunc
Specifies the binning function used for this histogram
trace. If "count", the histogram values are computed by
counting the number of values lying inside each bin. If
"sum", "avg", "min", "max", the histogram values are
computed using the sum, the average, the minimum or the
maximum of the values lying inside each bin
respectively.
histnorm
Specifies the type of normalization used for this
histogram trace. If "", the span of each bar
corresponds to the number of occurrences (i.e. the
number of data points lying inside the bins). If
"percent" / "probability", the span of each bar
corresponds to the percentage / fraction of occurrences
with respect to the total number of sample points
(here, the sum of all bin HEIGHTS equals 100% / 1). If
"density", the span of each bar corresponds to the
number of occurrences in a bin divided by the size of
the bin interval (here, the sum of all bin AREAS equals
the total number of sample points). If *probability
density*, the area of each bar corresponds to the
probability that an event will fall into the
corresponding bin (here, the sum of all bin AREAS
equals 1).
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.histogram2dcontour.Hoverla
bel` instance or dict with compatible properties
hovertemplate
            Template string used for rendering the information
            that appears in the hover box. Note that this will
            override `hoverinfo`. Variables are inserted using
            %{variable}, for example "y: %{y}". Numbers are
            formatted using d3-format's syntax
            %{variable:d3-format}, for example "Price: %{y:$.2f}".
            See https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format for
            details on the formatting syntax. Dates are formatted
            using d3-time-format's syntax %{variable|d3-time-
            format}, for example "Day: %{2019-01-01|%A}". See
            https://github.com/d3/d3-3.x-api-
            reference/blob/master/Time-Formatting.md#format for
            details on the date formatting syntax. The variables
            available in `hovertemplate` are the ones emitted as
            event data described at
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available, as is the variable `z`. Anything
            contained in the tag `<extra>` is displayed in the
            secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
            hovertemplate.
ids
            Assigns id labels to each datum. These ids provide
            object constancy of data points during animation.
            Should be an array of strings, not numbers or any
            other type.
idssrc
Sets the source reference on Chart Studio Cloud for
            ids.
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
:class:`plotly.graph_objects.histogram2dcontour.Line`
instance or dict with compatible properties
marker
:class:`plotly.graph_objects.histogram2dcontour.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
            meta.
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
nbinsx
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`xbins.size` is provided.
nbinsy
Specifies the maximum number of desired bins. This
value will be used in an algorithm that will decide the
optimal bin size such that the histogram best
visualizes the distribution of the data. Ignored if
`ybins.size` is provided.
ncontours
Sets the maximum number of contour levels. The actual
number of contours will be chosen automatically to be
less than or equal to the value of `ncontours`. Has an
effect only if `autocontour` is True or if
`contours.size` is missing.
opacity
Sets the opacity of the trace.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.histogram2dcontour.Stream`
instance or dict with compatible properties
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x
Sets the sample data to be binned on the x axis.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xbingroup
            Sets a group of histogram traces which will have
            compatible x-bin settings. Using `xbingroup`,
            histogram2d and histogram2dcontour traces (on axes of
            the same axis type) can have compatible x-bin settings.
            Note that the same `xbingroup` value can be used to set
            (1D) histogram `bingroup`.
xbins
:class:`plotly.graph_objects.histogram2dcontour.XBins`
instance or dict with compatible properties
xcalendar
Sets the calendar system to use with `x` date data.
xsrc
            Sets the source reference on Chart Studio Cloud for x.
y
Sets the sample data to be binned on the y axis.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
ybingroup
            Sets a group of histogram traces which will have
            compatible y-bin settings. Using `ybingroup`,
            histogram2d and histogram2dcontour traces (on axes of
            the same axis type) can have compatible y-bin settings.
            Note that the same `ybingroup` value can be used to set
            (1D) histogram `bingroup`.
ybins
:class:`plotly.graph_objects.histogram2dcontour.YBins`
instance or dict with compatible properties
ycalendar
Sets the calendar system to use with `y` date data.
ysrc
            Sets the source reference on Chart Studio Cloud for y.
z
Sets the aggregation data.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
            bounds set in `zmin` and `zmax`. Defaults to `false`
when `zmin` and `zmax` are set by the user.
zhoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. See: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
            Sets the source reference on Chart Studio Cloud for z.
Returns
-------
Histogram2dContour
"""
super(Histogram2dContour, self).__init__("histogram2dcontour")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Histogram2dContour
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Histogram2dContour`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autobinx", None)
_v = autobinx if autobinx is not None else _v
if _v is not None:
self["autobinx"] = _v
_v = arg.pop("autobiny", None)
_v = autobiny if autobiny is not None else _v
if _v is not None:
self["autobiny"] = _v
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("autocontour", None)
_v = autocontour if autocontour is not None else _v
if _v is not None:
self["autocontour"] = _v
_v = arg.pop("bingroup", None)
_v = bingroup if bingroup is not None else _v
if _v is not None:
self["bingroup"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorbar", None)
_v = colorbar if colorbar is not None else _v
if _v is not None:
self["colorbar"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("contours", None)
_v = contours if contours is not None else _v
if _v is not None:
self["contours"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("histfunc", None)
_v = histfunc if histfunc is not None else _v
if _v is not None:
self["histfunc"] = _v
_v = arg.pop("histnorm", None)
_v = histnorm if histnorm is not None else _v
if _v is not None:
self["histnorm"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("nbinsx", None)
_v = nbinsx if nbinsx is not None else _v
if _v is not None:
self["nbinsx"] = _v
_v = arg.pop("nbinsy", None)
_v = nbinsy if nbinsy is not None else _v
if _v is not None:
self["nbinsy"] = _v
_v = arg.pop("ncontours", None)
_v = ncontours if ncontours is not None else _v
if _v is not None:
self["ncontours"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("showscale", None)
_v = showscale if showscale is not None else _v
if _v is not None:
self["showscale"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("xbingroup", None)
_v = xbingroup if xbingroup is not None else _v
if _v is not None:
self["xbingroup"] = _v
_v = arg.pop("xbins", None)
_v = xbins if xbins is not None else _v
if _v is not None:
self["xbins"] = _v
_v = arg.pop("xcalendar", None)
_v = xcalendar if xcalendar is not None else _v
if _v is not None:
self["xcalendar"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
_v = arg.pop("ybingroup", None)
_v = ybingroup if ybingroup is not None else _v
if _v is not None:
self["ybingroup"] = _v
_v = arg.pop("ybins", None)
_v = ybins if ybins is not None else _v
if _v is not None:
self["ybins"] = _v
_v = arg.pop("ycalendar", None)
_v = ycalendar if ycalendar is not None else _v
if _v is not None:
self["ycalendar"] = _v
_v = arg.pop("ysrc", None)
_v = ysrc if ysrc is not None else _v
if _v is not None:
self["ysrc"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
_v = arg.pop("zauto", None)
_v = zauto if zauto is not None else _v
if _v is not None:
self["zauto"] = _v
_v = arg.pop("zhoverformat", None)
_v = zhoverformat if zhoverformat is not None else _v
if _v is not None:
self["zhoverformat"] = _v
_v = arg.pop("zmax", None)
_v = zmax if zmax is not None else _v
if _v is not None:
self["zmax"] = _v
_v = arg.pop("zmid", None)
_v = zmid if zmid is not None else _v
if _v is not None:
self["zmid"] = _v
_v = arg.pop("zmin", None)
_v = zmin if zmin is not None else _v
if _v is not None:
self["zmin"] = _v
_v = arg.pop("zsrc", None)
_v = zsrc if zsrc is not None else _v
if _v is not None:
self["zsrc"] = _v
# Read-only literals
# ------------------
self._props["type"] = "histogram2dcontour"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
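# Illustrative usage sketch (added; not part of the generated module). It
# assumes only the public plotly.graph_objects interface described in the
# docstring above.
#
# import plotly.graph_objects as go
# fig = go.Figure(
#     go.Histogram2dContour(
#         x=[1, 2, 2, 3, 3, 3],
#         y=[1, 1, 2, 2, 3, 3],
#         colorscale="Viridis",
#         histnorm="probability",
#     )
# )
# fig.show()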
| {
"content_hash": "3922d67b42d228e8d0264e2f0fe4f8ba",
"timestamp": "",
"source": "github",
"line_count": 2773,
"max_line_length": 89,
"avg_line_length": 37.009376126938335,
"alnum_prop": 0.5477018718271021,
"repo_name": "plotly/python-api",
"id": "05cdb8caa405d5177de3315a16e2c62afcad26f9",
"size": "102627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/_histogram2dcontour.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
"""TensorFlow ops for array / tensor manipulation."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def split_squeeze(dim, num_split, tensor_in):
"""Splits input on given dimension and then squeezes that dimension.
Args:
dim: Dimension to split and squeeze on.
num_split: integer, the number of ways to split.
tensor_in: Input tensor of shape [N1, N2, .. Ndim, .. Nx].
Returns:
List of tensors [N1, N2, .. Ndim-1, Ndim+1, .. Nx].
"""
return [tf.squeeze(t, squeeze_dims=[dim]) for t in tf.split(dim, num_split, tensor_in)]
def expand_concat(dim, inputs):
"""Expands inputs on given dimension and then concatenates them.
Args:
dim: Dimension to expand and concatenate on.
inputs: List of tensors of the same shape [N1, ... Nx].
Returns:
A tensor of shape [N1, .. Ndim, ... Nx]
"""
return tf.concat(dim, [tf.expand_dims(t, dim) for t in inputs])
def one_hot_matrix(tensor_in, num_classes, on_value=1.0, off_value=0.0):
"""Encodes indices from given tensor as one-hot tensor.
TODO(ilblackdragon): Ideally implementation should be
part of TensorFlow with Eigen-native operation.
Args:
tensor_in: Input tensor of shape [N1, N2].
num_classes: Number of classes to expand index into.
on_value: Tensor or float, value to fill-in given index.
off_value: Tensor or float, value to fill-in everything else.
Returns:
Tensor of shape [N1, N2, num_classes] with 1.0 for each id in original
tensor.
"""
tensor_in = tf.convert_to_tensor(tensor_in)
sparse_values = tf.to_int64(tf.reshape(tensor_in, [-1, 1]))
size = tf.shape(sparse_values)[0]
dims = tf.shape(tensor_in)
indices = tf.to_int64(tf.reshape(tf.range(0, size), [-1, 1]))
indices_values = tf.concat(1, [indices, sparse_values])
outshape = tf.to_int64(expand_concat(0, [size, num_classes]))
one_hot_vector = tf.sparse_to_dense(indices_values, outshape, on_value, off_value)
ret = tf.reshape(one_hot_vector, tf.concat(0, [dims, [num_classes]]))
ret.set_shape(tensor_in.get_shape().concatenate(num_classes))
return ret
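# Illustrative usage sketch (added; not part of the original module). It
# assumes the legacy TensorFlow 0.x API used above, where tf.split and
# tf.concat take the dimension as their first argument.
#
# with tf.Session() as sess:
#     ids = [[0, 1, 2], [2, 1, 0]]
#     encoded = one_hot_matrix(ids, 3)
#     print(sess.run(encoded).shape)  # (2, 3, 3)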
| {
"content_hash": "95792b58fa6cccd4780107639f380efa",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 91,
"avg_line_length": 39.108108108108105,
"alnum_prop": 0.6762266758811334,
"repo_name": "awni/tensorflow",
"id": "4c16afb58955b408b3338f89e3e8ceea836f3517",
"size": "2894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/skflow/python/skflow/ops/array_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156098"
},
{
"name": "C++",
"bytes": "7765982"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "684124"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "7188"
},
{
"name": "Jupyter Notebook",
"bytes": "1771787"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "103762"
},
{
"name": "Python",
"bytes": "4675299"
},
{
"name": "Shell",
"bytes": "126103"
},
{
"name": "TypeScript",
"bytes": "342627"
}
],
"symlink_target": ""
} |
def create_multipliers():
return [lambda x : i * x for i in range(5)]
if __name__ == '__main__':
for multiplier in create_multipliers():
        print(multiplier(2))  # Should display 0, 2, 4, 6, 8
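# Note (added; not part of the original exercise): Python closures bind `i`
# late, so every lambda above sees the final value of `i` and the loop
# actually prints 8 five times. A minimal fix, assuming the intent is to
# capture each `i` at definition time, is a default argument:
#
# def create_multipliers_fixed():
#     return [lambda x, i=i: i * x for i in range(5)]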
| {
"content_hash": "946ee490c1631b61f511fdfaeba236a4",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 61,
"avg_line_length": 29.857142857142858,
"alnum_prop": 0.5980861244019139,
"repo_name": "Quebec-Python/atelierdebug",
"id": "d1a21edda0a234057d34b58fe5c352c67b254dd8",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intermediate/ex3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1270"
},
{
"name": "Python",
"bytes": "28417"
},
{
"name": "Shell",
"bytes": "210"
}
],
"symlink_target": ""
} |
import os
import re
import hashlib
from functools import partial
class Disc:
    def __init__(self, id, size, initial_position, delay=1,
angular_velocity=1):
self.id = int(id)
self.size = int(size)
        self.initial_position = int(initial_position)
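        # The capsule reaches disc #N exactly N seconds after the button is
        # pressed, so the disc's effective time offset is its id (the `delay`
        # argument is effectively unused).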
self.delay = self.id
self.angular_velocity = angular_velocity
def get_position(self, time):
return (self.angular_velocity*time + self.initial_position) % self.size
def get_position_with_delay(self, time):
return (self.angular_velocity * (time + self.delay) +
self.initial_position) % self.size
def min_time_to_0(self):
return self.size - (self.angular_velocity * self.delay + self.initial_position) % self.size
class Sculpture:
regex = re.compile(r'Disc #([0-9]+) has ([0-9]+) positions; at time=0, '
r'it is at position ([0-9]+).')
def __init__(self, description):
self.discs = []
for line in description:
match = Sculpture.regex.match(line)
if match:
self.discs.append(Disc(*match.groups()))
self.sort_discs()
def sort_discs(self):
self.discs = sorted(self.discs, key=lambda disc: disc.size)
def solve(self):
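        # Check only candidate times at which the largest disc (last after
        # sorting by size) sits at position 0, stepping by that disc's period;
        # the remaining discs are then verified at each candidate time.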
sol = None
index = 0
while sol is None:
time = self.discs[-1].min_time_to_0() + index * self.discs[-1].size
for disc in self.discs:
if disc.get_position_with_delay(time) != 0:
break
else:
return time
index += 1
if __name__ == '__main__':
dir = os.path.dirname(__file__)
file = os.path.join(dir, 'input.txt')
instructions = []
with open(file) as fd:
for line in fd:
instructions.append(line.strip())
sculpture = Sculpture(instructions)
print('Part1: ', sculpture.solve())
instructions.append('Disc #7 has 11 positions; at time=0, it is at position 0.')
sculpture = Sculpture(instructions)
print('Part 2:', sculpture.solve())
| {
"content_hash": "b30f6c9ee5504e2eb537a6431a28dfc0",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 99,
"avg_line_length": 29.39189189189189,
"alnum_prop": 0.551264367816092,
"repo_name": "bbglab/adventofcode",
"id": "cb935013c8e826e200b8359c78100ce49b3fe7de",
"size": "2175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2016/iker/day15/timing_is_everything.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "37799"
},
{
"name": "Go",
"bytes": "3094"
},
{
"name": "Haskell",
"bytes": "10240"
},
{
"name": "Jupyter Notebook",
"bytes": "13754648"
},
{
"name": "Python",
"bytes": "194710"
},
{
"name": "R",
"bytes": "18289"
},
{
"name": "Rust",
"bytes": "2682"
},
{
"name": "Shell",
"bytes": "1190"
}
],
"symlink_target": ""
} |
"""User-facing customization options to create and train a text classifier."""
import dataclasses
from typing import Optional
from mediapipe.model_maker.python.core import hyperparameters as hp
from mediapipe.model_maker.python.text.text_classifier import model_options as mo
from mediapipe.model_maker.python.text.text_classifier import model_spec as ms
@dataclasses.dataclass
class TextClassifierOptions:
"""User-facing options for creating the text classifier.
Attributes:
supported_model: A preconfigured model spec.
hparams: Training hyperparameters the user can set to override the ones in
`supported_model`.
model_options: Model options the user can set to override the ones in
`supported_model`. The model options type should be consistent with the
architecture of the `supported_model`.
"""
supported_model: ms.SupportedModels
hparams: Optional[hp.BaseHParams] = None
model_options: Optional[mo.TextClassifierModelOptions] = None
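# Illustrative usage sketch (added; not part of the original module). The
# spec name and hyperparameter values below are assumptions for illustration
# only.
#
# options = TextClassifierOptions(
#     supported_model=ms.SupportedModels.MOBILEBERT_CLASSIFIER,
#     hparams=hp.BaseHParams(epochs=3, batch_size=32, learning_rate=3e-5),
# )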
| {
"content_hash": "993ad09865758d9a13f417d043beaf00",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 81,
"avg_line_length": 39.44,
"alnum_prop": 0.7799188640973631,
"repo_name": "google/mediapipe",
"id": "a02f17347e9d33ba9bab787b63ea502e987b7e6a",
"size": "1593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mediapipe/model_maker/python/text/text_classifier/text_classifier_options.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "514"
},
{
"name": "C",
"bytes": "76928"
},
{
"name": "C++",
"bytes": "10897312"
},
{
"name": "Dockerfile",
"bytes": "2659"
},
{
"name": "HTML",
"bytes": "4090"
},
{
"name": "Java",
"bytes": "1151252"
},
{
"name": "JavaScript",
"bytes": "6380"
},
{
"name": "Makefile",
"bytes": "1625"
},
{
"name": "Objective-C",
"bytes": "125458"
},
{
"name": "Objective-C++",
"bytes": "131706"
},
{
"name": "Python",
"bytes": "1272093"
},
{
"name": "Shell",
"bytes": "19580"
},
{
"name": "Starlark",
"bytes": "1277085"
},
{
"name": "TypeScript",
"bytes": "169026"
}
],
"symlink_target": ""
} |
import xmlrpc.client
url = "http://www.pythonchallenge.com/pc/phonebook.php"
proxy = xmlrpc.client.ServerProxy(url)
print(proxy.system.listMethods())
print(proxy.system.methodHelp("phone"))
# The name "Bert" comes from http://www.pythonchallenge.com/pc/return/evil4.jpg
print(proxy.phone("Bert"))
#555-ITALY
| {
"content_hash": "904730b952564f16f09145d03c31feb9",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 35.625,
"alnum_prop": 0.775438596491228,
"repo_name": "feliposz/python-challenge-solutions",
"id": "dfb8721d75496a15366f5fdef2032c10852021b9",
"size": "285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "level13-xmlrpc-phonebook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "6436"
},
{
"name": "OpenEdge ABL",
"bytes": "10004"
},
{
"name": "Python",
"bytes": "293733"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from calaccess_raw import fields
from .base import CalAccessBaseModel, DocumentCloud
from django.template.defaultfilters import floatformat
from django.utils.encoding import python_2_unicode_compatible
from django.contrib.humanize.templatetags.humanize import intcomma
@python_2_unicode_compatible
class FilernameCd(CalAccessBaseModel):
"""
A combination of CAL-ACCESS tables to provide the analyst with
filer information.
    Full names of all PACs, firms, and employers are in the last
    name field.
Major donors can be split between first and last name fields, but usually
are contained in the last name field only. Individual names of lobbyists,
candidates/officeholders, treasurers/responsible officers, and major donors
(when they are only an individual's name) use both the first and last name
fields in conjunction.
"""
UNIQUE_KEY = ("FILER_ID", "NAMID")
xref_filer_id = fields.CharField(
verbose_name='crossreference filer ID',
max_length=15,
db_column='XREF_FILER_ID',
db_index=True,
help_text="Alternative filer ID found on many forms"
)
DOCUMENTCLOUD_PAGES = [
DocumentCloud(id='2711615-FAQ', start_page=2)
]
filer_id = fields.IntegerField(
verbose_name='filer ID',
db_column='FILER_ID',
db_index=True,
null=True,
help_text="Filer's unique identification number"
)
FILER_TYPE_CHOICES = (
(' NOT DEFINED', 'Undefined'),
('ALL FILERS', 'All filers'),
('CANDIDATE/OFFICEHOLDER', 'Candidate/officeholder'),
('CLIENT', 'Client'),
('EMPLOYER', 'Employer'),
('FIRM', 'Firm'),
('INDIVIDUAL', 'Individual'),
('INITIATIVE', 'Initiative'),
('LOBBYIST', 'Lobbyist'),
(
'MAJOR DONOR/INDEPENDENT EXPENDITURE COMMITTEE',
            'Major donor or independent expenditure committee'
),
('PAYMENT TO INFLUENCE', 'Payment to influence'),
('PREPAID ACCOUNT', 'Prepaid account'),
('PROPONENT', 'Proponent'),
('PROPOSITION', 'Proposition'),
('RECIPIENT COMMITTEE', 'Recipient committee'),
('SLATE MAILER ORGANIZATIONS', 'Slate mailer organization'),
(
'TREASURER/RESPONSIBLE OFFICER',
'Treasurer/responsible officer'
)
)
filer_type = fields.CharField(
max_length=45,
db_column='FILER_TYPE',
db_index=True,
choices=FILER_TYPE_CHOICES,
help_text='The type of filer'
)
STATUS_CHOICES = (
('', 'Undefined'),
('A', ''),
('ACTIVE', ''),
('INACTIVE', ''),
('P', ''),
('R', ''),
('S', ''),
('TERMINATED', ''),
('W', ''),
)
status = fields.CharField(
max_length=10,
db_column='STATUS',
db_index=True,
choices=STATUS_CHOICES,
blank=True,
help_text='The status of the filer'
)
effect_dt = fields.DateField(
db_column='EFFECT_DT',
help_text="Effective date for status",
null=True,
)
naml = fields.CharField(
max_length=200, db_column='NAML',
help_text="Last name, sometimes full name"
)
namf = fields.CharField(
max_length=55, db_column='NAMF', blank=True,
help_text="First name"
)
namt = fields.CharField(
max_length=70, db_column='NAMT', blank=True,
help_text="Name prefix or title"
)
nams = fields.CharField(
max_length=32, db_column='NAMS', blank=True,
help_text="Name suffix"
)
adr1 = fields.CharField(
max_length=200,
db_column='ADR1',
blank=True,
help_text="First line of street address"
)
adr2 = fields.CharField(
max_length=200,
db_column='ADR2',
blank=True,
help_text="Second line of street address"
)
city = fields.CharField(
max_length=55,
db_column='CITY',
blank=True,
help_text="City address"
)
st = fields.CharField(
max_length=4,
db_column='ST',
blank=True,
verbose_name="State"
)
zip4 = fields.CharField(
max_length=10,
db_column='ZIP4',
blank=True,
help_text="ZIP Code"
)
phon = fields.CharField(
max_length=60,
db_column='PHON',
blank=True,
verbose_name="Phone",
help_text="Phone number"
)
fax = fields.CharField(
max_length=60,
db_column='FAX',
blank=True,
help_text="Fax number"
)
email = fields.CharField(
max_length=60,
db_column='EMAIL',
blank=True,
help_text="Email address"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'FILERNAME_CD'
verbose_name = 'FILERNAME_CD'
verbose_name_plural = 'FILERNAME_CD'
ordering = ("naml", "namf",)
def __str__(self):
return str(self.filer_id)
@python_2_unicode_compatible
class FilerFilingsCd(CalAccessBaseModel):
"""
Key table that links filers to their paper, key data entry, legacy,
and electronic filings. This table is used as an index to locate
filing information.
"""
UNIQUE_KEY = (
"FILER_ID",
"FILING_ID",
"FORM_ID",
"FILING_SEQUENCE"
)
filer_id = fields.IntegerField(
verbose_name='filer ID',
db_column='FILER_ID',
db_index=True,
null=True,
help_text="Filer's unique identification number"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
        help_text="Unique filing identification number"
)
period_id = fields.IntegerField(
null=True,
db_column='PERIOD_ID',
blank=True,
        help_text="Identifies the period when the filing was received."
)
FORM_ID_CHOICES = (
('E530', ''),
('F111', ''),
('F400', ''),
('F401', ''),
('F402', ''),
('F405', ''),
('F410', ''),
('F410 AT', ''),
('F410ATR', ''),
('F415', ''),
('F416', ''),
('F419', ''),
('F420', ''),
('F421', ''),
('F425', ''),
('F430', ''),
('F440', ''),
('F450', ''),
('F460', ''),
('F461', ''),
('F465', ''),
('F470', ''),
('F470S', ''),
('F480', ''),
('F490', ''),
('F495', ''),
('F496', ''),
('F497', ''),
('F498', ''),
('F500', ''),
('F501', ''),
('F501502', ''),
('F502', ''),
('F555', ''),
('F601', ''),
('F602', ''),
('F603', ''),
('F604', ''),
('F605', ''),
('F606', ''),
('F607', ''),
('F615', ''),
('F625', ''),
('F635', ''),
('F645', ''),
('F666', ''),
('F690', ''),
('F700', ''),
('F777', ''),
('F888', ''),
('F900', ''),
('F999', ''),
)
form_id = fields.CharField(
max_length=7,
db_column='FORM_ID',
db_index=True,
verbose_name='form type',
choices=FORM_ID_CHOICES,
help_text="Form identification code"
)
filing_sequence = fields.IntegerField(
db_column='FILING_SEQUENCE',
db_index=True,
help_text="Amendment number where 0 is an original filing and 1 to \
999 are amendments"
)
filing_date = fields.DateField(
db_column='FILING_DATE',
help_text="Date the filing entered into the system",
null=True
)
STATEMENT_TYPE_CHOICES = (
(0, ''),
(10001, ''),
(10002, ''),
(10003, ''),
(10004, ''),
(10005, ''),
(10006, ''),
(10007, ''),
)
stmnt_type = fields.IntegerField(
db_column='STMNT_TYPE',
verbose_name="statement type",
db_index=True,
choices=STATEMENT_TYPE_CHOICES,
help_text="Type of statement"
)
STATEMENT_STATUS_CHOICES = (
(0, ''),
(11001, ''),
(11002, ''),
(11003, ''),
)
stmnt_status = fields.IntegerField(
db_column='STMNT_STATUS',
db_index=True,
null=True,
help_text="The status of the statement. If the filing has been \
reviewed or not reviewed.",
verbose_name='statement status',
choices=STATEMENT_STATUS_CHOICES,
)
session_id = fields.IntegerField(
verbose_name='session ID',
db_column='SESSION_ID',
help_text='Legislative session identification number',
null=True,
)
user_id = fields.CharField(
max_length=12,
db_column='USER_ID',
verbose_name="User ID",
help_text="User identifier of the PRD user who logged the filing"
)
special_audit = fields.IntegerField(
null=True,
db_column='SPECIAL_AUDIT',
blank=True,
help_text="Denotes whether the filing has been audited for money \
laundering or other special condition."
)
fine_audit = fields.IntegerField(
null=True,
db_column='FINE_AUDIT',
blank=True,
help_text="Indicates whether a filing has been audited for a fine"
)
rpt_start = fields.DateField(
null=True,
db_column='RPT_START',
blank=True,
help_text="Starting date for the period the filing represents",
)
rpt_end = fields.DateField(
null=True,
db_column='RPT_END',
blank=True,
help_text="Ending date for the period the filing represents",
)
rpt_date = fields.DateField(
null=True,
db_column='RPT_DATE',
blank=True,
help_text="Date filing received",
)
FILING_TYPE_CHOICES = (
(0, '0 (Unknown)'),
(22001, 'Electronic'),
(22006, 'Cal Online'),
)
filing_type = fields.IntegerField(
db_column='FILING_TYPE',
null=True,
blank=True,
choices=FILING_TYPE_CHOICES,
help_text="The type of filing"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'FILER_FILINGS_CD'
verbose_name = 'FILER_FILINGS_CD'
verbose_name_plural = 'FILER_FILINGS_CD'
def __str__(self):
return str("%s %s" % (self.filer_id, self.filing_id))
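    # Illustrative usage sketch (added; not part of the original file): index
    # rows for every Form 460 filing, assuming the raw data has been loaded.
    #
    # FilerFilingsCd.objects.filter(form_id='F460').values_list(
    #     'filer_id', 'filing_id', 'filing_sequence')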
@python_2_unicode_compatible
class FilingsCd(CalAccessBaseModel):
"""
This table is the parent table from which all links and association to
a filing are derived.
"""
UNIQUE_KEY = "FILING_ID"
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
        help_text="Unique filing identification number"
)
FILING_TYPE_CHOICES = (
(22001, 'Electronic'),
(22002, 'Key data entry'),
(22003, 'Historical lobby'),
(22004, 'Historical campaign'),
(22005, 'AMS'),
(22006, 'Cal Online'),
)
filing_type = fields.IntegerField(
db_column='FILING_TYPE',
db_index=True,
choices=FILING_TYPE_CHOICES,
help_text="The type of filing"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'FILINGS_CD'
verbose_name = 'FILINGS_CD'
verbose_name_plural = 'FILINGS_CD'
def __str__(self):
return str("%s %s" % (self.filing_id, self.filing_type))
@python_2_unicode_compatible
class HdrCd(CalAccessBaseModel):
"""
Electronic filing record header data
"""
UNIQUE_KEY = ("FILING_ID", "AMEND_ID")
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
cal_ver = fields.CharField(
max_length=4,
db_column='CAL_VER',
blank=True,
help_text="CAL Version number the filing was made using"
)
ef_type = fields.CharField(
max_length=3,
db_column='EF_TYPE',
blank=True,
help_text='Electronic filing type. This will always have the \
value of "CAL".'
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
        help_text="Unique filing identification number"
)
hdr_comment = fields.CharField(
max_length=200,
db_column='HDRCOMMENT',
blank=True,
verbose_name="Header comment",
help_text="Typically used for development and test filings"
)
REC_TYPE_CHOICES = (
("HDR", "HDR"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
soft_name = fields.CharField(
max_length=90,
db_column='SOFT_NAME',
blank=True,
help_text="Filing software name used to electronically file"
)
soft_ver = fields.CharField(
max_length=16,
db_column='SOFT_VER',
blank=True,
help_text="Filing software version number"
)
state_cd = fields.CharField(
max_length=2,
db_column='STATE_CD',
blank=True,
verbose_name='State code',
help_text="The state code value entered in the electronic filing"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'HDR_CD'
verbose_name = 'HDR_CD'
verbose_name_plural = 'HDR_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class HeaderCd(CalAccessBaseModel):
"""
Lookup table used to report form 460 information in the AMS.
"""
UNIQUE_KEY = ("LINE_NUMBER", "FORM_ID", "REC_TYPE")
line_number = fields.IntegerField(
db_column='LINE_NUMBER',
help_text="This field is undocumented"
)
form_id = fields.CharField(
db_column='FORM_ID',
max_length=5,
help_text="Form identification code",
verbose_name="Form ID"
)
REC_TYPE_CHOICES = (
("AP1", "AP1"),
("AP2", "AP2"),
("SMRY_HEADER", "SMRY_HEADER"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=11,
db_index=True,
choices=REC_TYPE_CHOICES,
)
section_label = fields.CharField(
db_column='SECTION_LABEL',
max_length=58,
blank=True,
help_text="This field is undocumented"
)
comments1 = fields.CharField(
db_column='COMMENTS1',
max_length=48,
blank=True,
help_text="This field is undocumented"
)
comments2 = fields.CharField(
db_column='COMMENTS2',
max_length=48,
blank=True,
help_text="This field is undocumented"
)
label = fields.CharField(
db_column='LABEL',
max_length=98,
help_text="This field is undocumented"
)
column_a = fields.IntegerField(
db_column='COLUMN_A',
blank=True,
null=True,
help_text="This field is undocumented"
)
column_b = fields.IntegerField(
db_column='COLUMN_B',
blank=True,
null=True,
help_text="This field is undocumented"
)
column_c = fields.IntegerField(
db_column='COLUMN_C',
blank=True,
null=True,
help_text="This field is undocumented"
)
show_c = fields.IntegerField(
db_column='SHOW_C',
blank=True,
null=True,
help_text="This field is undocumented"
)
show_b = fields.IntegerField(
db_column='SHOW_B',
blank=True,
null=True,
help_text="This field is undocumented"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'HEADER_CD'
verbose_name = 'HEADER_CD'
verbose_name_plural = 'HEADER_CD'
def __str__(self):
return str(self.form_id)
@python_2_unicode_compatible
class SmryCd(CalAccessBaseModel):
"""
Summary totals from filings.
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE",
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
        help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.CharField(
max_length=8,
db_column='LINE_ITEM',
db_index=True,
help_text="Line item number of this record"
)
REC_TYPE_CHOICES = (
('SMRY', 'SMRY'),
)
rec_type = fields.CharField(
max_length=4,
db_column='REC_TYPE',
db_index=True,
choices=REC_TYPE_CHOICES,
verbose_name='record type',
)
FORM_TYPE_CHOICES = (
('401A', 'Form 401 (Slate mailer organization campaign statement): \
Schedule A, payments received'),
('401B', 'Form 401 (Slate mailer organization campaign statement): \
Schedule B, payments made'),
('401B-1', 'Form 401 (Slate mailer organization campaign statement): \
Schedule B1, payments made by agent or independent contractor'),
('A', 'Form 460 (Recipient committee campaign statement): \
Schedule A, '),
('B1', 'Form 460 (Recipient committee campaign statement): \
Schedule B1, '),
('B2', 'Form 460 (Recipient committee campaign statement): \
Schedule B2, '),
('B3', 'Form 460 (Recipient committee campaign statement): \
Schedule B3, '),
('C', 'Form 460 (Recipient committee campaign statement): \
Schedule C, '),
('D', 'Form 460 (Recipient committee campaign statement): \
Schedule D, '),
('E', 'Form 460 (Recipient committee campaign statement): \
Schedule E, '),
('F', 'Form 460 (Recipient committee campaign statement): \
Schedule F, '),
('G', 'Form 460 (Recipient committee campaign statement): \
Schedule G, '),
('H', 'Form 460 (Recipient committee campaign statement): \
Schedule H, '),
('H1', 'Form 460 (Recipient committee campaign statement): \
Schedule H1, '),
('H2', 'Form 460 (Recipient committee campaign statement): \
Schedule H2, '),
('H3', 'Form 460 (Recipient committee campaign statement): \
Schedule H3, '),
('I', 'Form 460 (Recipient committee campaign statement): \
Schedule I, '),
('F401', 'Form 401 (Slate mailer organization campaign statement)'),
('F450', 'Form 450 (Recipient committee campaign statement, \
short form)'),
('F460', 'Form 460 (Recipient committee campaign statement)'),
('F461', 'Form 461 (Independent expenditure and major donor \
committee campaign statement)'),
('F465', 'Form 465 ()'),
('F625', 'Form 625 (Report of lobbying firm)'),
('F625P2', 'Form 625 (Report of lobbying firm): \
Part 2, payments received in connection with lobbying activity'),
('F625P3A', 'Form 625 (Report of lobbying firm): \
Part 3A, payments for activity expenses made in connection with \
lobbying activities'),
('F625P3B', 'Form 625 (Report of lobbying firm): \
Part 3B, payments to other lobbying firms made in connection with \
lobbying activities'),
('F635', 'Form 635 (Report of lobbyist employer and lobbying \
coalition)'),
('F635P3A', 'Form 635 (Report of lobbyist employer and lobbying \
coalition): Part 3A, payments in in-house employee lobbyists'),
('F635P3B', 'Form 635 (Report of lobbyist employer and lobbying \
coalition): Part 3B, payments to lobbying firms'),
('F635P3C', 'Form 635 (Report of lobbyist employer and lobbying \
coalition): Part 3C, activity expenses'),
('F635P3D', 'Form 635 (Report of lobbyist employer and lobbying \
coalition): Part 3D, other payments to influence legislative or \
administrative action'),
('F635P3E', 'Form 635 (Report of lobbyist employer and lobbying \
coalition): Part 3E, payments in connection with administrative testimony \
in ratemaking proceedings before the California Public Utilities Commission'),
('F645', 'Form 645 (Report of person spending $5,000 or more to \
influence legislative or administrative action)'),
('F645P2A', 'Form 645 (Report of person spending $5,000 or more to \
influence legislative or administrative action): Part 2A, activity expenses'),
('F645P2B', 'Form 645 (Report of person spending $5,000 or more to \
influence legislative or administrative action): Part 2B, \
other payments to influence legislative or administrative action'),
('F645P2C', 'Form 645 (Report of person spending $5,000 or more to \
influence legislative or administrative action): Part 2C, \
payments in connection with administrative testimony in ratemaking \
proceedings before the California Public Utilities Commission'),
        ('F900', 'Form 900 (Public Employee\'s Retirement Board Candidate \
Campaign Statement)'),
        ('S640', 'Form 640 (Governmental agencies reporting their payments to \
influence legislative or administrative action attachment)'),
)
form_type = fields.CharField(
max_length=8,
db_column='FORM_TYPE',
db_index=True,
choices=FORM_TYPE_CHOICES,
help_text='Name of the source filing form or schedule'
)
amount_a = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='AMOUNT_A',
blank=True,
help_text='Summary amount from column A',
verbose_name='amount A'
)
amount_b = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='AMOUNT_B',
blank=True,
help_text='Summary amount from column B',
verbose_name='amount B'
)
amount_c = fields.DecimalField(
decimal_places=2,
null=True,
max_digits=14,
db_column='AMOUNT_C',
blank=True,
help_text='Summary amount from column C',
verbose_name='amount C'
)
elec_dt = fields.DateField(
db_column='ELEC_DT',
null=True,
blank=True,
verbose_name='election date'
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'SMRY_CD'
verbose_name = 'SMRY_CD'
verbose_name_plural = 'SMRY_CD'
ordering = ("filing_id", "-amend_id", 'form_type', "line_item")
def __str__(self):
return str(self.filing_id)
def pretty_amount_a(self):
if self.amount_a is None:
return None
return "$%s" % intcomma(floatformat(self.amount_a, 0))
pretty_amount_a.short_description = 'amount A'
def pretty_amount_b(self):
if self.amount_b is None:
return None
return "$%s" % intcomma(floatformat(self.amount_b, 0))
pretty_amount_b.short_description = 'amount B'
def pretty_amount_c(self):
if self.amount_c is None:
return None
return "$%s" % intcomma(floatformat(self.amount_c, 0))
pretty_amount_c.short_description = 'amount C'
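    # Illustrative usage sketch (added; not part of the original file):
    # summary amounts reported on Form 460 filings, assuming the raw data has
    # been loaded.
    #
    # SmryCd.objects.filter(form_type='F460').values(
    #     'filing_id', 'line_item', 'amount_a')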
@python_2_unicode_compatible
class CvrE530Cd(CalAccessBaseModel):
"""
    This table is undocumented.
"""
UNIQUE_KEY = ("FILING_ID", "AMEND_ID")
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
        help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
REC_TYPE_CHOICES = (
("CVR", "CVR"),
)
rec_type = fields.CharField(
verbose_name='record type',
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
)
FORM_TYPE_CHOICES = (
('E530', 'Form 530 (Issue advocacy report)'),
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=4,
db_index=True,
help_text='Name of the source filing form or schedule',
choices=FORM_TYPE_CHOICES,
)
ENTITY_CODE_CHOICES = (
# Defined here:
# http://www.documentcloud.org/documents/1308003-cal-access-cal-format.html#document/p9
('', 'Unknown'),
)
entity_cd = fields.CharField(
db_column='ENTITY_CD',
max_length=32,
blank=True,
verbose_name='entity code',
choices=ENTITY_CODE_CHOICES
)
filer_naml = fields.CharField(
db_column='FILER_NAML',
max_length=200,
help_text="Filer last name"
)
filer_namf = fields.CharField(
db_column='FILER_NAMF',
max_length=4,
blank=True,
help_text="Filer first name"
)
filer_namt = fields.CharField(
db_column='FILER_NAMT',
max_length=32,
blank=True,
help_text="Filer title or prefix"
)
filer_nams = fields.CharField(
db_column='FILER_NAMS',
max_length=32,
blank=True,
help_text="Filer suffix"
)
report_num = fields.CharField(
db_column='REPORT_NUM',
max_length=32,
blank=True,
help_text="This field is undocumented"
)
rpt_date = fields.DateField(
db_column='RPT_DATE',
null=True,
help_text="This field is undocumented"
)
filer_city = fields.CharField(
db_column='FILER_CITY',
max_length=16,
blank=True,
help_text='Filer city'
)
filer_st = fields.CharField(
db_column='FILER_ST',
max_length=4,
blank=True,
verbose_name='Filer state'
)
filer_zip4 = fields.CharField(
db_column='FILER_ZIP4',
max_length=10,
blank=True,
help_text='Filer ZIP Code'
)
occupation = fields.CharField(
db_column='OCCUPATION',
max_length=15,
blank=True,
help_text="This field is undocumented"
)
employer = fields.CharField(
db_column='EMPLOYER',
max_length=13,
blank=True,
help_text="This field is undocumented"
)
cand_naml = fields.CharField(
db_column='CAND_NAML',
max_length=46,
help_text="Candidate last name"
)
cand_namf = fields.CharField(
db_column='CAND_NAMF',
max_length=21,
blank=True,
help_text="Candidate first name"
)
cand_namt = fields.CharField(
db_column='CAND_NAMT',
max_length=32,
blank=True,
help_text="Candidate title or prefix"
)
cand_nams = fields.CharField(
db_column='CAND_NAMS',
max_length=32,
blank=True,
help_text="Candidate suffix"
)
district_cd = fields.IntegerField(
db_column='DISTRICT_CD',
help_text="This field is undocumented"
)
OFFICE_CODE_CHOICES = (
(30001, "PRESIDENT"),
(30002, "GOVERNOR"),
(30003, "LIEUTENANT GOVERNOR"),
(30004, "SECRETARY OF STATE"),
(30005, "CONTROLLER"),
(30006, "TREASURER"),
(30007, "ATTORNEY GENERAL"),
(30008, "SUPERINTENDENT OF PUBLIC INSTRUCTION"),
(30009, "MEMBER BOARD OF EQUALIZATION"),
(30010, "OXNARD HARBOR COMMISSIONER"),
(30011, "CITY CONTROLLER"),
(30012, "STATE SENATE"),
(30013, "ASSEMBLY"),
(30014, "INSURANCE COMMISSIONER"),
(30015, "JUDGE"),
(30016, "BOARD MEMBER"),
(30017, "TAX COLLECTOR"),
(30018, "TRUSTEE"),
(30019, "SUPERVISOR"),
(30020, "SHERIFF"),
(30021, "CORONER"),
(30022, "MARSHALL"),
(30023, "CITY CLERK"),
(30024, "SCHOOL BOARD"),
(30025, "HARBOR COMMISSIONER"),
(30026, "DISTRICT ATTORNEY"),
(30027, "COUNTY CLERK"),
(30028, "AUDITOR"),
(30029, "MAYOR"),
(30030, "CITY ATTORNEY"),
(30031, "DEMOCRATIC COUNTY CENTRAL COMMITTEE"),
(30032, "TOWN COUNCIL"),
(30033, "ASSESSOR"),
(30034, "CITY TREASURER"),
(30035, "CITY COUNCIL"),
(30036, "COMMISSIONER"),
(30037, "REPUBLICAN COUNTY CENTRAL COMMITTEE"),
(30038, "DIRECTOR"),
(30039, "DIRECTOR OF ZONE 7"),
(30040, "COMMUNITY COLLEGE BOARD"),
(30041, "POLICE CHIEF"),
(30042, "CHIEF OF POLICE"),
(30043, "CENTRAL COMMITTEE"),
(30044, "BOARD OF EDUCATION"),
(30045, "BOARD OF DIRECTORS"),
(30046, "COLLEGE BOARD"),
(30047, "BART BOARD DIRECTOR"),
(30048, "BOARD OF TRUSTEES"),
(30049, "IRRIGATION"),
(30050, "WATER BOARD"),
(30051, "COMMUNITY PLANNING GROUP"),
(30052, "BOARD OF SUPERVISORS"),
(30053, "SUPERIOR COURT JUDGE"),
(30054, "DISTRICT ATTORNEY/PUBLIC DEFENDER"),
(30055, "MEASURE"),
(30056, "CITY PROSECUTOR"),
(30057, "SUPREME COURT JUDGE"),
(30058, "PUBLIC EMPLOYEES RETIREMENT BOARD"),
(30059, "APPELLATE COURT JUDGE"),
(50001, "Ag"),
(50002, "Assembly"),
(50003, "Assessor"),
(50004, "Assessor/Clerk/Recorder"),
(50005, "Assessor/County Clerk/Recorder"),
(50006, "Assessor/Recorder"),
(50007, "Associate Justice"),
(50008, "Auditor"),
(50009, "Auditor/Controller"),
(50010, "Auditor/Controller/Clerk/Recorder"),
(50011, "Auditor/Controller/Recorder"),
(50012, "Auditor/Controller/Treasurer/Tax Collector"),
(50013, "Auditor/Recorder"),
(50014, "Board Member"),
(50015, "Board Of Director"),
(50016, "Board Of Supervisor"),
(50017, "Boe"),
(50018, "Chief Justice"),
(50019, "City"),
(50020, "City Attorney"),
(50021, "City Auditor"),
(50022, "City Clerk"),
(50023, "City Council"),
(50024, "City Of Los Angeles"),
(50025, "City Of South El Monte"),
(50026, "City Prosecutor"),
(50027, "City Treasurer"),
(50028, "Clerk/Auditor"),
(50029, "Clerk/Record/Public Admin"),
(50030, "Clerk/Recorder"),
(50031, "Clerk/Recorder/Registar"),
(50032, "Clerk/Recorder/Registrar"),
(50033, "Commissioner"),
(50034, "Controller"),
(50035, "Costa Mesa"),
(50036, "Council Member"),
(50037, "County Clerk"),
(50038, "County Clerk/Auditor"),
(50039, "County Clerk/Auditor/Controller"),
(50040, "County Clerk/Recorder"),
(50041, "County Clerk/Recorder/Assessor"),
(50042, "County Clerk/Recorder/Public Admin"),
(50043, "Democratic County Central Committee"),
(50044, "Director"),
(50045, "District Attorney"),
(50046, "District Attorney/Public Administrator"),
(50047, "Gccc"),
(50048, "Governor"),
(50049, "Harbor Commissioner"),
(50050, "Ic"),
(50051, "Irrigation Dist"),
(50052, "Judge"),
(50053, "Justice"),
(50054, "Legislature"),
(50055, "Lieutenant Governor"),
(50056, "Mayor"),
(50057, "N/A"),
(50058, "Placentia"),
(50059, "Public Administrator"),
(50060, "Public Administrator/Guardian"),
(50061, "Rent Stabilization Board"),
(50062, "Republican Central Committee"),
(50063, "San Francisco Dccc"),
(50064, "Sanger"),
(50065, "School Board"),
(50066, "Secretary Of State"),
(50067, "Senator"),
(50068, "Sheriff"),
(50069, "Sheriff/Coroner"),
(50070, "Sheriff/Coroner/Marshall"),
(50071, "Sheriff/Coroner/Public Administrator"),
(50072, "Solana Beach"),
(50073, "Superintendent"),
(50074, "Supervisor"),
(50075, "Supt Of Schools"),
(50076, "Tax Collector"),
(50077, "Town Council"),
(50078, "Treasurer"),
(50079, "Treasurer/Tax Collector"),
(50080, "Treasurer/Tax Collector/Clerk"),
(50081, "Treasurer/Tax Collector/Public Administrator"),
(50082, "Treasurer/Tax Collector/Public Administrator/County Clerk"),
(50083, "Treasurer/Tax Collector/Recorder"),
(50084, "Trustee"),
(50085, "Weed Recreation Board Member"),
)
office_cd = fields.IntegerField(
db_column='OFFICE_CD',
verbose_name="office code",
help_text="Identifies the office being sought",
choices=OFFICE_CODE_CHOICES
)
pmnt_dt = fields.DateField(
db_column='PMNT_DT',
null=True,
help_text="This field is undocumented"
)
pmnt_amount = fields.FloatField(
db_column='PMNT_AMOUNT',
help_text="This field is undocumented"
)
type_literature = fields.IntegerField(
db_column='TYPE_LITERATURE',
help_text="This field is undocumented"
)
type_printads = fields.IntegerField(
db_column='TYPE_PRINTADS',
help_text="This field is undocumented"
)
type_radio = fields.IntegerField(
db_column='TYPE_RADIO',
help_text="This field is undocumented"
)
type_tv = fields.IntegerField(
db_column='TYPE_TV',
help_text="This field is undocumented"
)
type_it = fields.IntegerField(
db_column='TYPE_IT',
help_text="This field is undocumented"
)
type_billboards = fields.IntegerField(
db_column='TYPE_BILLBOARDS',
help_text="This field is undocumented"
)
type_other = fields.IntegerField(
db_column='TYPE_OTHER',
help_text="This field is undocumented"
)
other_desc = fields.CharField(
db_column='OTHER_DESC',
max_length=49,
help_text="This field is undocumented"
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'CVR_E530_CD'
verbose_name = 'CVR_E530_CD'
verbose_name_plural = 'CVR_E530_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class SpltCd(CalAccessBaseModel):
"""
Split records
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"PFORM_TYPE"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
elec_amount = fields.DecimalField(
max_digits=16,
decimal_places=2,
db_column='ELEC_AMOUNT',
help_text="This field is undocumented"
)
elec_code = fields.CharField(
max_length=2,
db_column='ELEC_CODE',
blank=True,
help_text='This field is undocumented',
)
elec_date = fields.DateField(
db_column='ELEC_DATE',
null=True,
help_text="This field is undocumented"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
        help_text="Unique filing identification number"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
PFORM_TYPE_CHOICES = (
('A', ''),
('B1', ''),
('B2', ''),
('C', ''),
('D', ''),
('F450P5', ''),
('H', ''),
)
pform_type = fields.CharField(
max_length=7,
db_column='PFORM_TYPE',
db_index=True,
choices=PFORM_TYPE_CHOICES,
help_text='This field is undocumented',
)
ptran_id = fields.CharField(
verbose_name='transaction ID',
max_length=32,
db_column='PTRAN_ID',
blank=True,
help_text='Permanent value unique to this item',
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'SPLT_CD'
verbose_name = 'SPLT_CD'
verbose_name_plural = 'SPLT_CD'
def __str__(self):
return str(self.filing_id)
@python_2_unicode_compatible
class TextMemoCd(CalAccessBaseModel):
"""
Text memos attached to electronic filings
"""
UNIQUE_KEY = (
"FILING_ID",
"AMEND_ID",
"LINE_ITEM",
"REC_TYPE",
"FORM_TYPE"
)
filing_id = fields.IntegerField(
db_column='FILING_ID',
db_index=True,
verbose_name='filing ID',
        help_text="Unique filing identification number"
)
amend_id = fields.IntegerField(
db_column='AMEND_ID',
db_index=True,
help_text="Amendment identification number. A number of 0 is the \
original filing and 1 to 999 amendments.",
verbose_name="amendment ID"
)
line_item = fields.IntegerField(
db_column='LINE_ITEM',
help_text="Line item number of this record",
db_index=True,
)
REC_TYPE_CHOICES = (
('i', 'i'),
('MEMO', 'MEMO'),
('TEXT', 'TEXT'),
('trun', 'trun'),
('Unde', 'Unde'),
)
rec_type = fields.CharField(
db_column='REC_TYPE',
max_length=4,
db_index=True,
choices=REC_TYPE_CHOICES,
verbose_name='record type'
)
FORM_TYPE_CHOICES = (
(' E', ''),
('410', ''),
('460', ''),
('461', ''),
('465', ''),
('496', ''),
('497', ''),
('497P1', ''),
('497P2', ''),
('A', ''),
('A4', ''),
('A6', ''),
('B', ''),
('B1', ''),
('B2', ''),
('B3', ''),
('C', ''),
('COMMENTS', ''),
('CVR', ''),
('D', ''),
('DEBTF', ''),
('E', ''),
('EXPNT', ''),
('F', ''),
('F401', ''),
('F401A', ''),
('F401B', ''),
('F401B-1', ''),
('F405', ''),
('F410', ''),
('F425', ''),
('F450', ''),
('F450P5', ''),
('F460', ''),
('F461', ''),
('F461P1', ''),
('F461P2', ''),
('F461P5', ''),
('F465', ''),
('F465P3', ''),
('F496', ''),
('F496P3', ''),
('F497', ''),
('F497P1', ''),
('F497P2', ''),
('F498-A', ''),
('F498-R', ''),
('F601', ''),
('F601P2A', ''),
('F601P2B', ''),
('F602', ''),
('F603', ''),
('F604', ''),
('F605', ''),
('F606', ''),
('F607', ''),
('F615', ''),
('F615P1', ''),
('F615P2', ''),
('F625', ''),
('F625P2', ''),
('F625P3A', ''),
('F625P3B', ''),
('F625P4B', ''),
('F635', ''),
('F635P3B', ''),
('F635P3C', ''),
('F635P4B', ''),
('F645', ''),
('F645P2A', ''),
('F645P3B', ''),
('G', ''),
('H', ''),
('H1', ''),
('H2', ''),
('H3', ''),
('I', ''),
('PT5', ''),
('RCPTB1', ''),
('RCPTC', ''),
('RCPTI', ''),
('S497', ''),
('S630', ''),
('S635-C', ''),
('S635C', ''),
('S640', ''),
('SCH A', ''),
('SF', ''),
('SMRY', ''),
('SPLT', ''),
('SUM', ''),
('SUMMARY', ''),
)
form_type = fields.CharField(
db_column='FORM_TYPE',
max_length=8,
help_text='Name of the source filing form or schedule',
db_index=True,
choices=FORM_TYPE_CHOICES
)
ref_no = fields.CharField(
db_column='REF_NO',
max_length=20,
blank=True,
help_text='Links text memo to a specific record',
verbose_name='reference number'
)
text4000 = fields.CharField(
db_column='TEXT4000',
max_length=4000,
blank=True,
help_text='Contents of the text memo',
verbose_name='text'
)
class Meta:
app_label = 'calaccess_raw'
db_table = 'TEXT_MEMO_CD'
verbose_name = 'TEXT_MEMO_CD'
verbose_name_plural = 'TEXT_MEMO_CD'
def __str__(self):
return str(self.filing_id)
| {
"content_hash": "1453a06605764a9ca3cdb4fea9e0d805",
"timestamp": "",
"source": "github",
"line_count": 1374,
"max_line_length": 95,
"avg_line_length": 29.71834061135371,
"alnum_prop": 0.5464207871084662,
"repo_name": "jsfenfen/django-calaccess-raw-data",
"id": "b6b35f1d558a217501b7d43b46ffdf3665ede59f",
"size": "40879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calaccess_raw/models/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "933"
},
{
"name": "Python",
"bytes": "515866"
}
],
"symlink_target": ""
} |
import keyword as kw
from .repr import ReprPrinter
from .str import StrPrinter
# A list of classes that should be printed using StrPrinter
STRPRINT = ('Add', 'Infinity', 'Integer', 'Mul', 'NegativeInfinity',
'Pow', 'Zero')
class PythonPrinter(ReprPrinter, StrPrinter):
"""A printer which converts an expression into its Python interpretation."""
def __init__(self, settings=None):
ReprPrinter.__init__(self)
StrPrinter.__init__(self, settings)
self.symbols = []
self.functions = []
# Create print methods for classes that should use StrPrinter instead
# of ReprPrinter.
for name in STRPRINT:
f_name = f'_print_{name}'
f = getattr(StrPrinter, f_name)
setattr(PythonPrinter, f_name, f)
def _print_Function(self, expr):
import diofant
func = expr.func.__name__
if not hasattr(diofant, func) and func not in self.functions:
self.functions.append(func)
return StrPrinter._print_Function(self, expr)
    # procedure (!) for defining symbols which have to be defined in python()
def _print_Symbol(self, expr):
symbol = self._str(expr)
if symbol not in self.symbols:
self.symbols.append(symbol)
return StrPrinter._print_Symbol(self, expr)
_print_BaseSymbol = StrPrinter._print_BaseSymbol
def python(expr, **settings):
"""Return Python interpretation of passed expression
(can be passed to the exec() function without any modifications)
"""
from ..core import Function, Symbol
printer = PythonPrinter(settings)
exprp = printer.doprint(expr)
result = ''
# Returning found symbols and functions
renamings = {}
for symbolname in printer.symbols:
newsymbolname = symbolname
# Escape symbol names that are reserved python keywords
if kw.iskeyword(newsymbolname):
while True:
newsymbolname += '_'
if (newsymbolname not in printer.symbols and
newsymbolname not in printer.functions):
renamings[Symbol(symbolname)] = Symbol(newsymbolname)
break
result += newsymbolname + " = Symbol('" + symbolname + "')\n"
for functionname in printer.functions:
newfunctionname = functionname
# Escape function names that are reserved python keywords
if kw.iskeyword(newfunctionname):
while True:
newfunctionname += '_'
if (newfunctionname not in printer.symbols and
newfunctionname not in printer.functions):
renamings[Function(functionname)] = Function(newfunctionname)
break
result += newfunctionname + " = Function('" + functionname + "')\n"
if len(renamings) != 0:
exprp = expr.subs(renamings)
result += 'e = ' + printer._str(exprp)
return result
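# Hedged usage sketch (editor's addition): assuming the usual diofant objects,
# python(Symbol('x') + 1) is expected to return a string along the lines of
#     x = Symbol('x')
#     e = x + 1
# which, as the docstring above notes, can be handed straight to exec().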
| {
"content_hash": "eaac854afa9bb3b0a774d1d23b705a98",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 81,
"avg_line_length": 35.89156626506024,
"alnum_prop": 0.6153071500503524,
"repo_name": "diofant/diofant",
"id": "fd488177e2bbbc27995d8cee62a17506c25b94dc",
"size": "2979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diofant/printing/python.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9063539"
}
],
"symlink_target": ""
} |
"""
http://adventofcode.com/day/11
Part 1
------
Santa's previous password expired, and he needs help choosing a new
one.
To help him remember his new password after the old one expires,
Santa has devised a method of coming up with a password based on
the previous one. Corporate policy dictates that passwords must be
exactly eight lowercase letters (for security reasons), so he finds
his new password by incrementing his old password string repeatedly
until it is valid.
Incrementing is just like counting with numbers: xx, xy, xz, ya,
yb, and so on. Increase the rightmost letter one step; if it was
z, it wraps around to a, and repeat with the next letter to the
left until one doesn't wrap around.
Unfortunately for Santa, a new Security-Elf recently started, and
he has imposed some additional password requirements:
- Passwords must include one increasing straight of at least
three letters, like abc, bcd, cde, and so on, up to xyz. They
cannot skip letters; abd doesn't count.
- Passwords may not contain the letters i, o, or l, as these
letters can be mistaken for other characters and are therefore
confusing.
- Passwords must contain at least two different, non-overlapping
pairs of letters, like aa, bb, or zz.
For example:
- hijklmmn meets the first requirement (because it contains the
      straight hij) but fails the second requirement
(because it contains i and l).
- abbceffg meets the third requirement (because it repeats bb
and ff) but fails the first requirement.
- abbcegjk fails the third requirement, because it only has one
double letter (bb).
- The next password after abcdefgh is abcdffaa.
- The next password after ghijklmn is ghjaabcc, because you
eventually skip all the passwords that start with ghi...,
since i is not allowed.
Given Santa's current password (your puzzle input), what should
his next password be?
Part 2
------
Santa's password expired again. What's the next one?
"""
from __future__ import print_function, unicode_literals
import os
import re
import sys
INFILE = 'inputs/input11.txt'
MINLENGTH = 8
MAXLENGTH = MINLENGTH
VALIDCHARS = 'abcdefghijklmnopqrstuvwxyz'
VALID = re.compile('^[a-z]+$')
BAD = re.compile(r'[ilo]')
double = [ch * 2 for ch in VALIDCHARS]
DOUBLE = re.compile('|'.join(double))
straights = [VALIDCHARS[i:i + 3] for i in xrange(len(VALIDCHARS) - 2)]
STRAIGHTS = re.compile('|'.join(straights))
def check(password):
valid = True
valid = valid and len(password) >= MINLENGTH
valid = valid and len(password) <= MAXLENGTH
valid = valid and VALID.match(password)
    # The puzzle requires *at least* one straight and no i/l/o anywhere,
    # so use >= 1 and search() rather than == 1 and match().
    valid = valid and len(STRAIGHTS.findall(password)) >= 1
    valid = valid and not BAD.search(password)
valid = valid and len(DOUBLE.findall(password)) >= 2
return valid
def next(password):
chars = [c for c in password]
    # Temporarily reverse the string so the rightmost (least significant)
    # character is processed first
chars.reverse()
# Increment characters
for i, char in enumerate(chars):
try:
current = VALIDCHARS.index(char)
chars[i] = VALIDCHARS[current + 1]
except IndexError:
chars[i] = VALIDCHARS[0]
else:
# If we were able to increment a letter, we
# don't need to move on to the next column
break
# Reverse the string again to make it normal
chars.reverse()
return ''.join(chars)
def make(current_password):
new_password = next(current_password)
while not check(new_password):
new_password = next(new_password)
return new_password
def main():
current_password = None
with open(INFILE) as f:
for line in f:
current_password = line.strip()
if current_password is not None:
# Part 1
new_password = make(current_password)
msg = '[Python] Puzzle 11-1: {}'
print(msg.format(new_password))
# Part 2
new_password = make(new_password)
msg = '[Python] Puzzle 11-2: {}'
print(msg.format(new_password))
if __name__ == '__main__':
main()
| {
"content_hash": "02378d1daec48db44956685b15ee8d4a",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 70,
"avg_line_length": 30.24264705882353,
"alnum_prop": 0.6824702163870654,
"repo_name": "rnelson/adventofcode",
"id": "efa242e5e0361d49bd5d2c0b56a0952d0f284188",
"size": "4135",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "advent2015/day11.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "186911"
},
{
"name": "C++",
"bytes": "89268"
},
{
"name": "CMake",
"bytes": "2230"
},
{
"name": "F#",
"bytes": "1768"
},
{
"name": "Fortran",
"bytes": "20755"
},
{
"name": "Kotlin",
"bytes": "15845"
},
{
"name": "Python",
"bytes": "102898"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
from metadata.models import TanitJobsCategory
from django.core.management.base import BaseCommand
CHOICES = [
'Tous secteurs',
'Informatique',
"Centres d'appels",
'Industrie',
'Ingenierie',
"Technologie de l'information",
'Commerce',
'Formation',
'Marketing',
'Télécommunications',
'Vente',
'Transport',
'Stratégie-Planification',
'Science',
'Commerce de détail',
'Restauration',
'Recherche',
'Immobilier',
'Controle Qualite',
'Achat - Approvisionnement',
'Pharmaceutiques',
'Services a la clientele',
'Media-Journalisme',
'Gestion',
'Juridique',
'Assurances',
'Installation-Entretien-Reparation',
'Ressources humaines',
'Sante',
'Fonction publique',
'Services veterinaires',
'Finance',
'Enseignement',
'Distribution',
'Design',
'Consulting',
'Construction',
'Developpement des affaires',
'Biotechnologie',
'Banque',
'Automobile',
'Administration',
'Comptabilite',
'Autres',
]
class Command(BaseCommand):
help = 'Fill the Database with the interests details'
def handle(self, **options):
objects = []
for name in CHOICES:
            print(name)
objects.append(TanitJobsCategory(name=name))
TanitJobsCategory.objects.bulk_create(objects)
        print('Tanit Jobs Category saved')
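# Hedged usage note (editor's addition): as a Django management command this
# module is normally run from the project root with
#     python manage.py insert_category
# which bulk-creates one TanitJobsCategory row per entry in CHOICES.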
| {
"content_hash": "631236627c5f16a688b2818279acf7f9",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 57,
"avg_line_length": 23,
"alnum_prop": 0.6279401282965075,
"repo_name": "firasbenmakhlouf/JobLookup",
"id": "ceb44ed6eee81e1c2e76f7a456d0d39a1e853c2d",
"size": "1431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metadata/management/commands/insert_category.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22"
},
{
"name": "CSS",
"bytes": "598248"
},
{
"name": "CoffeeScript",
"bytes": "19705"
},
{
"name": "HTML",
"bytes": "91510"
},
{
"name": "JavaScript",
"bytes": "35117"
},
{
"name": "Python",
"bytes": "55599"
}
],
"symlink_target": ""
} |
import pyat.sync
import time
import random
from .util import a_task, a_failing_task, make_args, make_kwargs, \
FailedTaskException
MIN_TIME = 1.0
MAX_TIME = 2.0
DELTA_TIME = MAX_TIME - MIN_TIME
STEP_TIME = 0.1
TIMEOUT_DELAY = 0.5
def test_future():
'''
Test a task scheduled N seconds in the future is executed close to the
requested time.
'''
# Create scheduler
scheduler = pyat.sync.SynchronousTaskScheduler()
# Pick some random arguments
args = make_args()
kwargs = make_kwargs()
# Pick a time between 1-5 seconds in the future
delay = MIN_TIME + (random.random() * DELTA_TIME)
at_time = time.time() + delay
# Schedule task
task = scheduler.schedule(at_time, a_task, *args, **kwargs)
# Pick a timeout
timeout = at_time + TIMEOUT_DELAY
# Wait for timeout
while time.time() < timeout:
time.sleep(STEP_TIME)
scheduler.poll()
# Did our function get run?
try:
(run_at, run_args, run_kwargs) = task.result
assert run_args == args, 'args does not match'
assert run_kwargs == kwargs, 'kwargs does not match'
assert run_at > at_time, 'Ran too early'
assert run_at < (at_time + STEP_TIME), 'Ran too late'
except pyat.sync.NotExecutedYet:
assert False, 'Did not get executed'
def test_future_exception():
'''
Test a task scheduled N seconds in the future that fails.
'''
# Create scheduler
scheduler = pyat.sync.SynchronousTaskScheduler()
# Pick some random arguments
args = make_args()
kwargs = make_kwargs()
# Pick a time between 1-5 seconds in the future
delay = MIN_TIME + (random.random() * DELTA_TIME)
at_time = time.time() + delay
# Schedule task
task = scheduler.schedule(at_time, a_failing_task, *args, **kwargs)
# Pick a timeout
timeout = at_time + TIMEOUT_DELAY
# Wait for timeout
while time.time() < timeout:
time.sleep(STEP_TIME)
scheduler.poll()
# Did our function get run?
try:
(run_at, run_args, run_kwargs) = task.result
assert False, 'Did not fail'
except FailedTaskException as e:
(msg, run_at, run_args, run_kwargs) = e.args
assert run_args == args, 'args does not match'
assert run_kwargs == kwargs, 'kwargs does not match'
assert run_at > at_time, 'Ran too early'
assert run_at < (at_time + STEP_TIME), 'Ran too late'
except pyat.sync.NotExecutedYet:
assert False, 'Did not get executed'
def test_future_cancelled():
'''
    Test that a task scheduled N seconds in the future and then cancelled
    does not execute.
'''
# Create scheduler
scheduler = pyat.sync.SynchronousTaskScheduler()
# Pick some random arguments
args = make_args()
kwargs = make_kwargs()
# Pick a time between 1-5 seconds in the future
delay = MIN_TIME + (random.random() * DELTA_TIME)
at_time = time.time() + delay
# Schedule task
task = scheduler.schedule(at_time, a_task, *args, **kwargs)
# Poll once
scheduler.poll()
# Cancel the task
task.cancel()
assert task.cancelled, 'Not cancelled'
# Pick a timeout
timeout = at_time + TIMEOUT_DELAY
# Wait for timeout
while time.time() < timeout:
time.sleep(STEP_TIME)
scheduler.poll()
# Did our function get run?
try:
(run_at, run_args, run_kwargs) = task.result
assert False, 'Task executed'
except pyat.sync.NotExecutedYet:
pass
def test_cancel_all():
'''
Test we can cancel all tasks.
'''
# Create scheduler
scheduler = pyat.sync.SynchronousTaskScheduler()
# Pick some random arguments
args = make_args()
kwargs = make_kwargs()
# Pick a time between 1-5 seconds in the future
delay = MIN_TIME + (random.random() * DELTA_TIME)
at_time = time.time() + delay
# Schedule tasks
task1 = scheduler.schedule(at_time, a_task, *args, **kwargs)
task2 = scheduler.schedule(at_time + 1, a_task, *args, **kwargs)
task3 = scheduler.schedule(at_time + 2, a_task, *args, **kwargs)
# Poll once
scheduler.poll()
# Cancel all tasks
scheduler.cancel_all()
# Pick a timeout
timeout = at_time + TIMEOUT_DELAY
# Wait for timeout
while time.time() < timeout:
time.sleep(STEP_TIME)
scheduler.poll()
# Did our functions get run?
for task in (task1, task2, task3):
try:
(run_at, run_args, run_kwargs) = task.result
assert False, 'Task executed'
except pyat.sync.NotExecutedYet:
pass
def test_not_yet_executed():
'''
    Test NotExecutedYet gets raised if we ask the task early.
'''
# Create scheduler
scheduler = pyat.sync.SynchronousTaskScheduler()
# Pick some random arguments
args = make_args()
kwargs = make_kwargs()
# Pick a time between 1-5 seconds in the future
delay = MIN_TIME + (random.random() * DELTA_TIME)
at_time = time.time() + delay
# Schedule task
task = scheduler.schedule(at_time, a_task, *args, **kwargs)
# Poll once
scheduler.poll()
# Did our function get run?
try:
(run_at, run_args, run_kwargs) = task.result
assert False, 'Task executed early'
except pyat.sync.NotExecutedYet:
pass
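# Hedged summary sketch (editor's addition): the pattern exercised by the tests
# above boils down to schedule / poll / result, roughly:
#
#   scheduler = pyat.sync.SynchronousTaskScheduler()
#   task = scheduler.schedule(time.time() + 1.0, a_task)
#   while True:
#       scheduler.poll()          # runs any task whose time has arrived
#       try:
#           result = task.result  # raises NotExecutedYet until it has run
#           break
#       except pyat.sync.NotExecutedYet:
#           time.sleep(STEP_TIME)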
| {
"content_hash": "cc5d112eb7cf353f8b02a37fa58a8fae",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 74,
"avg_line_length": 26.28921568627451,
"alnum_prop": 0.6212940518366585,
"repo_name": "vrtsystems/pyat",
"id": "3b6685eddc9969b95240d17672e35ec671bb9426",
"size": "5484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sync_tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "12645"
}
],
"symlink_target": ""
} |
exec(open('parse.py').read())
Node = dict
Leaf = str
def typeExpression(env, e):
if type(e) == Leaf:
pass # Complete base cases for booleans for Problem #3.
if type(e) == Node:
for label in e:
children = e[label]
if label == 'Number':
return 'TyNumber'
elif label == 'Variable':
pass # Complete case for 'Variable' for Problem #3.
elif label == 'Element':
                pass # Complete case for 'Element' for Problem #3.
elif label == 'Plus':
pass # Complete case for 'Plus' for Problem #3.
def typeProgram(env, s):
if type(s) == Leaf:
if s == 'End':
return 'TyVoid'
elif type(s) == Node:
for label in s:
if label == 'Print':
[e, p] = s[label]
pass # Complete case(s) for 'Print' for Problem #3.
if label == 'Assign':
[xTree, e0, e1, e2, p] = s[label]
x = xTree['Variable'][0]
pass # Complete case(s) for 'Assign' for Problem #3.
if label == 'Loop':
[xTree, nTree, p1, p2] = s[label]
x = xTree['Variable'][0]
n = nTree['Number'][0]
pass # Complete case for 'Loop' for Problem #3.
#eof | {
"content_hash": "39e36b5f016d49bc747b89c60f037ae6",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 68,
"avg_line_length": 29.23913043478261,
"alnum_prop": 0.47137546468401487,
"repo_name": "lapets/pylium",
"id": "867efb97b83150f48364de04171584a89ccd6e29",
"size": "1689",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course/320-2015-fal/midterm/analyze.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "490"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('passive_data_kit', '0050_datasourcereference'),
]
operations = [
migrations.AddField(
model_name='datapoint',
name='source_reference',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='data_points', to='passive_data_kit.DataSourceReference'),
),
]
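# Hedged note (editor's addition): with related_name='data_points', the reverse
# accessor on DataSourceReference can be used roughly as
#     some_reference.data_points.all()
# and on_delete=SET_NULL means that deleting the referenced DataSourceReference
# nulls out, rather than deletes, the associated data points.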
| {
"content_hash": "98699c8a515f3fedf3442695352284a8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 176,
"avg_line_length": 30.41176470588235,
"alnum_prop": 0.6576402321083172,
"repo_name": "audaciouscode/PassiveDataKit-Django",
"id": "c89316012a88f942733289d7b58166533168a655",
"size": "613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/0051_datapoint_source_reference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1703"
},
{
"name": "HTML",
"bytes": "98679"
},
{
"name": "JavaScript",
"bytes": "161391"
},
{
"name": "Python",
"bytes": "426910"
}
],
"symlink_target": ""
} |