| repo_name (stringlengths 5-92) | path (stringlengths 4-232) | copies (stringclasses, 19 values) | size (stringlengths 4-7) | content (stringlengths 721-1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,222,525,072B) | line_mean (float64, 6.51-99.8) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|
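A minimal sketch of how rows with this schema could be loaded and filtered, assuming the dump was produced as a parquet export readable with the Hugging Face `datasets` library (the shard pattern below is a placeholder):

```python
from datasets import load_dataset

# Placeholder shard pattern; point this at the actual parquet files of the dump.
ds = load_dataset("parquet", data_files="train-*.parquet", split="train")

# Example: keep permissively licensed, non-autogenerated files and peek at a few rows.
subset = ds.filter(lambda row: row["license"] in ("mit", "bsd-3-clause")
                   and not row["autogenerated"])
for row in subset.select(range(3)):
    print(row["repo_name"], row["path"], row["size"], row["alpha_frac"])
```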
yuanyelele/solfege | solfege/tracebackwindow.py | 1 | 4872 |
# GNU Solfege - free ear training software
# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2007, 2008, 2011 Tom Cato Amundsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import sys
from gi.repository import Gtk
from solfege import gu
from solfege import reportbug
class TracebackWindow(Gtk.Dialog):
def __init__(self, show_gtk_warnings):
Gtk.Dialog.__init__(self)
self.m_show_gtk_warnings = show_gtk_warnings
self.set_default_size(630, 400)
self.vbox.set_border_width(8)
label = Gtk.Label(label=_("GNU Solfege message window"))
label.set_name('Heading2')
self.vbox.pack_start(label, False, False, 0)
label = Gtk.Label(label=_("Please report this to the bug database or send an email to bug-solfege@gnu.org if the content of the message make you believe you have found a bug."))
label.set_line_wrap(True)
self.vbox.pack_start(label, False, False, 0)
scrollwin = Gtk.ScrolledWindow()
scrollwin.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
self.vbox.pack_start(scrollwin, True, True, 0)
self.g_text = Gtk.TextView()
scrollwin.add(self.g_text)
self.g_report = Gtk.Button()
self.g_report.connect('clicked', self.do_report)
box = Gtk.HBox()
self.g_report.add(box)
im = Gtk.Image.new_from_stock('gtk-execute', Gtk.IconSize.BUTTON)
box.pack_start(im, True, True, 0)
label = Gtk.Label()
label.set_text_with_mnemonic(gu.escape(_('_Make automatic bug report')))
label.set_use_markup(True)
box.pack_start(label, True, True, 0)
self.action_area.pack_start(self.g_report, True, True, 0)
self.g_close = Gtk.Button(stock='gtk-close')
self.action_area.pack_start(self.g_close, True, True, 0)
self.g_close.connect('clicked', lambda w: self.hide())
def do_report(self, *v):
yesno = gu.dialog_yesno(_(
"Automatic bug reports are often mostly useless because "
"people omit their email address and add very little info "
"about what happened. Fixing bugs is difficult if we "
"cannot contact you and ask for more information.\n\n"
"I would prefer if you open a web browser and report your "
"bug to the bug tracker at http://bugs.solfege.org.\n\n"
"This will give your bug report higher priority and it "
"will be fixed faster.\n\nAre you willing to do that?"))
if yesno:
return
self.m_send_exception = 'Nothing'
b = self.g_text.get_buffer()
d = reportbug.ReportBugWindow(
self, b.get_text(b.get_start_iter(),
b.get_end_iter(), False))
while 1:
ret = d.run()
if ret in (Gtk.ResponseType.REJECT, Gtk.ResponseType.DELETE_EVENT):
break
elif ret == reportbug.RESPONSE_SEND:
self.m_send_exception = d.send_bugreport()
break
if self.m_send_exception != 'Nothing':
if self.m_send_exception:
m = Gtk.MessageDialog(self, Gtk.DialogFlags.MODAL,
Gtk.MessageType.ERROR, Gtk.ButtonsType.CLOSE,
"Sending bugreport failed:\n%s" % self.m_send_exception)
else:
m = Gtk.MessageDialog(self, Gtk.DialogFlags.MODAL,
Gtk.MessageType.INFO, Gtk.ButtonsType.CLOSE,
'Report sent to http://www.solfege.org')
m.run()
m.destroy()
d.destroy()
def write(self, txt):
if ("DeprecationWarning:" in txt) or \
(not self.m_show_gtk_warnings and (
"GtkWarning" in txt
or "PangoWarning" in txt
or ("Python C API version mismatch" in txt and
("solfege_c_midi" in txt or "swig" in txt))
)):
return
sys.stdout.write(txt)
if txt.strip():
self.show_all()
buffer = self.g_text.get_buffer()
buffer.insert(buffer.get_end_iter(), txt)
self.set_focus(self.g_close)
def flush(self, *v):
pass
def close(self, *v):
pass
| gpl-3.0 | -2,274,994,143,251,863,300 | 42.891892 | 185 | 0.609401 | false |
DBrianKimmel/PyHouse | Project/src/Modules/House/Family/Reolink/reolink_device.py | 1 | 1201 |
"""
@name: /home/briank/workspace/PyHouse/Project/src/Modules/House/Family/Reolink/reolink_device.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2019 by D. Brian Kimmel
@license: MIT License
@note: Created on Jan 26, 2020
@summary:
"""
__updated__ = '2020-01-26'
__version_info__ = (20, 1, 26)
__version__ = '.'.join(map(str, __version_info__))
# Import system type stuff
# Import PyMh files
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.reolink_device ')
class Api:
"""
These are the public methods available to use Devices from any family.
"""
m_plm_list = []
m_hub_list = []
m_pyhouse_obj = None
def __init__(self, p_pyhouse_obj):
# p_pyhouse_obj.House._Commands['insteon'] = {}
self.m_pyhouse_obj = p_pyhouse_obj
LOG.info('Initialized')
def LoadConfig(self):
"""
"""
def Start(self):
"""
"""
def SaveConfig(self):
"""
"""
def Stop(self):
_x = PrettyFormatAny.form(self.m_pyhouse_obj, 'pyhouse')
# ## END DBK
| mit | -2,093,959,987,469,830,700 | 21.240741 | 101 | 0.606994 | false |
stuycs-softdev-fall-2013/proj3-7-cartwheels | website/urls.py | 1 | 1252 |
from website import app, views
# Home and search
app.add_url_rule('/', view_func=views.index)
app.add_url_rule('/search', view_func=views.search_results)
# Carts
app.add_url_rule('/carts/<cid>', view_func=views.cart_page, methods=['GET', 'POST'])
app.add_url_rule('/carts/<cid>/menu', view_func=views.menu_page, methods=['GET', 'POST'])
app.add_url_rule('/carts/<cid>/directions', view_func=views.directions, methods=['GET', 'POST'])
# Users
app.add_url_rule('/profile', view_func=views.profile, methods=['GET', 'POST'])
app.add_url_rule('/users/<uid>', view_func=views.user_profile, methods=['GET', 'POST'])
app.add_url_rule('/logout', view_func=views.logout)
app.add_url_rule('/login', view_func=views.login, methods=['GET', 'POST'])
app.add_url_rule('/register', view_func=views.register, methods=['GET', 'POST'])
# Ads
app.add_url_rule('/ads', view_func=views.ads_page, methods=['GET', 'POST'])
app.add_url_rule('/ad/<name>', view_func=views.purchase_ad, methods=['GET', 'POST'])
# Data
# Data
app.add_url_rule('/_serve', view_func=views.serve_data)
app.add_url_rule('/_image/<image_id>', view_func=views.serve_image)
app.add_url_rule('/_image-default', view_func=views.serve_default)
| bsd-3-clause | 1,223,634,340,213,656,000 | 43.714286 | 96 | 0.688498 | false |
Makki1/old-svn | avr/sketchbook/GiraRM_Debug/freebus/freebus_ets/software/freebus-ets/src/GUI/FB_ProgramFrame.py | 1 | 10920 |
#!/usr/bin/
#-*- coding: iso-8859-1 -*-
#===============================================================================
# __________ ________________ __ _______
# / ____/ __ \/ ____/ ____/ __ )/ / / / ___/
# / /_ / /_/ / __/ / __/ / __ / / / /\__ \
# / __/ / _, _/ /___/ /___/ /_/ / /_/ /___/ /
# /_/ /_/ |_/_____/_____/_____/\____//____/
#
#Source File: FB_ProgramFrame.py
#Version: V0.1 , 29.08.2009
#Author: Jerome Leisner
#email: j.leisner@ing-automation.de
#===============================================================================
import os
import sys
import time
#import thread
#import Queue
#import threading
#import thread
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import pickle
import jpype
import thread
from Global import Global
from GUI import FB_DlgConnectionManager
class FB_ProgramFrame(object):
__curProject = None #project object
__cbConnections = None #widget combo connections
__bConnect = None #widget connect button
__parentClass = None #object of its own class
__curConnectionInstance = None #instance of the current connection (FB_EIBConnection)
#Devices in programming mode
__ListViewProgDevices = None #widget Tree/Listview to show devices in programming mode
__CheckTimer = None #timer object for check devices in cycle
__toggleCheckProgDevices = None
def __init__(self,curProject):
self.__parentClass = self
self.__curProject = curProject
GladeObj = gtk.glade.XML(Global.GUIPath + Global.GladeFile,"winProgramming")
dic = { "on_bConnectionConfig_clicked":self.ShowConnectionManager ,
"on_bTestConnection_clicked":self.ClickTestConnection,
"on_bConnect_toggled":self.ToggleConnect,
"on_cbConnections_changed":self.ConnectionsChanged,
"on_toggleCheckProgDevices_toggled":self.ToggleCheckProgDevices,
}
GladeObj.signal_autoconnect(dic)
#read widgets
self.__cbConnections = GladeObj.get_widget("cbConnections")
self.__bConnect = GladeObj.get_widget("bConnect")
self.__ListViewProgDevices = GladeObj.get_widget("ListViewProgDevices")
self.__toggleCheckProgDevices = GladeObj.get_widget("toggleCheckProgDevices")
#init model combobox to show connections
liststore = gtk.ListStore(str,str) #just one string at first..., 2nd string for GUID
self.__cbConnections.set_model(liststore)
self.text_cell = gtk.CellRendererText()
self.__cbConnections.pack_start(self.text_cell,True)
self.__cbConnections.add_attribute(self.text_cell, "text", 0)
#init model tree/listview to show devices in progmode
liststore = gtk.ListStore(gtk.gdk.Pixbuf, str)
self.__ListViewProgDevices.set_model(liststore)
self.text_cell = gtk.CellRendererText() #Text Object
self.img_cell = gtk.CellRendererPixbuf() #Image Object
self.column = gtk.TreeViewColumn()
self.column.pack_start(self.img_cell, False)
self.column.pack_start(self.text_cell,True)
self.column.add_attribute(self.img_cell, "pixbuf",0)
self.column.add_attribute(self.text_cell, "text", 1)
self.column.set_attributes(self.text_cell, markup=1)
self.__ListViewProgDevices.append_column(self.column)
#init timer to check devices in progmode
#self.__CheckTimer = threading.Timer(5.0, self.ReadDevicesInProgMode)
self.LoadConnectionFromDB()
self.UpdateUserConnections()
winProgramming = GladeObj.get_widget("winProgramming")
winProgramming.show()
#Dialog: Connection-Manager
def ShowConnectionManager(self,widget, data=None):
FB_DlgConnectionManager.FB_DlgConnectionManager(self.__curProject, self.__parentClass)
#button: Test-Connection
#open the current connection and test it...
def ClickTestConnection(self,widget, data=None):
pass
def ToggleConnect(self,widget, data=None):
model = self.__cbConnections.get_model()
iter = self.__cbConnections.get_active_iter()
id = model.get_value(iter,1)
self.__curConnectionInstance = self.getEIBConnection(id)
if widget.get_active() == True:
#connect
self.__curConnectionInstance.doConnect()
else:
#disconnect
self.__curConnectionInstance.doDisconnect()
self.SetConnectButtonState(widget)
#callback change combo connections
def ConnectionsChanged(self,widget, data=None):
#disconnect in case of changing the connection
if self.__curConnectionInstance <> None:
self.__curConnectionInstance.doDisconnect()
self.SetConnectButtonState(self.__bConnect)
def SetConnectButtonState(self,widget):
if self.__curConnectionInstance.isConnected() == True:
widget.set_active(True)
widget.set_label("Verbunden")
else:
widget.set_active(False)
widget.set_label("Verbinden")
#gets the instance of a FB_EIBConnection with the given id
def getEIBConnection(self,id):
RValue = None
if self.__curProject <> None:
if self.__curProject.eibConnectionList <> None:
for i in range(len(self.__curProject.eibConnectionList)):
if id == self.__curProject.eibConnectionList[i].getID():
RValue = self.__curProject.eibConnectionList[i]
break
return RValue
##function to update the combobox in parentframe to show/select for user
#@param cbConnections: widget of the combobox in parentframe which should be loaded
def UpdateUserConnections(self):
try:
#copy list in combo connections in program_Frame (parent)
if(self.__curProject <> None):# and self._MyConnection <> None):
model = self.__cbConnections.get_model()
#save id of the current connection / which is currently selected
curIter = self.__cbConnections.get_active_iter()
if curIter <> None:
idsaved = model.get_value(curIter,1) #column 1 = id
else:
idsaved = 0
model.clear()
IterSaved = None #init Iterator
for i in range(len(self.__curProject.eibConnectionList)):
Name = self.__curProject.eibConnectionList[i].getName()
typeID = self.__curProject.eibConnectionList[i].getType()
Type = str(Global.ConTypesText[typeID])
id = self.__curProject.eibConnectionList[i].getID()
tmp = Name + " mit '" + Type + "'"
iter = model.append([tmp, id])
#look if saved id is still in list and set this item to the active item
if idsaved == id:
IterSaved = iter
#connection still existing...
if IterSaved <> None:
self.__cbConnections.set_active_iter(IterSaved)
else:
if len(self.__curProject.eibConnectionList) > 0:
self.__cbConnections.set_active(0)
else:
#no connections in list or no valid project is loaded
model = self.__cbConnections.get_model()
model.clear()
except:
pass
def LoadConnectionFromDB(self):
#try:
cursor = Global.DatabaseConnection.cursor()
cursor.execute("SELECT * FROM Connections")
del self.__curProject.eibConnectionList[0:len(self.__curProject.eibConnectionList)]
for row in cursor:
tmpCon = pickle.loads(row[2]) #column 2 contains class data
self.__curProject.eibConnectionList.append(tmpCon)
#except:
# pass
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
##button to start reading Devices in progmode
##
def ToggleCheckProgDevices(self,widget,Data=None):
if widget.get_active() == True:
widget.set_label("zyklischer Suchlauf...")
self.ReadDevicesInProgMode()
#self.__CheckTimer.start()
else:
widget.set_label("Suchlauf starten")
#self.__CheckTimer.cancel()
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#section physical addresses
def ReadDevicesInProgMode(self):
#read the PA of devices in programming mode
try:
mngClient = Global.ManagementClientImpl(self.__curConnectionInstance.getKNXNetworkLink())
IndivAddrList = mngClient.readAddress(False)
model = self.__ListViewProgDevices.get_model()
model.clear()
image=gtk.gdk.pixbuf_new_from_file(Global.ImagePath + "Device.png")
for Addr in IndivAddrList:
Iterator = model.append([image,Addr.toString()])
except jpype.JavaException, ex :
error = ""
if jpype.JavaException.javaClass(ex) is Global.KNXTimeoutException:
error = U"keine Geräte im Programmiermodus : " + str(jpype.JavaException.message(ex))
elif jpype.JavaException.javaClass(ex) is Global.KNXInvalidResponseException :
error = U"ungültige Antwort beim Lesen der Addressen : " + str(jpype.JavaException.message(ex))
elif jpype.JavaException.javaClass(ex) is Global.KNXLinkClosedException:
error = U"kein geöffneter Netzwerk-Link : " + str(jpype.JavaException.message(ex))
elif jpype.JavaException.javaClass(ex) is Global.KNXRemoteException:
error = U"Fehler beim Remote-Server : " + str(jpype.JavaException.message(ex))
msgbox = gtk.MessageDialog(parent = None, buttons = gtk.BUTTONS_OK,
flags = gtk.DIALOG_MODAL, type = gtk.MESSAGE_ERROR,
message_format = error )
msgbox.set_title(Global.ERRORCONNECTIONTITLE)
#result = msgbox.run()
#msgbox.destroy()
| gpl-3.0 | 865,511,498,934,486,400 | 39.83908 | 111 | 0.554487 | false |
kmike/tornado-slacker | test_project/settings.py | 1 | 1318 |
# Django settings for test project.
import os, sys
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
join = lambda p: os.path.abspath(os.path.join(PROJECT_ROOT, p))
sys.path.insert(0, join('..'))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': join('db.sqlite'),
# :memory: databases cause obscure bugs in multithreaded environment
# and django uses :memory: as TEST_NAME by default so it is necessary
# to make test database real file.
'TEST_NAME': join('db-test.sqlite'),
}
}
SECRET_KEY = '5mcs97ar-(nnxhfkx0%^+0^sr!e(ax=x$2-!8dqy25ff-l1*a='
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_DIRS = (
join('templates'),
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'testapp',
)
| mit | 4,513,988,263,674,335,000 | 26.458333 | 77 | 0.68437 | false |
kevin-intel/scikit-learn | sklearn/datasets/_kddcup99.py | 3 | 12676 |
"""KDDCUP 99 dataset.
A classic dataset for anomaly detection.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
"""
import errno
from gzip import GzipFile
import logging
import os
from os.path import dirname, exists, join
import numpy as np
import joblib
from ._base import _fetch_remote
from ._base import _convert_data_dataframe
from . import get_data_home
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ..utils import check_random_state
from ..utils import shuffle as shuffle_method
# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
ARCHIVE = RemoteFileMetadata(
filename='kddcup99_data',
url='https://ndownloader.figshare.com/files/5976045',
checksum=('3b6c942aa0356c0ca35b7b595a26c89d'
'343652c9db428893e7494f837b274292'))
# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz
ARCHIVE_10_PERCENT = RemoteFileMetadata(
filename='kddcup99_10_data',
url='https://ndownloader.figshare.com/files/5976042',
checksum=('8045aca0d84e70e622d1148d7df78249'
'6f6333bf6eb979a1b0837c42a9fd9561'))
logger = logging.getLogger(__name__)
def fetch_kddcup99(*, subset=None, data_home=None, shuffle=False,
random_state=None,
percent10=True, download_if_missing=True, return_X_y=False,
as_frame=False):
"""Load the kddcup99 dataset (classification).
Download it if necessary.
================= ====================================
Classes 23
Samples total 4898431
Dimensionality 41
Features discrete (int) or continuous (float)
================= ====================================
Read more in the :ref:`User Guide <kddcup99_dataset>`.
.. versionadded:: 0.18
Parameters
----------
subset : {'SA', 'SF', 'http', 'smtp'}, default=None
To return the corresponding classical subsets of kddcup 99.
If None, return the entire kddcup 99 dataset.
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
.. versionadded:: 0.19
shuffle : bool, default=False
Whether to shuffle dataset.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and for
selection of abnormal samples if `subset='SA'`. Pass an int for
reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
percent10 : bool, default=True
Whether to load only 10 percent of the data.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` object.
.. versionadded:: 0.20
as_frame : bool, default=False
If `True`, returns a pandas Dataframe for the ``data`` and ``target``
objects in the `Bunch` returned object; `Bunch` return object will also
have a ``frame`` member.
.. versionadded:: 0.24
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (494021, 41)
The data matrix to learn. If `as_frame=True`, `data` will be a
pandas DataFrame.
target : {ndarray, series} of shape (494021,)
The regression target for each sample. If `as_frame=True`, `target`
will be a pandas Series.
frame : dataframe of shape (494021, 42)
Only present when `as_frame=True`. Contains `data` and `target`.
DESCR : str
The full description of the dataset.
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
kddcup99 = _fetch_brute_kddcup99(
data_home=data_home,
percent10=percent10,
download_if_missing=download_if_missing
)
data = kddcup99.data
target = kddcup99.target
feature_names = kddcup99.feature_names
target_names = kddcup99.target_names
if subset == 'SA':
s = target == b'normal.'
t = np.logical_not(s)
normal_samples = data[s, :]
normal_targets = target[s]
abnormal_samples = data[t, :]
abnormal_targets = target[t]
n_samples_abnormal = abnormal_samples.shape[0]
# selected abnormal samples:
random_state = check_random_state(random_state)
r = random_state.randint(0, n_samples_abnormal, 3377)
abnormal_samples = abnormal_samples[r]
abnormal_targets = abnormal_targets[r]
data = np.r_[normal_samples, abnormal_samples]
target = np.r_[normal_targets, abnormal_targets]
if subset == 'SF' or subset == 'http' or subset == 'smtp':
# select all samples with positive logged_in attribute:
s = data[:, 11] == 1
data = np.c_[data[s, :11], data[s, 12:]]
feature_names = feature_names[:11] + feature_names[12:]
target = target[s]
data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False))
data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False))
data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False))
if subset == 'http':
s = data[:, 2] == b'http'
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[4],
feature_names[5]]
if subset == 'smtp':
s = data[:, 2] == b'smtp'
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[4],
feature_names[5]]
if subset == 'SF':
data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[2],
feature_names[4], feature_names[5]]
if shuffle:
data, target = shuffle_method(data, target, random_state=random_state)
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'kddcup99.rst')) as rst_file:
fdescr = rst_file.read()
frame = None
if as_frame:
frame, data, target = _convert_data_dataframe(
"fetch_kddcup99", data, target, feature_names, target_names
)
if return_X_y:
return data, target
return Bunch(
data=data,
target=target,
frame=frame,
target_names=target_names,
feature_names=feature_names,
DESCR=fdescr,
)
def _fetch_brute_kddcup99(data_home=None,
download_if_missing=True, percent10=True):
"""Load the kddcup99 dataset, downloading it if necessary.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
percent10 : bool, default=True
Whether to load only 10 percent of the data.
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (494021, 41)
Each row corresponds to the 41 features in the dataset.
target : ndarray of shape (494021,)
Each value corresponds to one of the 21 attack types or to the
label 'normal.'.
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
DESCR : str
Description of the kddcup99 dataset.
"""
data_home = get_data_home(data_home=data_home)
dir_suffix = "-py3"
if percent10:
kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
archive = ARCHIVE_10_PERCENT
else:
kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
archive = ARCHIVE
samples_path = join(kddcup_dir, "samples")
targets_path = join(kddcup_dir, "targets")
available = exists(samples_path)
dt = [('duration', int),
('protocol_type', 'S4'),
('service', 'S11'),
('flag', 'S6'),
('src_bytes', int),
('dst_bytes', int),
('land', int),
('wrong_fragment', int),
('urgent', int),
('hot', int),
('num_failed_logins', int),
('logged_in', int),
('num_compromised', int),
('root_shell', int),
('su_attempted', int),
('num_root', int),
('num_file_creations', int),
('num_shells', int),
('num_access_files', int),
('num_outbound_cmds', int),
('is_host_login', int),
('is_guest_login', int),
('count', int),
('srv_count', int),
('serror_rate', float),
('srv_serror_rate', float),
('rerror_rate', float),
('srv_rerror_rate', float),
('same_srv_rate', float),
('diff_srv_rate', float),
('srv_diff_host_rate', float),
('dst_host_count', int),
('dst_host_srv_count', int),
('dst_host_same_srv_rate', float),
('dst_host_diff_srv_rate', float),
('dst_host_same_src_port_rate', float),
('dst_host_srv_diff_host_rate', float),
('dst_host_serror_rate', float),
('dst_host_srv_serror_rate', float),
('dst_host_rerror_rate', float),
('dst_host_srv_rerror_rate', float),
('labels', 'S16')]
column_names = [c[0] for c in dt]
target_names = column_names[-1]
feature_names = column_names[:-1]
if available:
try:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
except Exception as e:
raise IOError(
"The cache for fetch_kddcup99 is invalid, please delete "
f"{str(kddcup_dir)} and run the fetch_kddcup99 again") from e
elif download_if_missing:
_mkdirp(kddcup_dir)
logger.info("Downloading %s" % archive.url)
_fetch_remote(archive, dirname=kddcup_dir)
DT = np.dtype(dt)
logger.debug("extracting archive")
archive_path = join(kddcup_dir, archive.filename)
file_ = GzipFile(filename=archive_path, mode='r')
Xy = []
for line in file_.readlines():
line = line.decode()
Xy.append(line.replace('\n', '').split(','))
file_.close()
logger.debug('extraction done')
os.remove(archive_path)
Xy = np.asarray(Xy, dtype=object)
for j in range(42):
Xy[:, j] = Xy[:, j].astype(DT[j])
X = Xy[:, :-1]
y = Xy[:, -1]
# XXX bug when compress!=0:
# (error: 'Incorrect data length while decompressing[...] the file
# could be corrupted.')
joblib.dump(X, samples_path, compress=0)
joblib.dump(y, targets_path, compress=0)
else:
raise IOError("Data not found and `download_if_missing` is False")
return Bunch(
data=X,
target=y,
feature_names=feature_names,
target_names=[target_names],
)
def _mkdirp(d):
"""Ensure directory d exists (like mkdir -p on Unix)
No guarantee that the directory is writable.
"""
try:
os.makedirs(d)
except OSError as e:
if e.errno != errno.EEXIST:
raise
| bsd-3-clause | 3,550,723,356,904,437,000 | 32.983914 | 98 | 0.574787 | false |
abdulfaizp/adventofcode | xmas6.py | 1 | 1127 |
# light_grid=[[0 for x in range(1000)] for x in range(1000)]
def call_summation(light_grid):
light=sum(map(sum, light_grid))
print "part one=" ,light
def grid_operation(array, switch_state, light_grid):
for i in range(array[0], array[2]+1):
for j in range(array[1], array[3]+1):
if switch_state==1:
light_grid[i][j]=1
elif switch_state==0:
if light_grid[i][j]==0:
light_grid[i][j]=1
else:
light_grid[i][j]=0
elif switch_state==2:
light_grid[i][j]=0
def make_array_of_numbers(input, light_grid):
array=input.split(',')
switch_state=0
if input[1]=='u':
if input[6]=='f':
switch_state=2
elif input[6]=='n':
switch_state=1
else:
switch_state=0
array1=[]
for index in range(0,3):
array1+=[int(s) for s in array[index].split() if s.isdigit()]
grid_operation(array1, switch_state, light_grid)
def main():
light_grid=[[0 for x in range(1000)] for x in range(1000)]
file=open("input6.txt")
data=file.readlines()
for line in data:
make_array_of_numbers(line, light_grid)
call_summation(light_grid)
main()
| cc0-1.0 | 3,651,151,422,980,804,600 | 24.636364 | 63 | 0.613132 | false |
LTD-Beget/sprutio-rpc | lib/FileManager/workers/sftp/newFile.py | 1 | 1671 |
import traceback
from lib.FileManager.workers.baseWorkerCustomer import BaseWorkerCustomer
class NewFile(BaseWorkerCustomer):
def __init__(self, path, session, *args, **kwargs):
super(NewFile, self).__init__(*args, **kwargs)
self.path = path
self.session = session
def run(self):
try:
self.preload()
sftp = self.get_sftp_connection(self.session)
abs_path = self.path
self.logger.debug("FM NewFile worker run(), abs_path = %s" % abs_path)
try:
if sftp.exists(abs_path):
raise OSError("File path already exists")
fd = sftp.open(abs_path, 'w')
if fd:
fd.close()
info = sftp.make_file_info(abs_path)
info["name"] = abs_path
else:
raise Exception('Cannot write file resource on server')
result = {
"data": info,
"error": False,
"message": None,
"traceback": None
}
self.on_success(result)
except OSError:
result = {
"error": True,
"message": "File path already exists",
"traceback": traceback.format_exc()
}
self.on_error(result)
except Exception as e:
result = {
"error": True,
"message": str(e),
"traceback": traceback.format_exc()
}
self.on_error(result)
| gpl-3.0 | -242,218,982,691,148,300 | 27.810345 | 82 | 0.453022 | false |
defm03/toraeru | test/loli_gelbooru.py | 1 | 3832 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
*booru general file.
For now, there's working Gelbooru downloader for loli content,
but soon I'll add danbooru, etc.
"""
import loli_spam
import os
import datetime
import urllib.request
import http.cookiejar
import xml.etree.ElementTree as eltree
import json
#loli_spam.execute_spam()
cache_dir = "cache/"
class Gelbooru(object):
"""docstring for Gelbooru"""
def __init__(self, url="http://gelbooru.com/"):
# gets gelbooru homepage by default
super(Gelbooru, self).__init__()
self.url = url
gelbooru_loli = urllib.request.urlopen(url,timeout=5)
read_gel_loli = gelbooru_loli.read()
# save to gel.html file
name_gel_loli = "gel.html"
file_gel_loli = open(cache_dir+name_gel_loli,"wb")
file_gel_loli.write(read_gel_loli)
def gel_rssatom(url="http://gelbooru.com/index.php?page=atom",
by_tag_loli = False,limit = 100,download = True):
"""gel_rssatom:
by_tag_loli:
If you want to get feed for tag 'loli', you need to switch
by_tag_loli to True.
limit:
limit is a variable that stores the maximum number of loli entries.
The maximum number of entries that can be loaded is 100 (limited
by the Gelbooru API). When I was testing it, there was some problem
with loading fewer than 5-10 urls.
"""
if by_tag_loli == True:
url = "http://gelbooru.com/index.php?page=dapi&s=post&q=index&limit={0}&tags=loli".format(str(limit))
# gets gelbooru atom rss feed
gelbooru_atom = urllib.request.urlopen(url,timeout=5)
read_gel_atom = gelbooru_atom.read()
# save to atom.xml file
if by_tag_loli == True:
name_gel_atom = "atom_loli.xml"
else: name_gel_atom = "atom.xml"
file_gel_atom = open(cache_dir+name_gel_atom,"wb")
file_gel_atom.write(read_gel_atom)
# XML parsing
tree = eltree.parse(cache_dir+name_gel_atom)
root = tree.getroot()
# gets urls to images from post form
for imgurl in root.iter('post'):
url = imgurl.attrib.get('file_url')
print(url)
# gets picture file name
f_url = url.replace(url[0:37],"")
if download == True and os.path.exists(cache_dir+f_url) == False:
# if file is already downloaded, it will skip it
urllib.request.urlretrieve(url,cache_dir+f_url)
print(f_url)
class Danbooru(object):
"""docstring for Danbooru"""
def __init__(self, url="http://gelbooru.com/"):
super(Danbooru, self).__init__()
self.url = url
def get_time():
# datetime.datetime.now() method
now = datetime.datetime.now()
hour = str(now.hour)
minute = str(now.minute)
second = str(now.second)
# isoformat() >> str method; keep only the date part, e.g. "2013-10-08"
isotime = datetime.datetime.now().isoformat()
s_iso = str(isotime)
date = s_iso[0:10]
return date, hour, minute, second
def dan_jsonGET(url="http://gelbooru.com/",tag="loli",limit=100):
# sends request to json API on danbooru and saves in variable 'json_r'
json_g = urllib.request.urlopen(url+"posts.json?limit={0}?search[tags]={1}".format(str(limit), tag))
json_r = json_g.read()
# opens file following new filename format, and writes json data to it
date, hour, minute, second = get_time()
file_dan = open(cache_dir+"danbooru-"+date+"-T-"+hour+"-"+minute+"-"+second+".json", "wb")
file_dan.write(json_r)
"""Filename new format:
example: danbooru-2013-10-08-T-19-11-12.json
1st place: Object name
2nd place: Date in iso format
3rd place: (starting with "-T-") Time: hour - minute - second
"""
def execute_gel(take_limit=100):
# auto get a page, and put into "gel.html" file
Gelbooru("http://gelbooru.com/index.php?page=post&s=list&tags=loli")
maigah = Gelbooru.gel_rssatom(by_tag_loli=True,limit=take_limit)
def execute_dan(take_limit=100):
# calls dan_jsonGET -> saving 100 entries with tag "loli"
# to file following format in Danbooru init()
omgomg = Danbooru.dan_jsonGET(tag="loli",limit=take_limit)
| gpl-3.0 | -7,906,757,162,575,998,000 | 29.420635 | 108 | 0.679541 | false |
rdoyle1978/Ice | src/ice/gridproviders/combined_provider.py | 1 | 1225 |
#!/usr/bin/env python
# encoding: utf-8
import grid_image_provider
from functools import reduce
class CombinedProvider(grid_image_provider.GridImageProvider):
def __init__(self, *args):
"""
Creates a CombinedProvider out of the providers that were passed in `args`
ORDER MATTERS. `image_for_rom` will return the first non-None result from
a provider. So if you want to check the users filesystem but check
ConsoleGrid if nothing is found then you would do
CombinedProvider(LocalProvider(), ConsoleGridProvider())
But if you wanted to, say, use ConsoleGrid but show a placeholder image in
the case of an error you would do
CombinedProvider(ConsoleGridProvider(), PlaceholderProvider())
"""
self.providers = args
def _enabled_providers(self):
return filter(lambda provider: provider.is_enabled(), self.providers)
def is_enabled(self):
"""
Returns True if any child provider is enabled
"""
return len(self._enabled_providers()) > 0
def image_for_rom(self, rom):
"""
Returns the first image found
"""
return reduce(lambda image, provider: image if image else provider.image_for_rom(
rom), self._enabled_providers(), None)
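# Illustrative usage sketch (LocalProvider and ConsoleGridProvider are hypothetical
# stand-ins for concrete GridImageProvider subclasses, not defined in this module):
#
#   provider = CombinedProvider(LocalProvider(), ConsoleGridProvider())
#   if provider.is_enabled():
#       image = provider.image_for_rom(rom)  # first non-None result wins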
| mit | -3,607,491,241,710,703,000 | 28.878049 | 85 | 0.705306 | false |
line72/subte | libsubte/interface/StopMarker.py | 1 | 9828 |
#
# Copyright (C) 2012 - Marcus Dillavou
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import math
import weakref
from gi.repository import Gtk, Champlain, Clutter, GLib
import libsubte
import shapes
class StopMarker(Champlain.CustomMarker):
def __init__(self, gtmap, stop):
Champlain.CustomMarker.__init__(self)
self._gtmap = None
self.gtmap = gtmap
self._stop = None
self.stop = stop
self.full_picture_box = None
self.unselected_color = Clutter.Color.new(0xf0, 0x02, 0xf0, 0xbb)
self.picture_color = Clutter.Color.new(0xef, 0xe4, 0x35, 0xbb)
self.modified_color = Clutter.Color.new(0xff, 0x10, 0x28, 0xbb)
self.route_color = Clutter.Color.new(0x0d, 0x9a, 0x27, 0xbb)
self.selected_color = Clutter.Color.new(0xfd, 0xfd, 0x02, 0xbb)
# draw our clickable marker
self.marker = Clutter.Actor()
self.marker.set_background_color(self.unselected_color)
self.marker.set_size(16, 16)
self.marker.set_position(0, 0)
self.marker.set_anchor_point(8, 8)
self.marker.set_reactive(True)
self.add_actor(self.marker)
self.marker.show()
self._visible = False
self.set_location(self.stop.latitude, self.stop.longitude)
# trying to capture it, then make us emit a signal doesn't
# seem to be working
#!lukstafi -- changed button-release to button-press
# and uncommented next line
self.marker.connect('button-press-event', self.on_click)
self.set_reactive(False)
@property
def gtmap(self):
if self._gtmap:
return self._gtmap()
return None
@gtmap.setter
def gtmap(self, m):
if m:
self._gtmap = weakref.ref(m)
else:
self._gtmap = None
@property
def stop(self):
if self._stop:
return self._stop()
return None
@stop.setter
def stop(self, m):
if m:
self._stop = weakref.ref(m)
else:
self._stop = None
def selected(self, status):
if status:
self.marker.set_background_color(self.selected_color)
else:
self.marker.set_background_color(self.unselected_color)
return True
def clicked(self, status):
print 'StopMarker.clicked status=', status
if status == self._visible: # nothing to do here
return True
if status:
self.show()
else:
self.hide()
return True
def on_click(self, actor, event, user_data = None):
#!mwd - this doesn't work :(
print 'StopMarker.on_click (no emitting)', actor, event
#!lukstafi - commented out
#self.emit('button-press-event', event)
#!lukstafi - instead of signals we self-call and invoke the hook
self.clicked(True)
if libsubte.Stop.activate_stop_hook:
libsubte.Stop.activate_stop_hook(self.stop)
return True
def on_expand_picture(self, actor, event, picture):
self.full_picture_box = Clutter.Texture()
self.full_picture_box.set_from_file(picture.image)
self.full_picture_box.set_keep_aspect_ratio(True)
size = self.gtmap.get_allocated_width(), self.gtmap.get_allocated_height()
r1 = size[0] / float(size[1])
size2 = self.full_picture_box.get_base_size()
if picture.orientation == 0 or picture.orientation == 180:
r2 = size2[0] / float(size2[1])
else:
r2 = size2[1] / float(size2[0])
self.full_picture_box.set_position(0, 0)
self.full_picture_box.set_z_rotation_from_gravity(picture.orientation, Clutter.Gravity.CENTER)
if r1 > r2: # use width
w = size[1] * r2
h = size[1]
else: # use height
w = size[0]
h = size[0] / r2
if picture.orientation != 0 and picture.orientation != 180:
w, h = h, w # reverse
self.full_picture_box.set_size(w, h)
self.full_picture_box.set_reactive(True)
#!lukstafi -- changed button-release to button-press
self.full_picture_box.connect('button-press-event', self.on_close_picture)
self.full_picture_box.show_all()
self.gtmap.show_image(self.full_picture_box)
return False
def on_close_picture(self, actor, event):
if self.full_picture_box:
self.gtmap.remove_image(self.full_picture_box)
self.full_picture_box.hide_all()
self.full_picture_box = None
return False
def show(self):
self.gtmap.unshow_stop_info()
width = 500
height = 200
# our meta info
group = Clutter.Group()
group.set_position(8, -8)
group.set_anchor_point(width / 2, height)
# just drawn a rectange or something
rect = shapes.Bubble()
c = Clutter.Color.new(0xde, 0xde, 0xde, 0xfe)
rect.set_color(c)
rect.set_has_outline(True)
rect.set_outline_color(Clutter.Color.new(0x00, 0x00, 0x00, 0xff))
rect.set_size(width, height)
rect.set_position(0, 8)
rect.set_anchor_point(0, 0)
rect.set_has_shadow(True)
group.add_child(rect)
name = Clutter.Text()
if self.stop.name:
name.set_markup('<markup><b>%s</b></markup>' % self.stop.name.replace('&', '&amp;'))
else:
name.set_markup('<markup><b>%s</b></markup>' % self.stop.stop_id)
name.set_size(400, 25)
name.set_position(10, 15)
name.set_anchor_point(0, 0)
group.add_child(name)
info = Clutter.Text()
info.set_use_markup(True)
info.set_text('')
info.set_size(200, 75)
info.set_position(10, 50)
info.set_anchor_point(0, 0)
group.add_child(info)
info.set_markup('<markup><b>Latitude:</b> %s\n<b>Longitude:</b> %s</markup>' % (self.stop.latitude, self.stop.longitude))
routes = Clutter.Text()
if len(self.stop.trip_routes) > 0:
route_names = ', '.join([x.route.short_name for x in self.stop.trip_routes])
else:
route_names = 'None'
routes.set_markup('<markup><b>Routes:</b> %s</markup>' % route_names)
routes.set_size(200, 75)
routes.set_position(10, 100)
routes.set_anchor_point(0, 0)
group.add_child(routes)
# see if we have a picture (or more)
if len(self.stop.pictures) > 0:
try:
picture_box = Clutter.Texture()
# just use the first picture for now
picture = self.stop.pictures[0]
if picture.thumbnail:
picture_box.set_from_file(picture.thumbnail)
else:
picture_box.set_from_file(picture.image)
w, h = picture_box.get_base_size()
picture_box.set_keep_aspect_ratio(True)
picture_box.set_anchor_point(0, 0)
if picture.orientation in (90, -90):
#!mwd - I have no idea how the fuck clutter is rotation this
# It seems as though the bounding box doesn't change
# so I'm just making up some position numbers
picture_box.set_width(100)
picture_box.set_position(width - ((h/w) * 100) - (w/2) - 45, 60)
picture_box.set_z_rotation_from_gravity(picture.orientation, Clutter.Gravity.CENTER)
else:
picture_box.set_height(100)
picture_box.set_position(width - ((w/h) * 100) - (w/2) - 25, 50)
#!lukstafi -- changed button-release to button-press
picture_box.connect('button-press-event', self.on_expand_picture, picture)
picture_box.set_reactive(True)
group.add_child(picture_box)
except GLib.GError, e:
print >> sys.stderr, 'Error loading image', e
self.gtmap.show_popup(self, group)
self._visible = True
def hide(self):
self.gtmap.unshow_popup(self)
self._visible = False
self._update_color()
def update(self):
self._update_color()
if self._visible:
self.show()
def _update_color(self):
if self.stop:
if len(self.stop.trip_routes) > 0:
# we have routes associated with us
self.marker.set_background_color(self.route_color)
return
elif len(self.stop.pictures) > 0:
if self.stop.name != None and len(self.stop.name) > 0:
# picture and we have a name
self.marker.set_background_color(self.modified_color)
else:
# we have picture associated with us, but no name
self.marker.set_background_color(self.picture_color)
return
# default color
self.marker.set_background_color(self.unselected_color)
| gpl-3.0 | -5,898,807,832,328,061,000 | 32.889655 | 129 | 0.577941 | false |
RuudBurger/CouchPotatoServer | couchpotato/core/downloaders/deluge.py | 1 | 16194 |
from base64 import b64encode, b16encode, b32decode
from datetime import timedelta
from hashlib import sha1
import os.path
import re
import traceback
from bencode import bencode as benc, bdecode
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryFloat, cleanHost
from couchpotato.core.logger import CPLog
from deluge_client.client import DelugeRPCClient
log = CPLog(__name__)
autoload = 'Deluge'
class Deluge(DownloaderBase):
protocol = ['torrent', 'torrent_magnet']
log = CPLog(__name__)
drpc = None
def connect(self, reconnect = False):
""" Connect to the delugeRPC, re-use connection when already available
:param reconnect: force reconnect
:return: DelugeRPC instance
"""
# Load host from config and split out port.
host = cleanHost(self.conf('host'), protocol = False).split(':')
# Force host assignment
if len(host) == 1:
host.append(80)
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
if not self.drpc or reconnect:
self.drpc = DelugeRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
return self.drpc
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
The file gets downloaded in the searcher and sent to this function.
This is done so failure checking happens before the downloader is used,
so the downloader doesn't need to worry about it.
:return: boolean
One failure returns false, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" (%s) to Deluge.', (data.get('name'), data.get('protocol')))
if not self.connect():
return False
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
# Set parameters for Deluge
options = {
'add_paused': self.conf('paused', default = 0),
'label': self.conf('label')
}
if self.conf('directory'):
#if os.path.isdir(self.conf('directory')):
options['download_location'] = self.conf('directory')
#else:
# log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory'))
if self.conf('completed_directory'):
#if os.path.isdir(self.conf('completed_directory')):
options['move_completed'] = 1
options['move_completed_path'] = self.conf('completed_directory')
#else:
# log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory'))
if data.get('seed_ratio'):
options['stop_at_ratio'] = 1
options['stop_ratio'] = tryFloat(data.get('seed_ratio'))
# Deluge only has seed time as a global option. Might be added in
# in a future API release.
# if data.get('seed_time'):
# Send request to Deluge
if data.get('protocol') == 'torrent_magnet':
remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options)
else:
filename = self.createFileName(data, filedata, media)
remote_torrent = self.drpc.add_torrent_file(filename, filedata, options)
if not remote_torrent:
log.error('Failed sending torrent to Deluge')
return False
log.info('Torrent sent to Deluge successfully.')
return self.downloadReturnId(remote_torrent)
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True) and self.drpc.test():
return True
return False
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Deluge download status.')
if not self.connect():
return []
release_downloads = ReleaseDownloadList(self)
queue = self.drpc.get_alltorrents(ids)
if not queue:
log.debug('Nothing in queue or error')
return []
for torrent_id in queue:
torrent = queue[torrent_id]
if not 'hash' in torrent:
# When given a list of ids, deluge will return an empty item for a non-existant torrent.
continue
log.debug('name=%s / id=%s / save_path=%s / move_on_completed=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_on_completed'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
# Deluge has no easy way to work out if a torrent is stalled or failing.
#status = 'failed'
status = 'busy'
# If a user opts to seed a torrent forever (usually associated with private trackers usage), stop_ratio will be 0 or -1 (depending on Deluge version).
# In this scenario the status of the torrent would never change from BUSY to SEEDING.
# The last check takes care of this case.
if torrent['is_seed'] and ((tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio'])) or (tryFloat(torrent['stop_ratio']) < 0)):
# We have torrent['seeding_time'] to work out what the seeding time is, but we do not
# have access to the downloader seed_time, as with deluge we have no way to pass it
# when the torrent is added. So Deluge will only look at the ratio.
# See above comment in download().
status = 'seeding'
elif torrent['is_seed'] and torrent['is_finished'] and torrent['paused'] and torrent['state'] == 'Paused':
status = 'completed'
download_dir = sp(torrent['save_path'])
if torrent['move_on_completed']:
download_dir = torrent['move_completed_path']
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(download_dir, file_item['path'])))
release_downloads.append({
'id': torrent['hash'],
'name': torrent['name'],
'status': status,
'original_status': torrent['state'],
'seed_ratio': torrent['ratio'],
'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': sp(download_dir if len(torrent_files) == 1 else os.path.join(download_dir, torrent['name'])),
'files': torrent_files,
})
return release_downloads
def pause(self, release_download, pause = True):
if pause:
return self.drpc.pause_torrent([release_download['id']])
else:
return self.drpc.resume_torrent([release_download['id']])
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
return self.drpc.remove_torrent(release_download['id'], True)
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting Deluge to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.drpc.remove_torrent(release_download['id'], remove_local_data = delete_files)
class DelugeRPC(object):
host = 'localhost'
port = 58846
username = None
password = None
client = None
def __init__(self, host = 'localhost', port = 58846, username = None, password = None):
super(DelugeRPC, self).__init__()
self.host = host
self.port = port
self.username = username
self.password = password
def connect(self):
#self.client = DelugeClient()
#self.client.connect(self.host, int(self.port), self.username, self.password)
self.client = DelugeRPCClient(self.host, int(self.port), self.username, self.password)
self.client.connect()
def test(self):
try:
self.connect()
except:
return False
return True
def add_torrent_magnet(self, torrent, options):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_magnet(torrent, options)
if not torrent_id:
torrent_id = self._check_torrent(True, torrent)
if torrent_id and options['label']:
self.client.label.set_torrent(torrent_id, options['label'])
except Exception as err:
log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return torrent_id
def add_torrent_file(self, filename, torrent, options):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options)
if not torrent_id:
torrent_id = self._check_torrent(False, torrent)
if torrent_id and options['label']:
self.client.label.set_torrent(torrent_id, options['label'])
except Exception as err:
log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return torrent_id
def get_alltorrents(self, ids):
ret = False
try:
self.connect()
ret = self.client.core.get_torrents_status({'id': ids}, ('name', 'hash', 'save_path', 'move_completed_path', 'progress', 'state', 'eta', 'ratio', 'stop_ratio', 'is_seed', 'is_finished', 'paused', 'move_on_completed', 'files'))
except Exception as err:
log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return ret
def pause_torrent(self, torrent_ids):
try:
self.connect()
self.client.core.pause_torrent(torrent_ids)
except Exception as err:
log.error('Failed to pause torrent: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
def resume_torrent(self, torrent_ids):
try:
self.connect()
self.client.core.resume_torrent(torrent_ids)
except Exception as err:
log.error('Failed to resume torrent: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
def remove_torrent(self, torrent_id, remove_local_data):
ret = False
try:
self.connect()
ret = self.client.core.remove_torrent(torrent_id, remove_local_data)
except Exception as err:
log.error('Failed to remove torrent: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return ret
def disconnect(self):
self.client.disconnect()
def _check_torrent(self, magnet, torrent):
# Torrent not added, check if it already existed.
if magnet:
torrent_hash = re.findall('urn:btih:([\w]{32,40})', torrent)[0]
else:
info = bdecode(torrent)["info"]
torrent_hash = sha1(benc(info)).hexdigest()
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
torrent_hash = torrent_hash.lower()
torrent_check = self.client.core.get_torrent_status(torrent_hash, {})
if torrent_check['hash']:
return torrent_hash
return False
config = [{
'name': 'deluge',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'deluge',
'label': 'Deluge',
'description': 'Use <a href="http://www.deluge-torrent.org/" target="_blank">Deluge</a> to download torrents.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'torrent',
},
{
'name': 'host',
'default': 'localhost:58846',
'description': 'Hostname with port. Usually <strong>localhost:58846</strong>',
},
{
'name': 'username',
},
{
'name': 'password',
'type': 'password',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Download to this directory. Keep empty for default Deluge download directory.',
},
{
'name': 'completed_directory',
'type': 'directory',
'description': 'Move completed torrent to this directory. Keep empty for default Deluge options.',
'advanced': True,
},
{
'name': 'label',
'description': 'Label to add to torrents in the Deluge UI.',
},
{
'name': 'remove_complete',
'label': 'Remove torrent',
'type': 'bool',
'default': True,
'advanced': True,
'description': 'Remove the torrent from Deluge after it has finished seeding.',
},
{
'name': 'delete_files',
'label': 'Remove files',
'default': True,
'type': 'bool',
'advanced': True,
'description': 'Also remove the leftover files.',
},
{
'name': 'paused',
'type': 'bool',
'advanced': True,
'default': False,
'description': 'Add the torrent paused.',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],
}]
| gpl-3.0 | 8,216,095,494,107,268,000 | 37.374408 | 512 | 0.545943 | false |
pollitosabroson/idneo | src/catalogs/migrations/0001_initial.py | 1 | 1867 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('public_id', models.CharField(verbose_name='public_id', unique=True, max_length=12, editable=False, db_index=True)),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='created date', null=True)),
('last_modified', models.DateTimeField(auto_now=True, auto_now_add=True, null=True, verbose_name='last modified')),
('name', models.CharField(max_length=80, verbose_name='name')),
],
options={
'abstract': False,
'get_latest_by': 'created',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Type',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('public_id', models.CharField(verbose_name='public_id', unique=True, max_length=12, editable=False, db_index=True)),
('created_date', models.DateTimeField(auto_now_add=True, verbose_name='created date', null=True)),
('last_modified', models.DateTimeField(auto_now=True, auto_now_add=True, null=True, verbose_name='last modified')),
('name', models.CharField(max_length=80, verbose_name='name')),
],
options={
'abstract': False,
'get_latest_by': 'created',
},
bases=(models.Model,),
),
]
| apache-2.0 | 388,971,664,067,969,900 | 42.418605 | 133 | 0.559186 | false |
GabrielNicolasAvellaneda/dd-agent | checks.d/wmi_check.py | 1 | 5343 |
'''
Windows Only.
Generic WMI check. This check allows you to specify particular metrics that you
want from WMI in your configuration. Check wmi_check.yaml.example in your conf.d
directory for more details on configuration.
'''
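# Illustrative instance layout (a hedged sketch, not copied from
# wmi_check.yaml.example; the keys mirror what check() reads below, while the
# WMI class, property and metric names are assumptions):
#
#   instance = {
#       'class': 'Win32_PerfFormattedData_PerfOS_System',
#       'metrics': [
#           # [WMI property, metric name, AgentCheck method, e.g. 'gauge']
#           ['ProcessorQueueLength', 'system.proc.queue_length', 'gauge'],
#       ],
#       'tag_by': 'Name',  # needed whenever the query can return several rows
#   }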
# 3rd party
import wmi
# project
from checks import AgentCheck
UP_METRIC = 'Up'
SEARCH_WILDCARD = '*'
class WMICheck(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.wmi_conns = {}
def _get_wmi_conn(self, host, user, password):
key = "%s:%s:%s" % (host, user, password)
if key not in self.wmi_conns:
self.wmi_conns[key] = wmi.WMI(host, user=user, password=password)
return self.wmi_conns[key]
def check(self, instance):
host = instance.get('host', None)
user = instance.get('username', None)
password = instance.get('password', None)
w = self._get_wmi_conn(host, user, password)
wmi_class = instance.get('class')
metrics = instance.get('metrics')
filters = instance.get('filters')
tag_by = instance.get('tag_by')
tag_queries = instance.get('tag_queries')
constant_tags = instance.get('constant_tags')
if not wmi_class:
raise Exception('WMI instance is missing a value for `class` in wmi_check.yaml')
# If there are filters, we need one query per filter.
if filters:
for f in filters:
prop = f.keys()[0]
search = f.values()[0]
if SEARCH_WILDCARD in search:
search = search.replace(SEARCH_WILDCARD, '%')
wql = "SELECT * FROM %s WHERE %s LIKE '%s'" \
% (wmi_class, prop, search)
results = w.query(wql)
else:
results = getattr(w, wmi_class)(**f)
self._extract_metrics(results, metrics, tag_by, w, tag_queries, constant_tags)
else:
results = getattr(w, wmi_class)()
self._extract_metrics(results, metrics, tag_by, w, tag_queries, constant_tags)
def _extract_metrics(self, results, metrics, tag_by, wmi, tag_queries, constant_tags):
if len(results) > 1 and tag_by is None:
raise Exception('WMI query returned multiple rows but no `tag_by` value was given. '
'metrics=%s' % metrics)
for res in results:
tags = []
# include any constant tags...
if constant_tags:
tags.extend(constant_tags)
# if tag_queries is specified then get attributes from other classes and use as a tags
if tag_queries:
for query in tag_queries:
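                    # Each entry is a 4-element list (noted here for clarity):
                    # [property on this row that links to the other class,
                    #  target WMI class, property on the target class to match
                    #  that link value against, target property used as tag].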
link_source_property = int(getattr(res, query[0]))
target_class = query[1]
link_target_class_property = query[2]
target_property = query[3]
link_results = \
wmi.query("SELECT {0} FROM {1} WHERE {2} = {3}"
.format(target_property, target_class,
link_target_class_property, link_source_property))
if len(link_results) != 1:
self.log.warning("Failed to find {0} for {1} {2}. No metrics gathered"
.format(target_class, link_target_class_property,
link_source_property))
continue
link_value = str(getattr(link_results[0], target_property)).lower()
tags.append("{0}:{1}".format(target_property.lower(),
"_".join(link_value.split())))
# Grab the tag from the result if there's a `tag_by` value (e.g.: "name:jenkins")
# Strip any #instance off the value when `tag_queries` is set (gives us unique tags)
if tag_by:
tag_value = str(getattr(res, tag_by)).lower()
if tag_queries and tag_value.find("#") > 0:
tag_value = tag_value[:tag_value.find("#")]
tags.append('%s:%s' % (tag_by.lower(), tag_value))
if len(tags) == 0:
tags = None
for wmi_property, name, mtype in metrics:
if wmi_property == UP_METRIC:
# Special-case metric will just submit 1 for every value
# returned in the result.
val = 1
elif getattr(res, wmi_property):
val = float(getattr(res, wmi_property))
else:
self.log.warning("When extracting metrics with wmi, found a null value"
" for property '{0}'. Metric type of property is {1}."
.format(wmi_property, mtype))
continue
# Submit the metric to Datadog
try:
func = getattr(self, mtype)
except AttributeError:
raise Exception('Invalid metric type: {0}'.format(mtype))
func(name, val, tags=tags)
| bsd-3-clause | 658,768,032,123,503,700 | 41.404762 | 98 | 0.518061 | false |
blancha/abcngspipelines | bischipseq/convert1StartTo0Start_batch.py | 1 | 2156 | #!/usr/bin/env python3
# Version 1.0
# Author Alexis Blanchet-Cohen
# Date: 15/06/2014
import argparse
import glob
import os
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description='Generate scripts to convert bedgraph files from one-based start to zero-based start.')
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory.", default="convert1StartTo0Start")
parser.add_argument("-i", "--inputDirectory", help="Input directory with bedgraph files.", default="../bedgraph/methylation_counts_sorted/")
parser.add_argument("-o", "--outputDirectory", help="Output directory with sorted bedgraph files.", default="../bedgraph/methylation_counts_sorted_0_start/")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
# Process the command line arguments.
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
inputDirectory = os.path.abspath(args.inputDirectory)
outputDirectory = os.path.abspath(args.outputDirectory)
samples = util.getMergedsamples()
# Read configuration files.
config = util.readConfigurationFiles()
# Create scripts directory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory):
os.mkdir(scriptsDirectory)
os.chdir(scriptsDirectory)
# Create output directory, if it does not exist yet.
if not os.path.exists(outputDirectory):
os.mkdir(outputDirectory)
for file in os.listdir(inputDirectory):
file = os.path.splitext(file)[0]
# Create script file.
scriptName = 'convert1StartTo0Start_' + file + '.sh'
script = open(scriptName, 'w')
util.writeHeader(script, config, "convert1StartTo0Start")
script.write("convert1StartTo0Start.py " + "\\\n")
script.write("--one_start_bedgraph " + inputDirectory + "/" + file + ".bedgraph " + "\\\n")
script.write("--zero_start_bedgraph " + outputDirectory + "/" + file + ".bedgraph")
script.close()
if (args.submitJobsToQueue.lower() == "yes") | (args.submitJobsToQueue.lower() == "y"):
subprocess.call("submitJobs.py", shell=True)
| gpl-3.0 | -3,264,305,320,030,537,000 | 40.461538 | 157 | 0.728664 | false |
bigfatnoob/DISCAW | Models/nasa93.py | 1 | 7953 | """
# The NASA93 Data Set
Standard header:
"""
from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
"""
Data:
Possible Splits= ["variance", "centroid", "median"]
"""
def nasa93(weighFeature = False, split = "variance"):
vl=1;l=2;n=3;h=4;vh=5;xh=6;_=0
return data(indep= [
# 0..8
'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data', 'cplx', 'ruse',
# 9 .. 17
'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex',
# 18 .. 25
'ltex', 'tool', 'site', 'sced', 'kloc'],
less = ['effort', 'defects', 'months'],
_rows = [
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,25.9,117.6,808,15.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,24.6,117.6,767,15.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,7.7,31.2,240,10.1],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,8.2,36,256,10.4],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,9.7,25.2,302,11.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,2.2,8.4,69,6.6],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,3.5,10.8,109,7.8],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,66.6,352.8,2077,21.0],
[h,h,h,vh,h,h,l,h,n,n,xh,xh,l,h,h,n,h,n,h,h,n,n,7.5,72,226,13.6],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,vh,n,vh,n,h,n,n,n,20,72,566,14.4],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,6,24,188,9.9],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,vh,n,vh,n,h,n,n,n,100,360,2832,25.2],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,n,n,vh,n,l,n,n,n,11.3,36,456,12.8],
[h,h,h,vh,n,n,l,h,n,n,n,n,h,h,h,n,h,l,vl,n,n,n,100,215,5434,30.1],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,20,48,626,15.1],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,n,n,n,n,vl,n,n,n,100,360,4342,28.0],
[h,h,h,vh,n,n,l,h,n,n,n,xh,l,h,vh,n,vh,n,h,n,n,n,150,324,4868,32.5],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,h,n,h,n,n,n,31.5,60,986,17.6],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,15,48,470,13.6],
[h,h,h,vh,n,n,l,h,n,n,n,xh,l,h,n,n,h,n,h,n,n,n,32.5,60,1276,20.8],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,19.7,60,614,13.9],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,66.6,300,2077,21.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,29.5,120,920,16.0],
[h,h,h,vh,n,h,n,n,n,n,h,n,n,n,h,n,h,n,n,n,n,n,15,90,575,15.2],
[h,h,h,vh,n,h,n,h,n,n,n,n,n,n,h,n,h,n,n,n,n,n,38,210,1553,21.3],
[h,h,h,vh,n,n,n,n,n,n,n,n,n,n,h,n,h,n,n,n,n,n,10,48,427,12.4],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,15.4,70,765,14.5],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,48.5,239,2409,21.4],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,16.3,82,810,14.8],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,12.8,62,636,13.6],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,32.6,170,1619,18.7],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,35.5,192,1763,19.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,5.5,18,172,9.1],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,10.4,50,324,11.2],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,14,60,437,12.4],
[h,h,h,vh,n,h,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,6.5,42,290,12.0],
[h,h,h,vh,n,n,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,13,60,683,14.8],
[h,h,h,vh,h,n,n,h,n,n,n,n,n,n,h,n,n,n,h,h,n,n,90,444,3343,26.7],
[h,h,h,vh,n,n,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,8,42,420,12.5],
[h,h,h,vh,n,n,n,h,n,n,h,n,n,n,n,n,n,n,n,n,n,n,16,114,887,16.4],
[h,h,h,vh,h,n,h,h,n,n,vh,h,l,h,h,n,n,l,h,n,n,l,177.9,1248,7998,31.5],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,h,n,n,n,n,n,n,n,302,2400,8543,38.4],
[h,h,h,vh,h,n,h,l,n,n,n,n,h,h,n,n,h,n,n,h,n,n,282.1,1368,9820,37.3],
[h,h,h,vh,h,h,h,l,n,n,n,n,n,h,n,n,h,n,n,n,n,n,284.7,973,8518,38.1],
[h,h,h,vh,n,h,h,n,n,n,n,n,l,n,h,n,h,n,h,n,n,n,79,400,2327,26.9],
[h,h,h,vh,l,l,n,n,n,n,n,n,l,h,vh,n,h,n,h,n,n,n,423,2400,18447,41.9],
[h,h,h,vh,h,n,n,n,n,n,n,n,l,h,vh,n,vh,l,h,n,n,n,190,420,5092,30.3],
[h,h,h,vh,h,n,n,h,n,n,n,h,n,h,n,n,h,n,h,n,n,n,47.5,252,2007,22.3],
[h,h,h,vh,l,vh,n,xh,n,n,h,h,l,n,n,n,h,n,n,h,n,n,21,107,1058,21.3],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,78,571.4,4815,30.5],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,11.4,98.8,704,15.5],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,19.3,155,1191,18.6],
[h,h,h,vh,l,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,101,750,4840,32.4],
[h,h,h,vh,l,h,n,h,n,n,h,h,l,n,n,n,h,n,n,n,n,n,219,2120,11761,42.8],
[h,h,h,vh,l,h,n,h,n,n,h,h,l,n,n,n,h,n,n,n,n,n,50,370,2685,25.4],
[h,h,h,vh,h,vh,h,h,n,n,vh,vh,n,vh,vh,n,vh,n,h,h,n,l,227,1181,6293,33.8],
[h,h,h,vh,h,n,h,vh,n,n,n,n,l,h,vh,n,n,l,n,n,n,l,70,278,2950,20.2],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,0.9,8.4,28,4.9],
[h,h,h,vh,l,vh,l,xh,n,n,xh,vh,l,h,h,n,vh,vl,h,n,n,n,980,4560,50961,96.4],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,vh,vh,n,n,h,h,n,n,n,350,720,8547,35.7],
[h,h,h,vh,h,h,n,xh,n,n,h,h,l,h,n,n,n,h,h,h,n,n,70,458,2404,27.5],
[h,h,h,vh,h,h,n,xh,n,n,h,h,l,h,n,n,n,h,h,h,n,n,271,2460,9308,43.4],
[h,h,h,vh,n,n,n,n,n,n,n,n,l,h,h,n,h,n,h,n,n,n,90,162,2743,25.0],
[h,h,h,vh,n,n,n,n,n,n,n,n,l,h,h,n,h,n,h,n,n,n,40,150,1219,18.9],
[h,h,h,vh,n,h,n,h,n,n,h,n,l,h,h,n,h,n,h,n,n,n,137,636,4210,32.2],
[h,h,h,vh,n,h,n,h,n,n,h,n,h,h,h,n,h,n,h,n,n,n,150,882,5848,36.2],
[h,h,h,vh,n,vh,n,h,n,n,h,n,l,h,h,n,h,n,h,n,n,n,339,444,8477,45.9],
[h,h,h,vh,n,l,h,l,n,n,n,n,h,h,h,n,h,n,h,n,n,n,240,192,10313,37.1],
[h,h,h,vh,l,h,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,144,576,6129,28.8],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,151,432,6136,26.2],
[h,h,h,vh,l,n,l,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,34,72,1555,16.2],
[h,h,h,vh,l,n,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,98,300,4907,24.4],
[h,h,h,vh,l,n,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,85,300,4256,23.2],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,20,240,813,12.8],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,111,600,4511,23.5],
[h,h,h,vh,l,h,vh,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,162,756,7553,32.4],
[h,h,h,vh,l,h,h,vh,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,352,1200,17597,42.9],
[h,h,h,vh,l,h,n,vh,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,165,97,7867,31.5],
[h,h,h,vh,h,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,60,409,2004,24.9],
[h,h,h,vh,h,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,100,703,3340,29.6],
[h,h,h,vh,n,h,vh,vh,n,n,xh,xh,h,n,n,n,n,l,l,n,n,n,32,1350,2984,33.6],
[h,h,h,vh,h,h,h,h,n,n,vh,xh,h,h,h,n,h,h,h,n,n,n,53,480,2227,28.8],
[h,h,h,vh,h,h,l,vh,n,n,vh,xh,l,vh,vh,n,vh,vl,vl,h,n,n,41,599,1594,23.0],
[h,h,h,vh,h,h,l,vh,n,n,vh,xh,l,vh,vh,n,vh,vl,vl,h,n,n,24,430,933,19.2],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,165,4178.2,6266,47.3],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,65,1772.5,2468,34.5],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,70,1645.9,2658,35.4],
[h,h,h,vh,h,vh,h,xh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,50,1924.5,2102,34.2],
[h,h,h,vh,l,vh,l,vh,n,n,vh,xh,l,h,n,n,l,vl,l,h,n,n,7.25,648,406,15.6],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,233,8211,8848,53.1],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n,16.3,480,1253,21.5],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n, 6.2, 12,477,15.4],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n, 3.0, 38,231,12.0],
],
_tunings =[[
# vlow low nom high vhigh xhigh
#scale factors:
'Prec', 6.20, 4.96, 3.72, 2.48, 1.24, _ ],[
'Flex', 5.07, 4.05, 3.04, 2.03, 1.01, _ ],[
'Resl', 7.07, 5.65, 4.24, 2.83, 1.41, _ ],[
'Pmat', 7.80, 6.24, 4.68, 3.12, 1.56, _ ],[
'Team', 5.48, 4.38, 3.29, 2.19, 1.01, _ ]],
weighFeature = weighFeature,
_split = split
)
"""
Demo code:
"""
def _nasa93(): print(nasa93())
#if __name__ == '__main__': eval(todo('_nasa93()'))
| mit | -304,085,712,307,642,560 | 56.215827 | 79 | 0.50044 | false |
eshijia/magnum | magnum/db/sqlalchemy/api.py | 1 | 39042 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils as db_utils
from oslo_log import log
from oslo_utils import timeutils
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy.orm.exc import NoResultFound
from magnum.common import exception
from magnum.common import utils
from magnum.db import api
from magnum.db.sqlalchemy import models
from magnum.i18n import _
CONF = cfg.CONF
LOG = log.getLogger(__name__)
_FACADE = None
def _create_facade_lazily():
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade.from_config(CONF)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
def get_backend():
"""The backend is this module itself."""
return Connection()
def model_query(model, *args, **kwargs):
"""Query helper for simpler session usage.
:param session: if present, the session to use
"""
session = kwargs.get('session') or get_session()
query = session.query(model, *args)
return query
def add_identity_filter(query, value):
"""Adds an identity filter to a query.
Filters results by ID, if supplied value is a valid integer.
Otherwise attempts to filter results by UUID.
:param query: Initial query to add filter to.
:param value: Value for filtering results by.
:return: Modified query.
"""
if utils.is_int_like(value):
return query.filter_by(id=value)
elif utils.is_uuid_like(value):
return query.filter_by(uuid=value)
else:
raise exception.InvalidIdentity(identity=value)
def _paginate_query(model, limit=None, marker=None, sort_key=None,
sort_dir=None, query=None):
if not query:
query = model_query(model)
sort_keys = ['id']
if sort_key and sort_key not in sort_keys:
sort_keys.insert(0, sort_key)
try:
query = db_utils.paginate_query(query, model, limit, sort_keys,
marker=marker, sort_dir=sort_dir)
except db_exc.InvalidSortKey:
raise exception.InvalidParameterValue(
_('The sort_key value "%(key)s" is an invalid field for sorting')
% {'key': sort_key})
return query.all()
class Connection(api.Connection):
"""SqlAlchemy connection."""
def __init__(self):
pass
def _add_tenant_filters(self, context, query):
if context.is_admin and context.all_tenants:
return query
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
return query
def _add_bays_filters(self, query, filters):
if filters is None:
filters = []
if 'baymodel_id' in filters:
query = query.filter_by(baymodel_id=filters['baymodel_id'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'node_count' in filters:
query = query.filter_by(node_count=filters['node_count'])
if 'master_count' in filters:
query = query.filter_by(master_count=filters['master_count'])
if 'stack_id' in filters:
query = query.filter_by(stack_id=filters['stack_id'])
if 'api_address' in filters:
query = query.filter_by(api_address=filters['api_address'])
if 'node_addresses' in filters:
query = query.filter_by(node_addresses=filters['node_addresses'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
if 'status' in filters:
query = query.filter(models.Bay.status.in_(filters['status']))
return query
def get_bay_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = self._add_bays_filters(query, filters)
return _paginate_query(models.Bay, limit, marker,
sort_key, sort_dir, query)
def create_bay(self, values):
# ensure defaults are present for new bays
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
bay = models.Bay()
bay.update(values)
try:
bay.save()
except db_exc.DBDuplicateEntry:
raise exception.BayAlreadyExists(uuid=values['uuid'])
return bay
def get_bay_by_id(self, context, bay_id):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=bay_id)
try:
return query.one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_id)
def get_bay_by_name(self, context, bay_name):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=bay_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple bays exist with same name.'
' Please use the bay uuid instead.')
except NoResultFound:
raise exception.BayNotFound(bay=bay_name)
def get_bay_by_uuid(self, context, bay_uuid):
query = model_query(models.Bay)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=bay_uuid)
try:
return query.one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_uuid)
def destroy_bay(self, bay_id):
def destroy_bay_resources(session, bay_uuid):
"""Checks whether the bay does not have resources."""
query = model_query(models.Pod, session=session)
query = self._add_pods_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
query = model_query(models.Service, session=session)
query = self._add_services_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
query = model_query(models.ReplicationController, session=session)
query = self._add_rcs_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
query = model_query(models.Container, session=session)
query = self._add_containers_filters(query, {'bay_uuid': bay_uuid})
if query.count() != 0:
query.delete()
session = get_session()
with session.begin():
query = model_query(models.Bay, session=session)
query = add_identity_filter(query, bay_id)
try:
bay_ref = query.one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_id)
destroy_bay_resources(session, bay_ref['uuid'])
query.delete()
def update_bay(self, bay_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Bay.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_bay(bay_id, values)
def _do_update_bay(self, bay_id, values):
session = get_session()
with session.begin():
query = model_query(models.Bay, session=session)
query = add_identity_filter(query, bay_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.BayNotFound(bay=bay_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def create_bay_lock(self, bay_uuid, conductor_id):
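        # Lock protocol (noted for clarity): returns None when the lock is
        # acquired and the holder's conductor_id when the bay is already
        # locked, so the caller can decide whether to retry or steal it.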
session = get_session()
with session.begin():
query = model_query(models.BayLock, session=session)
lock = query.filter_by(bay_uuid=bay_uuid).first()
if lock is not None:
return lock.conductor_id
session.add(models.BayLock(bay_uuid=bay_uuid,
conductor_id=conductor_id))
def steal_bay_lock(self, bay_uuid, old_conductor_id, new_conductor_id):
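        # Returns True when there is no lock to steal, the holder's
        # conductor_id when it is held by someone other than old_conductor_id,
        # and None when the lock is successfully re-assigned.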
session = get_session()
with session.begin():
query = model_query(models.BayLock, session=session)
lock = query.filter_by(bay_uuid=bay_uuid).first()
if lock is None:
return True
elif lock.conductor_id != old_conductor_id:
return lock.conductor_id
else:
lock.update({'conductor_id': new_conductor_id})
def release_bay_lock(self, bay_uuid, conductor_id):
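        # Returns True when no matching lock row exists (nothing released),
        # None when the lock held by conductor_id is removed.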
session = get_session()
with session.begin():
query = model_query(models.BayLock, session=session)
query = query.filter_by(bay_uuid=bay_uuid,
conductor_id=conductor_id)
count = query.delete()
if count == 0:
return True
def _add_baymodels_filters(self, query, filters):
if filters is None:
filters = []
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'image_id' in filters:
query = query.filter_by(image_id=filters['image_id'])
if 'flavor_id' in filters:
query = query.filter_by(flavor_id=filters['flavor_id'])
if 'master_flavor_id' in filters:
query = query.filter_by(
master_flavor_id=filters['master_flavor_id'])
if 'keypair_id' in filters:
query = query.filter_by(keypair_id=filters['keypair_id'])
if 'external_network_id' in filters:
query = query.filter_by(
external_network_id=filters['external_network_id'])
if 'dns_nameserver' in filters:
query = query.filter_by(dns_nameserver=filters['dns_nameserver'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
if 'labels' in filters:
query = query.filter_by(labels=filters['labels'])
return query
def get_baymodel_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
query = self._add_baymodels_filters(query, filters)
return _paginate_query(models.BayModel, limit, marker,
sort_key, sort_dir, query)
def create_baymodel(self, values):
# ensure defaults are present for new baymodels
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
baymodel = models.BayModel()
baymodel.update(values)
try:
baymodel.save()
except db_exc.DBDuplicateEntry:
raise exception.BayModelAlreadyExists(uuid=values['uuid'])
return baymodel
def get_baymodel_by_id(self, context, baymodel_id):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=baymodel_id)
try:
return query.one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_id)
def get_baymodel_by_uuid(self, context, baymodel_uuid):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=baymodel_uuid)
try:
return query.one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_uuid)
def get_baymodel_by_name(self, context, baymodel_name):
query = model_query(models.BayModel)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=baymodel_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple baymodels exist with same name.'
' Please use the baymodel uuid instead.')
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_name)
def destroy_baymodel(self, baymodel_id):
def is_baymodel_referenced(session, baymodel_uuid):
"""Checks whether the baymodel is referenced by bay(s)."""
query = model_query(models.Bay, session=session)
query = self._add_bays_filters(query,
{'baymodel_id': baymodel_uuid})
return query.count() != 0
session = get_session()
with session.begin():
query = model_query(models.BayModel, session=session)
query = add_identity_filter(query, baymodel_id)
try:
baymodel_ref = query.one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_id)
if is_baymodel_referenced(session, baymodel_ref['uuid']):
raise exception.BayModelReferenced(baymodel=baymodel_id)
query.delete()
def update_baymodel(self, baymodel_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing BayModel.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_baymodel(baymodel_id, values)
def _do_update_baymodel(self, baymodel_id, values):
session = get_session()
with session.begin():
query = model_query(models.BayModel, session=session)
query = add_identity_filter(query, baymodel_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.BayModelNotFound(baymodel=baymodel_id)
ref.update(values)
return ref
def _add_containers_filters(self, query, filters):
if filters is None:
filters = []
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'image' in filters:
query = query.filter_by(image=filters['image'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
return query
def get_container_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
query = model_query(models.Container)
query = self._add_tenant_filters(context, query)
query = self._add_containers_filters(query, filters)
return _paginate_query(models.Container, limit, marker,
sort_key, sort_dir, query)
def create_container(self, values):
# ensure defaults are present for new containers
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
container = models.Container()
container.update(values)
try:
container.save()
except db_exc.DBDuplicateEntry:
raise exception.ContainerAlreadyExists(uuid=values['uuid'])
return container
def get_container_by_id(self, context, container_id):
query = model_query(models.Container)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=container_id)
try:
return query.one()
except NoResultFound:
raise exception.ContainerNotFound(container=container_id)
def get_container_by_uuid(self, context, container_uuid):
query = model_query(models.Container)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=container_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ContainerNotFound(container=container_uuid)
def get_container_by_name(self, context, container_name):
query = model_query(models.Container)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=container_name)
try:
return query.one()
except NoResultFound:
raise exception.ContainerNotFound(container=container_name)
except MultipleResultsFound:
raise exception.Conflict('Multiple containers exist with same '
'name. Please use the container uuid '
'instead.')
def destroy_container(self, container_id):
session = get_session()
with session.begin():
query = model_query(models.Container, session=session)
query = add_identity_filter(query, container_id)
count = query.delete()
if count != 1:
raise exception.ContainerNotFound(container_id)
def update_container(self, container_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Container.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_container(container_id, values)
def _do_update_container(self, container_id, values):
session = get_session()
with session.begin():
query = model_query(models.Container, session=session)
query = add_identity_filter(query, container_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ContainerNotFound(container=container_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def _add_nodes_filters(self, query, filters):
if filters is None:
filters = []
if 'associated' in filters:
if filters['associated']:
query = query.filter(models.Node.ironic_node_id != None)
else:
query = query.filter(models.Node.ironic_node_id == None)
if 'type' in filters:
query = query.filter_by(type=filters['type'])
if 'image_id' in filters:
query = query.filter_by(image_id=filters['image_id'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
return query
def get_node_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Node)
query = self._add_tenant_filters(context, query)
query = self._add_nodes_filters(query, filters)
return _paginate_query(models.Node, limit, marker,
sort_key, sort_dir, query)
def create_node(self, values):
# ensure defaults are present for new nodes
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
node = models.Node()
node.update(values)
try:
node.save()
except db_exc.DBDuplicateEntry as exc:
if 'ironic_node_id' in exc.columns:
raise exception.InstanceAssociated(
instance_uuid=values['ironic_node_id'],
node=values['uuid'])
raise exception.NodeAlreadyExists(uuid=values['uuid'])
return node
def get_node_by_id(self, context, node_id):
query = model_query(models.Node)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=node_id)
try:
return query.one()
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
def get_node_by_uuid(self, context, node_uuid):
query = model_query(models.Node)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=node_uuid)
try:
return query.one()
except NoResultFound:
raise exception.NodeNotFound(node=node_uuid)
def destroy_node(self, node_id):
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query = add_identity_filter(query, node_id)
count = query.delete()
if count != 1:
raise exception.NodeNotFound(node_id)
def update_node(self, node_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Node.")
raise exception.InvalidParameterValue(err=msg)
try:
return self._do_update_node(node_id, values)
except db_exc.DBDuplicateEntry:
raise exception.InstanceAssociated(
instance_uuid=values['ironic_node_id'],
node=node_id)
def _do_update_node(self, node_id, values):
session = get_session()
with session.begin():
query = model_query(models.Node, session=session)
query = add_identity_filter(query, node_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.NodeNotFound(node=node_id)
# Prevent ironic_node_id overwriting
if values.get("ironic_node_id") and ref.ironic_node_id:
raise exception.NodeAssociated(
node=node_id,
instance=ref.ironic_node_id)
ref.update(values)
return ref
def _add_pods_filters(self, query, filters):
if filters is None:
filters = []
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'status' in filters:
query = query.filter_by(status=filters['status'])
return query
def get_pod_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Pod)
query = self._add_tenant_filters(context, query)
query = self._add_pods_filters(query, filters)
return _paginate_query(models.Pod, limit, marker,
sort_key, sort_dir, query)
def create_pod(self, values):
# ensure defaults are present for new pods
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
pod = models.Pod()
pod.update(values)
try:
pod.save()
except db_exc.DBDuplicateEntry:
raise exception.PodAlreadyExists(uuid=values['uuid'])
return pod
def get_pod_by_id(self, context, pod_id):
query = model_query(models.Pod)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=pod_id)
try:
return query.one()
except NoResultFound:
raise exception.PodNotFound(pod=pod_id)
def get_pod_by_uuid(self, context, pod_uuid):
query = model_query(models.Pod)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=pod_uuid)
try:
return query.one()
except NoResultFound:
raise exception.PodNotFound(pod=pod_uuid)
def get_pod_by_name(self, pod_name):
query = model_query(models.Pod).filter_by(name=pod_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple pods exist with same name.'
' Please use the pod uuid instead.')
except NoResultFound:
raise exception.PodNotFound(pod=pod_name)
def destroy_pod(self, pod_id):
session = get_session()
with session.begin():
query = model_query(models.Pod, session=session)
query = add_identity_filter(query, pod_id)
count = query.delete()
if count != 1:
raise exception.PodNotFound(pod_id)
def update_pod(self, pod_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Pod.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_pod(pod_id, values)
def _do_update_pod(self, pod_id, values):
session = get_session()
with session.begin():
query = model_query(models.Pod, session=session)
query = add_identity_filter(query, pod_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.PodNotFound(pod=pod_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def _add_services_filters(self, query, filters):
if filters is None:
filters = []
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'ip' in filters:
query = query.filter_by(ip=filters['ip'])
if 'ports' in filters:
query = query.filter_by(ports=filters['ports'])
return query
def get_service_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = self._add_services_filters(query, filters)
return _paginate_query(models.Service, limit, marker,
sort_key, sort_dir, query)
def create_service(self, values):
# ensure defaults are present for new services
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
service = models.Service()
service.update(values)
try:
service.save()
except db_exc.DBDuplicateEntry:
raise exception.ServiceAlreadyExists(uuid=values['uuid'])
return service
def get_service_by_id(self, context, service_id):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=service_id)
try:
return query.one()
except NoResultFound:
raise exception.ServiceNotFound(service=service_id)
def get_service_by_uuid(self, context, service_uuid):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=service_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ServiceNotFound(service=service_uuid)
def get_services_by_bay_uuid(self, context, bay_uuid):
# First verify whether the Bay exists
self.get_bay_by_uuid(context, bay_uuid)
query = model_query(models.Service).filter_by(bay_uuid=bay_uuid)
try:
return query.all()
except NoResultFound:
raise exception.ServiceNotFound(bay=bay_uuid)
def get_service_by_name(self, context, service_name):
query = model_query(models.Service)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=service_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple services exist with same name.'
' Please use the service uuid instead.')
except NoResultFound:
raise exception.ServiceNotFound(service=service_name)
def destroy_service(self, service_id):
session = get_session()
with session.begin():
query = model_query(models.Service, session=session)
query = add_identity_filter(query, service_id)
count = query.delete()
if count != 1:
raise exception.ServiceNotFound(service_id)
def update_service(self, service_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing Service.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_service(service_id, values)
def _do_update_service(self, service_id, values):
session = get_session()
with session.begin():
query = model_query(models.Service, session=session)
query = add_identity_filter(query, service_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ServiceNotFound(service=service_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def _add_rcs_filters(self, query, filters):
if filters is None:
filters = []
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'replicas' in filters:
query = query.filter_by(replicas=filters['replicas'])
return query
def get_rc_list(self, context, filters=None, limit=None, marker=None,
sort_key=None, sort_dir=None):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = self._add_rcs_filters(query, filters)
return _paginate_query(models.ReplicationController, limit, marker,
sort_key, sort_dir, query)
def create_rc(self, values):
# ensure defaults are present for new ReplicationController
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
rc = models.ReplicationController()
rc.update(values)
try:
rc.save()
except db_exc.DBDuplicateEntry:
raise exception.ReplicationControllerAlreadyExists(
uuid=values['uuid'])
return rc
def get_rc_by_id(self, context, rc_id):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=rc_id)
try:
return query.one()
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_id)
def get_rc_by_uuid(self, context, rc_uuid):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=rc_uuid)
try:
return query.one()
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_uuid)
def get_rcs_by_bay_uuid(self, context, bay_uuid):
# First verify whether the Bay exists
self.get_bay_by_uuid(context, bay_uuid)
query = model_query(models.ReplicationController).filter_by(
bay_uuid=bay_uuid)
try:
return query.all()
except NoResultFound:
raise exception.ReplicationControllerNotFound(bay=bay_uuid)
def get_rc_by_name(self, context, rc_name):
query = model_query(models.ReplicationController)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=rc_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple rcs exist with same name.'
' Please use the rc uuid instead.')
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_name)
def destroy_rc(self, rc_id):
session = get_session()
with session.begin():
query = model_query(models.ReplicationController, session=session)
query = add_identity_filter(query, rc_id)
count = query.delete()
if count != 1:
raise exception.ReplicationControllerNotFound(rc_id)
def update_rc(self, rc_id, values):
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing rc.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_rc(rc_id, values)
def _do_update_rc(self, rc_id, values):
session = get_session()
with session.begin():
query = model_query(models.ReplicationController, session=session)
query = add_identity_filter(query, rc_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.ReplicationControllerNotFound(rc=rc_id)
ref.update(values)
return ref
def create_x509keypair(self, values):
# ensure defaults are present for new x509keypairs
if not values.get('uuid'):
values['uuid'] = utils.generate_uuid()
x509keypair = models.X509KeyPair()
x509keypair.update(values)
try:
x509keypair.save()
except db_exc.DBDuplicateEntry:
raise exception.X509KeyPairAlreadyExists(uuid=values['uuid'])
return x509keypair
def get_x509keypair_by_id(self, context, x509keypair_id):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(id=x509keypair_id)
try:
return query.one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
def get_x509keypair_by_name(self, context, x509keypair_name):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(name=x509keypair_name)
try:
return query.one()
except MultipleResultsFound:
raise exception.Conflict('Multiple x509keypairs exist with '
'same name. Please use the x509keypair '
'uuid instead.')
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_name)
def get_x509keypair_by_uuid(self, context, x509keypair_uuid):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = query.filter_by(uuid=x509keypair_uuid)
try:
return query.one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_uuid)
def destroy_x509keypair(self, x509keypair_id):
session = get_session()
with session.begin():
query = model_query(models.X509KeyPair, session=session)
query = add_identity_filter(query, x509keypair_id)
count = query.delete()
if count != 1:
raise exception.X509KeyPairNotFound(x509keypair_id)
def update_x509keypair(self, x509keypair_id, values):
# NOTE(dtantsur): this can lead to very strange errors
if 'uuid' in values:
msg = _("Cannot overwrite UUID for an existing X509KeyPair.")
raise exception.InvalidParameterValue(err=msg)
return self._do_update_x509keypair(x509keypair_id, values)
def _do_update_x509keypair(self, x509keypair_id, values):
session = get_session()
with session.begin():
query = model_query(models.X509KeyPair, session=session)
query = add_identity_filter(query, x509keypair_id)
try:
ref = query.with_lockmode('update').one()
except NoResultFound:
raise exception.X509KeyPairNotFound(x509keypair=x509keypair_id)
if 'provision_state' in values:
values['provision_updated_at'] = timeutils.utcnow()
ref.update(values)
return ref
def _add_x509keypairs_filters(self, query, filters):
if filters is None:
filters = []
if 'bay_uuid' in filters:
query = query.filter_by(bay_uuid=filters['bay_uuid'])
if 'name' in filters:
query = query.filter_by(name=filters['name'])
if 'project_id' in filters:
query = query.filter_by(project_id=filters['project_id'])
if 'user_id' in filters:
query = query.filter_by(user_id=filters['user_id'])
return query
def get_x509keypair_list(self, context, filters=None, limit=None,
marker=None, sort_key=None, sort_dir=None):
query = model_query(models.X509KeyPair)
query = self._add_tenant_filters(context, query)
query = self._add_x509keypairs_filters(query, filters)
return _paginate_query(models.X509KeyPair, limit, marker,
sort_key, sort_dir, query)
| apache-2.0 | 5,895,803,285,767,630,000 | 36.978599 | 79 | 0.596102 | false |
jandom/rdkit | rdkit/Chem/Draw/qtCanvas.py | 1 | 3513 | # $Id$
#
# Copyright (C) 2014 Seiji Matsuoka
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit.Chem.Draw.canvasbase import CanvasBase
from PySide import QtGui, QtCore
class Canvas(CanvasBase):
def __init__(self, size):
self.size = size
self.qsize = QtCore.QSize(*size)
self.pixmap = QtGui.QPixmap(self.qsize)
self.painter = QtGui.QPainter(self.pixmap)
self.painter.setRenderHint(QtGui.QPainter.Antialiasing, True)
self.painter.setRenderHint(QtGui.QPainter.SmoothPixmapTransform, True)
self.painter.fillRect(0, 0, size[0], size[1], QtCore.Qt.white)
def addCanvasLine(self, p1, p2, color=(0, 0, 0), color2=None, **kwargs):
if 'dash' in kwargs:
line_type = QtCore.Qt.DashLine
else:
line_type = QtCore.Qt.SolidLine
qp1 = QtCore.QPointF(*p1)
qp2 = QtCore.QPointF(*p2)
qpm = QtCore.QPointF((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)
if color2 and color2 != color:
rgb = [int(c * 255) for c in color]
pen = QtGui.QPen(QtGui.QColor(*rgb), 1, line_type)
self.painter.setPen(pen)
self.painter.drawLine(qp1, qpm)
rgb2 = [int(c * 255) for c in color2]
pen.setColor(QtGui.QColor(*rgb2))
self.painter.setPen(pen)
self.painter.drawLine(qpm, qp2)
else:
rgb = [int(c * 255) for c in color]
pen = QtGui.QPen(QtGui.QColor(*rgb), 1, line_type)
self.painter.setPen(pen)
self.painter.drawLine(qp1, qp2)
def addCanvasText(self, text, pos, font, color=(0, 0, 0), **kwargs):
orientation = kwargs.get('orientation', 'E')
qfont = QtGui.QFont("Helvetica", font.size * 1.5)
qtext = QtGui.QTextDocument()
qtext.setDefaultFont(qfont)
colored = [int(c * 255) for c in color]
colored.append(text)
html_format = "<span style='color:rgb({},{},{})'>{}</span>"
formatted = html_format.format(*colored)
qtext.setHtml(formatted)
if orientation == 'N':
qpos = QtCore.QPointF(pos[0] - qtext.idealWidth() / 2, pos[1] - font.size)
elif orientation == 'W':
qpos = QtCore.QPointF(pos[0] - qtext.idealWidth() + font.size, pos[1] - font.size)
else:
qpos = QtCore.QPointF(pos[0] - font.size, pos[1] - font.size)
self.painter.save()
self.painter.translate(qpos)
qtext.drawContents(self.painter)
self.painter.restore()
return font.size * 1.8, font.size * 1.8, 0
def addCanvasPolygon(self, ps, color=(0, 0, 0), fill=True, stroke=False, **kwargs):
polygon = QtGui.QPolygonF()
for ver in ps:
polygon.append(QtCore.QPointF(*ver))
pen = QtGui.QPen(QtGui.QColor(*color), 1, QtCore.Qt.SolidLine)
self.painter.setPen(pen)
self.painter.setBrush(QtGui.QColor(0, 0, 0))
self.painter.drawPolygon(polygon)
def addCanvasDashedWedge(self, p1, p2, p3, dash=(2, 2), color=(0, 0, 0), color2=None, **kwargs):
rgb = [int(c * 255) for c in color]
pen = QtGui.QPen(QtGui.QColor(*rgb), 1, QtCore.Qt.SolidLine)
self.painter.setPen(pen)
dash = (4, 4)
pts1 = self._getLinePoints(p1, p2, dash)
pts2 = self._getLinePoints(p1, p3, dash)
if len(pts2) < len(pts1):
pts2, pts1 = pts1, pts2
for i in range(len(pts1)):
qp1 = QtCore.QPointF(pts1[i][0], pts1[i][1])
qp2 = QtCore.QPointF(pts2[i][0], pts2[i][1])
self.painter.drawLine(qp1, qp2)
def flush(self):
self.painter.end()
| bsd-3-clause | 2,263,329,650,974,698,000 | 35.59375 | 98 | 0.63877 | false |
mrocklin/streams | streamz/tests/test_sources.py | 1 | 2787 | from flaky import flaky
import pytest
from streamz import Source
from streamz.utils_test import wait_for, await_for, gen_test
import socket
@flaky(max_runs=3, min_passes=1)
def test_tcp():
port = 9876
s = Source.from_tcp(port)
out = s.sink_to_list()
s.start()
wait_for(lambda: s.server is not None, 2, period=0.02)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", port))
sock.send(b'data\n')
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", port))
sock.send(b'data\n')
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock2.connect(("localhost", port))
sock2.send(b'data2\n')
wait_for(lambda: out == [b'data\n', b'data\n', b'data2\n'], 2,
period=0.01)
finally:
s.stop()
sock.close()
sock2.close()
@flaky(max_runs=3, min_passes=1)
@gen_test(timeout=60)
def test_tcp_async():
port = 9876
s = Source.from_tcp(port)
out = s.sink_to_list()
s.start()
yield await_for(lambda: s.server is not None, 2, period=0.02)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", port))
sock.send(b'data\n')
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("localhost", port))
sock.send(b'data\n')
sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock2.connect(("localhost", port))
sock2.send(b'data2\n')
yield await_for(lambda: out == [b'data\n', b'data\n', b'data2\n'], 2,
period=0.01)
finally:
s.stop()
sock.close()
sock2.close()
def test_http():
requests = pytest.importorskip('requests')
port = 9875
s = Source.from_http_server(port)
out = s.sink_to_list()
s.start()
wait_for(lambda: s.server is not None, 2, period=0.02)
r = requests.post('http://localhost:%i/' % port, data=b'data')
wait_for(lambda: out == [b'data'], 2, period=0.01)
assert r.ok
r = requests.post('http://localhost:%i/other' % port, data=b'data2')
wait_for(lambda: out == [b'data', b'data2'], 2, period=0.01)
assert r.ok
s.stop()
with pytest.raises(requests.exceptions.RequestException):
requests.post('http://localhost:%i/other' % port, data=b'data2')
@flaky(max_runs=3, min_passes=1)
@gen_test(timeout=60)
def test_process():
cmd = ["python", "-c", "for i in range(4): print(i)"]
s = Source.from_process(cmd)
out = s.sink_to_list()
s.start()
yield await_for(lambda: out == [b'0\n', b'1\n', b'2\n', b'3\n'], timeout=5)
s.stop()
| bsd-3-clause | 1,888,732,176,384,832,300 | 27.731959 | 79 | 0.587729 | false |
ebmdatalab/openprescribing | openprescribing/dmd/build_search_query.py | 1 | 3506 | from django.db.models import fields, ForeignKey, ManyToOneRel, OneToOneRel, Q
from .obj_types import clss
from functools import reduce
def build_query_obj(cls, search):
"""Return Q object to filter dm+d objects based on search.
Parameters:
cls: class of dm+d object to search
search: a tree describing the search to be performed
See TestAdvancedSearchHelpers.test_build_query_obj for an example.
_build_query_obj_helper is a nested function to allow easier use of `map()`.
"""
def _build_query_obj_helper(search):
"""Do the work.
A branch node like:
["and", [node1, node2]]
will be transformed, recursively, into:
_build_query_obj_helper(node1) & _build_query_obj_helper(node2)
A leaf node like:
["nm", "contains", "paracetamol"]
will be transformed into:
Q(nm__icontains="paracetamol")
"""
assert len(search) in [2, 3]
if len(search) == 2:
# branch node
fn = {"and": Q.__and__, "or": Q.__or__}[search[0]]
clauses = list(map(_build_query_obj_helper, search[1]))
return reduce(fn, clauses[1:], clauses[0])
else:
# leaf node
field_name, operator, value = search
if field_name == "bnf_code":
if operator == "begins_with":
return Q(bnf_code__startswith=value)
elif operator == "not_begins_with":
return ~Q(bnf_code__startswith=value)
else:
assert False, operator
else:
key = _build_lookup_key(cls, field_name, operator)
kwargs = {key: value}
return Q(**kwargs)
return _build_query_obj_helper(search)
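# Illustrative search tree (hedged; the field name "nm" and the BNF prefix are
# assumptions used only for the example): a search for objects whose name
# contains "paracetamol" and whose BNF code begins with "0407" would be passed
# in as
#   ["and", [["nm", "contains", "paracetamol"],
#            ["bnf_code", "begins_with", "0407"]]]
# and build_query_obj(cls, search) returns
#   Q(nm__icontains="paracetamol") & Q(bnf_code__startswith="0407")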
def _build_lookup_key(cls, field_name, operator):
field = cls._meta.get_field(field_name)
builder = {
ForeignKey: _build_lookup_fk,
ManyToOneRel: _build_lookup_rev_fk,
OneToOneRel: _build_lookup_rev_fk,
fields.CharField: _build_lookup_char,
fields.DateField: _build_lookup_date,
fields.BooleanField: _build_lookup_boolean,
fields.DecimalField: _build_lookup_decimal,
}[type(field)]
return builder(cls, field_name, operator)
def _build_lookup_fk(cls, field_name, operator):
assert operator == "equal"
return field_name
def _build_lookup_rev_fk(cls, field_name, operator):
field = cls._meta.get_field(field_name)
intermediate_model = field.related_model
fk_fields = [
f
for f in intermediate_model._meta.get_fields()
if (
isinstance(f, ForeignKey)
and f.related_model not in clss
and "prev" not in f.name
)
]
assert len(fk_fields) == 1
return "{}__{}".format(field_name, fk_fields[0].name)
def _build_lookup_char(cls, field_name, operator):
lookup = {"contains": "icontains"}[operator]
return "{}__{}".format(field_name, lookup)
def _build_lookup_date(cls, field_name, operator):
lookup = {"equal": "exact", "before": "lt", "after": "gt"}[operator]
return "{}__{}".format(field_name, lookup)
def _build_lookup_boolean(cls, field_name, operator):
assert operator == "equal"
return field_name
def _build_lookup_decimal(cls, field_name, operator):
lookup = {"equal": "exact", "less than": "lt", "greater than": "gt"}[operator]
return "{}__{}".format(field_name, lookup)
| mit | 832,798,126,835,511,300 | 28.965812 | 82 | 0.587849 | false |
jsheffie/django-auth-experiments | djauth/quickstart/views.py | 1 | 2083 | from django.shortcuts import render
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from django.contrib.auth.decorators import login_required
from quickstart.serializers import UserSerializer, GroupSerializer
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth import logout
class UserViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows users to be viewed or edited.
    Note: setting the queryset and serializer_class attributes, rather than
    just a model attribute, gives us more control over the API behavior.
This is the recommended style for most applications.
"""
# http://django-rest-framework.org/api-guide/permissions#api-reference
permission_classes = ( IsAuthenticated, )
queryset = User.objects.all()
serializer_class = UserSerializer
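    # These viewsets are normally exposed through a DRF router in the URL conf,
    # e.g. (hedged sketch; this project's actual urls.py is not shown here):
    #   router = routers.DefaultRouter()
    #   router.register(r'users', UserViewSet)
    #   router.register(r'groups', GroupViewSet)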
class GroupViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
@login_required
def testing_users( request ):
ret_str = "Hello Authenticated user required. "
ret_str += "<br>User: %s" % ( request.user.username )
ret_str +="<br><a href='/logout/'>Logout</a>"
return HttpResponse( ret_str );
def no_auth_view( request ):
ret_str = "No Authenticated user required"
ret_str += "<br>User: %s" % ( request.user.username )
ret_str += "<br><a href='/auth/view/'>Auth Required</a>"
ret_str += "<br><a href='/no/auth/view/'>No Auth Required</a>"
ret_str +="<br><a href='/logout/'>Logout</a>"
return HttpResponse( ret_str );
@login_required
def auth_view( request ):
ret_str = "Authenticated user required"
ret_str += "<br>User: %s" % ( request.user.username )
ret_str += "<br><a href='/auth/view/'>Auth Required</a>"
ret_str += "<br><a href='/no/auth/view/'>No Auth Required</a>"
ret_str +="<br><a href='/logout/'>Logout</a>"
return HttpResponse( ret_str );
def logout_view(request):
logout( request )
return HttpResponseRedirect(redirect_to="/no/auth/view/");
| mit | -253,710,595,698,105,470 | 33.716667 | 74 | 0.723476 | false |
CINPLA/exana | exana/tracking/fields.py | 1 | 32391 | import numpy as np
def spatial_rate_map(x, y, t, spike_train, binsize=0.01, box_xlen=1,
box_ylen=1, mask_unvisited=True, convolve=True,
return_bins=False, smoothing=0.02):
"""Divide a 2D space in bins of size binsize**2, count the number of spikes
in each bin and divide by the time spent in respective bins. The map can
then be convolved with a gaussian kernel of size csize determined by the
smoothing factor, binsize and box_xlen.
Parameters
----------
spike_train : neo.SpikeTrain
x : float
1d vector of x positions
y : float
1d vector of y positions
t : float
1d vector of times at x, y positions
binsize : float
spatial binsize
    box_xlen : float
        side length of quadratic box in the x direction
    box_ylen : float
        side length of quadratic box in the y direction
mask_unvisited: bool
mask bins which has not been visited by nans
convolve : bool
convolve the rate map with a 2D Gaussian kernel
Returns
-------
out : rate map
if return_bins = True
out : rate map, xbins, ybins
"""
if not all([len(var) == len(var2) for var in [x,y,t] for var2 in [x,y,t]]):
raise ValueError('x, y, t must have same number of elements')
if box_xlen < x.max() or box_ylen < y.max():
raise ValueError('box length must be larger or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0 or remaindery != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
# interpolate one extra timepoint
t_ = np.append(t, t[-1] + np.median(np.diff(t)))
spikes_in_bin, _ = np.histogram(spike_train, t_)
time_in_bin = np.diff(t_)
xbins = np.arange(0, box_xlen + binsize, binsize)
ybins = np.arange(0, box_ylen + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
iy = np.digitize(y, ybins, right=True)
spike_pos = np.zeros((xbins.size, ybins.size))
time_pos = np.zeros((xbins.size, ybins.size))
for n in range(len(x)):
spike_pos[ix[n], iy[n]] += spikes_in_bin[n]
time_pos[ix[n], iy[n]] += time_in_bin[n]
# correct for shifting of map
spike_pos = spike_pos[1:, 1:]
time_pos = time_pos[1:, 1:]
with np.errstate(divide='ignore', invalid='ignore'):
rate = np.divide(spike_pos, time_pos)
if convolve:
rate[np.isnan(rate)] = 0. # for convolution
from astropy.convolution import Gaussian2DKernel, convolve_fft
csize = (box_xlen / binsize) * smoothing
kernel = Gaussian2DKernel(csize)
rate = convolve_fft(rate, kernel) # TODO edge correction
if mask_unvisited:
was_in_bin = np.asarray(time_pos, dtype=bool)
rate[np.invert(was_in_bin)] = np.nan
if return_bins:
return rate.T, xbins, ybins
else:
return rate.T
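# Illustrative usage sketch, not part of the original module: build a random
# walk and a Poisson-like spike train, then bin them with spatial_rate_map.
# All parameter values below are assumptions chosen only for the example;
# convolution requires astropy, as in the function itself.
def _demo_spatial_rate_map(n_samples=10000, mean_rate=5., seed=42):
    """Return a smoothed rate map computed from synthetic tracking data."""
    rng = np.random.RandomState(seed)
    t = np.linspace(0., 600., n_samples)  # a 10 minute session
    x = np.abs(np.cumsum(rng.uniform(-1e-3, 1e-3, n_samples))) % 1.
    y = np.abs(np.cumsum(rng.uniform(-1e-3, 1e-3, n_samples))) % 1.
    n_spikes = rng.poisson(mean_rate * t[-1])
    spike_train = np.sort(rng.uniform(0., t[-1], n_spikes))
    return spatial_rate_map(x, y, t, spike_train, binsize=0.02,
                            box_xlen=1., box_ylen=1., smoothing=0.02)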
def gridness(rate_map, box_xlen, box_ylen, return_acorr=False,
step_size=0.1, method='iter', return_masked_acorr=False):
    '''Calculates gridness of a rate map. Calculates the normalized
    autocorrelation (A) of a rate map B where A is given as
    A(\tau_x, \tau_y) = 1/n \sum_{x,y}(B(x, y) - \bar{B})
    (B(x + \tau_x, y + \tau_y) - \bar{B}) / \sigma_{B}^{2}.
    Further, the Pearson's product-moment correlation coefficient is
    calculated between A and A rotated 30 and 60 degrees. Finally the gridness
    is calculated as the difference between the minimum of coefficients at 60
    degrees and the maximum of coefficients at 30 degrees, i.e.
    gridness = min(r60) - max(r30).
    If the method 'iter' is chosen:
    In order to focus the analysis on the symmetry of A, the gridness is
    maximized over ring-shaped masks of A whose inner and outer radii are
    varied in steps of ``step_size``.
    If the method 'puncture' is chosen:
    This is the standard way of calculating gridness, by masking the central
    autocorrelation bump, in addition to rounding the map. See examples.
Parameters
----------
rate_map : numpy.ndarray
box_xlen : float
side length of quadratic box
step_size : float
step size in masking, only applies to the method "iter"
return_acorr : bool
return autocorrelation map or not
return_masked_acorr : bool
return masked autocorrelation map or not
method : 'iter' or 'puncture'
Returns
-------
out : gridness, (autocorrelation map, masked autocorrelation map)
Examples
--------
>>> from exana.tracking.tools import make_test_grid_rate_map
>>> import matplotlib.pyplot as plt
>>> rate_map, pos = make_test_grid_rate_map()
>>> iter_score = gridness(rate_map, box_xlen=1, box_ylen=1, method='iter')
>>> print('%.2f' % iter_score)
1.39
>>> puncture_score = gridness(rate_map, box_xlen=1, box_ylen=1, method='puncture')
>>> print('%.2f' % puncture_score)
0.96
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from exana.tracking.tools import make_test_grid_rate_map
from exana.tracking import gridness
import matplotlib.pyplot as plt
rate_map, _ = make_test_grid_rate_map()
fig, axs = plt.subplots(2, 2)
g1, acorr, m_acorr1 = gridness(rate_map, box_xlen=1,
box_ylen=1, return_acorr=True,
return_masked_acorr=True,
method='iter')
g2, m_acorr2 = gridness(rate_map, box_xlen=1,
box_ylen=1,
return_masked_acorr=True,
method='puncture')
mats = [rate_map, m_acorr1, acorr, m_acorr2]
titles = ['Rate map', 'Masked acorr "iter", gridness = %.2f' % g1,
'Autocorrelation',
'Masked acorr "puncture", gridness = %.2f' % g2]
for ax, mat, title in zip(axs.ravel(), mats, titles):
ax.imshow(mat)
ax.set_title(title)
plt.tight_layout()
plt.show()
'''
import numpy.ma as ma
from exana.misc.tools import fftcorrelate2d
from exana.tracking.tools import gaussian2D
from scipy.optimize import curve_fit
tmp_map = rate_map.copy()
tmp_map[~np.isfinite(tmp_map)] = 0
acorr = fftcorrelate2d(tmp_map, tmp_map, mode='full', normalize=True)
rows, cols = acorr.shape
b_x = np.linspace(- box_xlen / 2., box_xlen / 2., rows)
b_y = np.linspace(- box_ylen / 2., box_ylen / 2., cols)
B_x, B_y = np.meshgrid(b_x, b_y)
if method == 'iter':
if return_masked_acorr: m_acorrs = []
gridscores = []
for outer in np.arange(box_xlen / 4, box_xlen / 2, step_size):
m_acorr = ma.masked_array(
acorr, mask=np.sqrt(B_x**2 + B_y**2) > outer)
for inner in np.arange(0, box_xlen / 4, step_size):
m_acorr = ma.masked_array(
m_acorr, mask=np.sqrt(B_x**2 + B_y**2) < inner)
r30, r60 = rotate_corr(m_acorr)
gridscores.append(np.min(r60) - np.max(r30))
if return_masked_acorr: m_acorrs.append(m_acorr)
gridscore = max(gridscores)
if return_masked_acorr: m_acorr = m_acorrs[gridscores.index(gridscore)]
elif method == 'puncture':
# round picture edges
_gaussian = lambda pos, a, s: gaussian2D(a, pos[0], pos[1], 0, 0, s).ravel()
p0 = (max(acorr.ravel()), min(box_xlen, box_ylen) / 100)
popt, pcov = curve_fit(_gaussian, (B_x, B_y), acorr.ravel(), p0=p0)
m_acorr = ma.masked_array(
acorr, mask=np.sqrt(B_x**2 + B_y**2) > min(box_xlen, box_ylen) / 2)
m_acorr = ma.masked_array(
m_acorr, mask=np.sqrt(B_x**2 + B_y**2) < popt[1])
r30, r60 = rotate_corr(m_acorr)
gridscore = float(np.min(r60) - np.max(r30))
if return_acorr and return_masked_acorr:
return gridscore, acorr, m_acorr
if return_masked_acorr:
return gridscore, m_acorr
if return_acorr:
return gridscore, acorr # acorrs[grids.index(max(grids))]
else:
return gridscore
def rotate_corr(acorr):
from exana.misc.tools import masked_corrcoef2d
from scipy.ndimage.interpolation import rotate
angles = range(30, 180+30, 30)
corr = []
# Rotate and compute correlation coefficient
for angle in angles:
rot_acorr = rotate(acorr, angle, reshape=False)
corr.append(masked_corrcoef2d(rot_acorr, acorr)[0, 1])
r60 = corr[1::2]
r30 = corr[::2]
return r30, r60
def occupancy_map(x, y, t,
binsize=0.01,
box_xlen=1,
box_ylen=1,
mask_unvisited=True,
convolve=True,
return_bins=False,
smoothing=0.02):
'''Divide a 2D space in bins of size binsize**2, count the time spent
in each bin. The map can be convolved with a gaussian kernel of size
csize determined by the smoothing factor, binsize and box_xlen.
Parameters
----------
x : array
1d vector of x positions
y : array
1d vector of y positions
t : array
1d vector of times at x, y positions
binsize : float
spatial binsize
box_xlen : float
side length of quadratic box
mask_unvisited: bool
        mask bins which have not been visited with nans
convolve : bool
convolve the rate map with a 2D Gaussian kernel
Returns
-------
occupancy_map : numpy.ndarray
if return_bins = True
out : occupancy_map, xbins, ybins
'''
if not all([len(var) == len(var2) for var in [
x, y, t] for var2 in [x, y, t]]):
raise ValueError('x, y, t must have same number of elements')
if box_xlen < x.max() or box_ylen < y.max():
raise ValueError(
'box length must be larger or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0 or remaindery != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
# interpolate one extra timepoint
t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))])
time_in_bin = np.diff(t_)
xbins = np.arange(0, box_xlen + binsize, binsize)
ybins = np.arange(0, box_ylen + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
iy = np.digitize(y, ybins, right=True)
time_pos = np.zeros((xbins.size, ybins.size))
for n in range(len(x) - 1):
time_pos[ix[n], iy[n]] += time_in_bin[n]
# correct for shifting of map since digitize returns values at right edges
time_pos = time_pos[1:, 1:]
    occupancy = time_pos
    if convolve:
        from astropy.convolution import Gaussian2DKernel, convolve_fft
        csize = (box_xlen / binsize) * smoothing
        kernel = Gaussian2DKernel(csize)
        occupancy = convolve_fft(occupancy, kernel)  # TODO edge correction
    if mask_unvisited:
        was_in_bin = np.asarray(time_pos, dtype=bool)
        occupancy[np.invert(was_in_bin)] = np.nan
    if return_bins:
        return occupancy.T, xbins, ybins
    else:
        return occupancy.T
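# Illustrative sketch, not part of the original module: the occupancy map of a
# synthetic random walk should sum to roughly the session duration. Parameter
# values are assumptions for the example only.
def _demo_occupancy_map(seed=0):
    rng = np.random.RandomState(seed)
    t = np.linspace(0., 600., 6000)
    x = np.abs(np.cumsum(rng.uniform(-1e-3, 1e-3, t.size))) % 1.
    y = np.abs(np.cumsum(rng.uniform(-1e-3, 1e-3, t.size))) % 1.
    occ = occupancy_map(x, y, t, binsize=0.02, convolve=False)
    return occ, np.nansum(occ)  # total is close to the 600 s session length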
def nvisits_map(x, y, t,
binsize=0.01,
box_xlen=1,
box_ylen=1,
return_bins=False):
'''Divide a 2D space in bins of size binsize**2, count the
number of visits in each bin. The map can be convolved with
a gaussian kernel of size determined by the smoothing factor,
binsize and box_xlen.
Parameters
----------
x : array
1d vector of x positions
y : array
1d vector of y positions
t : array
1d vector of times at x, y positions
binsize : float
spatial binsize
box_xlen : float
side length of quadratic box
Returns
-------
nvisits_map : numpy.ndarray
if return_bins = True
out : nvisits_map, xbins, ybins
'''
if not all([len(var) == len(var2) for var in [
x, y, t] for var2 in [x, y, t]]):
raise ValueError('x, y, t must have same number of elements')
if box_xlen < x.max() or box_ylen < y.max():
raise ValueError(
'box length must be larger or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0 or remaindery != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
xbins = np.arange(0, box_xlen + binsize, binsize)
ybins = np.arange(0, box_ylen + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
iy = np.digitize(y, ybins, right=True)
nvisits_map = np.zeros((xbins.size, ybins.size))
for n in range(len(x)):
if n == 0:
nvisits_map[ix[n], iy[n]] = 1
else:
if ix[n-1] != ix[n] or iy[n-1] != iy[n]:
nvisits_map[ix[n], iy[n]] += 1
# correct for shifting of map since digitize returns values at right edges
nvisits_map = nvisits_map[1:, 1:]
if return_bins:
return nvisits_map.T, xbins, ybins
else:
return nvisits_map.T
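# Illustrative sketch, not part of the original module: a short hand-made
# trajectory that leaves and re-enters one bin, so that bin counts two visits.
def _demo_nvisits_map():
    t = np.arange(6) * 0.1
    x = np.array([0.05, 0.05, 0.55, 0.55, 0.05, 0.05])
    y = np.array([0.05, 0.05, 0.05, 0.05, 0.05, 0.05])
    # with binsize=0.5 the bin containing (0.05, 0.05) is visited twice
    return nvisits_map(x, y, t, binsize=0.5, box_xlen=1., box_ylen=1.)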
def spatial_rate_map_1d(x, t, spike_train,
binsize=0.01,
track_len=1,
mask_unvisited=True,
convolve=True,
return_bins=False,
smoothing=0.02):
"""Take x coordinates of linear track data, divide in bins of binsize,
count the number of spikes in each bin and divide by the time spent in
respective bins. The map can then be convolved with a gaussian kernel of
    size csize determined by the smoothing factor, binsize and track_len.
Parameters
----------
spike_train : array
x : array
1d vector of x positions
    t : array
        1d vector of times at x positions
    binsize : float
        spatial binsize
    track_len : float
        length of the linear track
    mask_unvisited: bool
        mask bins which have not been visited with nans
    convolve : bool
        convolve the rate map with a Gaussian kernel
Returns
-------
out : rate map
if return_bins = True
out : rate map, xbins
"""
if not all([len(var) == len(var2) for var in [x, t] for var2 in [x, t]]):
raise ValueError('x, t must have same number of elements')
    if track_len < x.max():
        raise ValueError('track length must be larger or equal '
                         'to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(track_len)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
# interpolate one extra timepoint
t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))])
spikes_in_bin, _ = np.histogram(spike_train, t_)
time_in_bin = np.diff(t_)
xbins = np.arange(0, track_len + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
spike_pos = np.zeros(xbins.size)
time_pos = np.zeros(xbins.size)
for n in range(len(x)):
spike_pos[ix[n]] += spikes_in_bin[n]
time_pos[ix[n]] += time_in_bin[n]
# correct for shifting of map since digitize returns values at right edges
spike_pos = spike_pos[1:]
time_pos = time_pos[1:]
with np.errstate(divide='ignore', invalid='ignore'):
rate = np.divide(spike_pos, time_pos)
    if convolve:
        rate[np.isnan(rate)] = 0.  # for convolution
        # the rate map is one dimensional here, so a 1D kernel is needed
        from astropy.convolution import Gaussian1DKernel, convolve_fft
        csize = (track_len / binsize) * smoothing
        kernel = Gaussian1DKernel(csize)
        rate = convolve_fft(rate, kernel)  # TODO edge correction
if mask_unvisited:
was_in_bin = np.asarray(time_pos, dtype=bool)
rate[np.invert(was_in_bin)] = np.nan
if return_bins:
return rate.T, xbins
else:
return rate.T
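# Illustrative sketch, not part of the original module: spikes emitted only
# near the middle of a linear track should give a rate bump around x = 0.5.
# Parameter values are assumptions for the example; smoothing is switched off
# to keep the example free of the astropy dependency.
def _demo_spatial_rate_map_1d():
    t = np.linspace(0., 100., 2000)
    x = 0.5 * (1. + np.sin(2. * np.pi * t / 100.))  # back and forth on the track
    spike_train = t[np.abs(x - 0.5) < 0.05]         # fire only near the centre
    return spatial_rate_map_1d(x, t, spike_train, binsize=0.02, track_len=1.,
                               convolve=False)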
def separate_fields(rate_map, laplace_thrsh=0, center_method='maxima',
cutoff_method='none', box_xlen=1, box_ylen=1, index=False):
"""Separates fields using the laplacian to identify fields separated by
a negative second derivative.
Parameters
----------
rate_map : np 2d array
firing rate in each bin
laplace_thrsh : float
value of laplacian to separate fields by relative to the minima. Should be
on the interval 0 to 1, where 0 cuts off at 0 and 1 cuts off at
min(laplace(rate_map)). Default 0.
center_method : string
method to find field centers. Valid options = ['center_of_mass',
'maxima','gaussian_fit']
cutoff_method (optional) : string or function
function to exclude small fields. If local field value of function
is lower than global function value, the field is excluded. Valid
string_options = ['median', 'mean','none'].
index : bool, default False
return bump center values as index or xy-pos
Returns
-------
fields : numpy array, shape like rate_map.
contains areas all filled with same value, corresponding to fields
in rate_map. The values are in range(1,nFields + 1), sorted by size of the
field (sum of all field values). 0 elsewhere.
n_field : int
field count
bump_centers : (n_field x 2) np ndarray
Coordinates of field centers
"""
cutoff_functions = {'mean':np.mean, 'median':np.median, 'none':None}
if not callable(cutoff_method):
try:
cutoff_func = cutoff_functions[cutoff_method]
except KeyError:
msg = "invalid cutoff_method flag '%s'" % cutoff_method
raise ValueError(msg)
else:
cutoff_func = cutoff_method
from scipy import ndimage
l = ndimage.laplace(rate_map)
l[l>laplace_thrsh*np.min(l)] = 0
# Labels areas of the laplacian not connected by values > 0.
fields, n_fields = ndimage.label(l)
# index 0 is the background
indx = np.arange(1,n_fields+1)
# Use cutoff method to remove unwanted fields
if cutoff_method != 'none':
try:
            # global value of the cutoff function over the whole rate map
            total_value = cutoff_func(rate_map)
        except Exception:
            print('Unexpected error, cutoff_func does not accept the input:')
raise
field_values = ndimage.labeled_comprehension(rate_map, fields, indx,
cutoff_func, float, 0)
try:
is_field = field_values >= total_value
        except Exception:
            print('cutoff_func return values cannot be compared:')
raise
if np.sum(is_field) == 0:
return np.zeros(rate_map.shape), 0, np.array([[],[]])
for i in indx:
if not is_field[i-1]:
fields[fields == i] = 0
n_fields = ndimage.label(fields, output=fields)
indx = np.arange(1,n_fields + 1)
# Sort by largest mean
sizes = ndimage.labeled_comprehension(rate_map, fields, indx,
np.mean, float, 0)
size_sort = np.argsort(sizes)[::-1]
new = np.zeros_like(fields)
for i in np.arange(n_fields):
new[fields == size_sort[i]+1] = i+1
fields = new
    # box_xlen may be a plain float, in which case the centers are unitless
    bc = get_bump_centers(rate_map, labels=fields, ret_index=index,
                          indices=indx, method=center_method,
                          units=getattr(box_xlen, 'units', 1))
# TODO exclude fields where maxima is on the edge of the field?
return fields, n_fields, bc
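# Illustrative sketch, not part of the original module: separate the bumps of
# the synthetic grid-cell map that the gridness doctest above also uses
# (requires exana.tracking.tools).
def _demo_separate_fields():
    from exana.tracking.tools import make_test_grid_rate_map
    rate_map, _ = make_test_grid_rate_map()
    fields, n_fields, centers = separate_fields(rate_map, center_method='maxima')
    return n_fields, centers  # centers are in box coordinates between 0 and 1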
def get_bump_centers(rate_map, labels, ret_index=False, indices=None, method='maxima',
units=1):
"""Finds center of fields at labels."""
from scipy import ndimage
if method not in ['maxima','center_of_mass','gaussian_fit']:
msg = "invalid center_method flag '%s'" % method
raise ValueError(msg)
if indices is None:
indices = np.arange(1,np.max(labels)+1)
if method == 'maxima':
bc = ndimage.maximum_position(rate_map, labels=labels,
index=indices)
elif method == 'center_of_mass':
bc = ndimage.center_of_mass(rate_map, labels=labels, index=indices)
elif method == 'gaussian_fit':
from exana.tracking.tools import fit_gauss_asym
bc = np.zeros((len(indices),2))
import matplotlib.pyplot as plt
for i in indices:
r = rate_map.copy()
r[labels != i] = 0
popt = fit_gauss_asym(r, return_data=False)
# TODO Find out which axis is x and which is y
bc[i-1] = (popt[2],popt[1])
if ret_index:
msg = 'ret_index not implemented for gaussian fit'
raise NotImplementedError(msg)
if not ret_index and not method=='gaussian_fit':
bc = (bc + np.array((0.5,0.5)))/rate_map.shape
return np.array(bc)*units
def find_avg_dist(rate_map, thrsh = 0, plot=False):
"""Uses autocorrelation and separate_fields to find average distance
between bumps. Is dependent on high gridness to get separate bumps in
the autocorrelation
Parameters
----------
rate_map : np 2d array
firing rate in each bin
thrsh (optional) : float, default 0
cutoff value for the laplacian of the autocorrelation function.
Should be a negative number. Gives better separation if bumps are
connected by "bridges" or saddles where the laplacian is negative.
plot (optional) : bool, default False
plot acorr and the separated acorr, with bump centers
Returns
-------
avg_dist : float
relative units from 0 to 1 of the box size
"""
from scipy.ndimage import maximum_position
from exana.misc.tools import fftcorrelate2d
# autocorrelate. Returns array (2x - 1) the size of rate_map
acorr = fftcorrelate2d(rate_map,rate_map, mode = 'full', normalize = True)
#acorr[acorr<0] = 0 # TODO Fix this
f, nf, bump_centers = separate_fields(acorr,laplace_thrsh=thrsh,
center_method='maxima',cutoff_method='median')
# TODO Find a way to find valid value for
# thrsh, or remove.
bump_centers = np.array(bump_centers)
# find dists from center in (autocorrelation)relative units (from 0 to 1)
distances = np.linalg.norm(bump_centers - (0.5,0.5), axis = 1)
dist_sort = np.argsort(distances)
distances = distances[dist_sort]
# use maximum 6 closest values except center value
avg_dist = np.median(distances[1:7])
# correct for difference in shapes
avg_dist *= acorr.shape[0]/rate_map.shape[0] # = 1.98
# TODO : raise warning if too big difference between points
if plot:
import matplotlib.pyplot as plt
fig,[ax1,ax2] = plt.subplots(1,2)
ax1.imshow(acorr,extent = (0,1,0,1),origin='lower')
ax1.scatter(*(bump_centers[:,::-1].T))
ax2.imshow(f,extent = (0,1,0,1),origin='lower')
ax2.scatter(*(bump_centers[:,::-1].T))
return avg_dist
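# Illustrative sketch, not part of the original module: estimate the bump
# spacing of the synthetic grid map; the returned value is in relative box
# units between 0 and 1.
def _demo_find_avg_dist():
    from exana.tracking.tools import make_test_grid_rate_map
    rate_map, _ = make_test_grid_rate_map()
    return find_avg_dist(rate_map)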
def fit_hex(bump_centers, avg_dist=None, plot_bumps = False, method='best'):
"""Fits a hex grid to a given set of bumps. Uses the three bumps most
Parameters
----------
bump_centers : Nx2 np.array
x,y positions of bump centers, x,y /in (0,1)
avg_dist (optional): float
average spacing between bumps
    plot_bumps (optional): bool
        if True, plots the three bumps most likely to be in a
        correct hex position onto the current matplotlib axes.
method (optional): string, valid options: ['closest', 'best']
method to find angle from neighboring bumps.
'closest' uses six bumps nearest to center bump
'best' uses the two bumps nearest to avg_dist
Returns
-------
displacement : float
distance of bump closest to the center in meters
orientation : float
orientation of hexagon (in degrees)
"""
valid_methods = ['closest', 'best']
if method not in valid_methods:
msg = "invalid method flag '%s'" % method
raise ValueError(msg)
bump_centers = np.array(bump_centers)
# sort by distance to center
d = np.linalg.norm(bump_centers - (0.5,0.5), axis=1)
d_sort = np.argsort(d)
dist_sorted = bump_centers[d_sort]
center_bump = dist_sorted[0]; others = dist_sorted[1:]
displacement = d[d_sort][0]
# others distances to center bumps
relpos = others - center_bump
reldist = np.linalg.norm(relpos, axis=1)
if method == 'closest':
# get 6 closest bumps
rel_sort = np.argsort(reldist)
closest = others[rel_sort][:6]
relpos = relpos[rel_sort][:6]
elif method == 'best':
        # get 2 bumps such that \sum_{i \neq j}(|r_i - r_j| - avg_dist)^2 is minimized
squares = 1e32*np.ones((others.shape[0], others.shape[0]))
for i in range(len(relpos)):
for j in range(i,len(relpos)):
rel1 = (reldist[i] - avg_dist)**2
rel2 = (reldist[j] - avg_dist)**2
rel3 = (np.linalg.norm(relpos[i]-relpos[j]) - avg_dist)**2
squares[i,j] = rel1 + rel2 + rel3
rel_slice = np.unravel_index(np.argmin(squares), squares.shape)
rel_slice = np.array(rel_slice)
#rel_sort = np.argsort(np.abs(reldist-avg_dist))
closest = others[rel_slice]
relpos = relpos[rel_slice]
# sort by angle
a = np.arctan2(relpos[:,1], relpos[:,0])%(2*np.pi)
a_sort = np.argsort(a)
# extract lowest angle and convert to degrees
orientation = a[a_sort][0] *180/np.pi
# hex grid is symmetric under rotations of 60deg
orientation %= 60
if plot_bumps:
import matplotlib.pyplot as plt
ax=plt.gca()
i = 1
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
dx = xmax-xmin; dy = ymax - ymin
closest = closest[a_sort]
edges = [center_bump] if method == 'best' else []
edges += [c for c in closest]
edges = np.array(edges)*(dx,dy) + (xmin, ymin)
poly = plt.Polygon(edges, alpha=0.5,color='r')
ax.add_artist(poly)
return displacement, orientation
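# Illustrative sketch, not part of the original module: a perfect hexagon of
# bump centres around the middle of the box should give zero displacement and
# recover the 15 degree rotation used to build it.
def _demo_fit_hex():
    angles = np.deg2rad(np.arange(0, 360, 60) + 15.)  # hexagon rotated 15 deg
    ring = 0.5 + 0.3 * np.c_[np.cos(angles), np.sin(angles)]
    centers = np.vstack(((0.5, 0.5), ring))           # centre bump + 6 neighbours
    displacement, orientation = fit_hex(centers, avg_dist=0.3, method='best')
    return displacement, orientation                  # ~0.0 and ~15.0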
def calculate_grid_geometry(rate_map, plot_fields=False, **kwargs):
"""Calculates quantitative information about grid field.
Find bump centers, bump spacing, center diplacement and hexagon
orientation
Parameters
----------
rate_map : np 2d array
firing rate in each bin
plot_fields : if True, plots the field labels with field centers to the
current matplotlib ax. Default False
thrsh : float, default 0
see find_avg_dist()
center_method : string, valid options: ['maxima', 'center_of_mass']
        default: 'maxima'
see separate_fields()
method : string, valid options: ['closest', 'best']
see fit_hex()
Returns
-------
bump_centers : 2d np.array
x,y positions of bump centers
avg_dist : float
average spacing between bumps, \in [0,1]
displacement : float
distance of bump closest to the center
orientation : float
orientation of hexagon (in degrees)
Examples
--------
>>> import numpy as np
>>> rate_map = np.zeros((5,5))
>>> pos = np.array([ [0,2],
... [1,0],[1,4],
... [2,2],
... [3,0],[3,4],
... [4,2]])
>>> for(i,j) in pos:
... rate_map[i,j] = 1
...
>>> result = calculate_grid_geometry(rate_map)
"""
# TODO add back the following when it is correct
# (array([[0.5, 0.9],
# [0.9, 0.7],
# [0.1, 0.7],
# [0.5, 0.5],
# [0.9, 0.3],
# [0.1, 0.3],
# [0.5, 0.1]]) * m, 0.4472135954999579, 0.0, 26.565051177077983)
from scipy.ndimage import mean, center_of_mass
# TODO: smooth data?
# smooth_rate_map = lambda x:x
# rate_map = smooth_rate_map(rate_map)
center_method = kwargs.pop('center_method',None)
if center_method:
fields, nfields, bump_centers = separate_fields(rate_map,
center_method=center_method)
else:
fields, nfields, bump_centers = separate_fields(rate_map)
if bump_centers.size == 0:
import warnings
msg = 'couldnt find bump centers, returning None'
warnings.warn(msg, RuntimeWarning, stacklevel=2)
return None,None,None,None,
sh = np.array(rate_map.shape)
if plot_fields:
print(fields)
import matplotlib.pyplot as plt
x=np.linspace(0,1,sh[0]+1)
y=np.linspace(0,1,sh[1]+1)
x,y = np.meshgrid(x,y)
ax = plt.gca()
print('nfields: ',nfields)
plt.pcolormesh(x,y, fields)
# switch from row-column to x-y
bump_centers = bump_centers[:,::-1]
thrsh = kwargs.pop('thrsh', None)
if thrsh:
avg_dist = find_avg_dist(rate_map, thrsh)
else:
avg_dist = find_avg_dist(rate_map)
displacement, orientation = fit_hex(bump_centers, avg_dist,
plot_bumps=plot_fields, **kwargs)
return bump_centers, avg_dist, displacement, orientation
class RandomDisplacementBounds(object):
"""random displacement with bounds"""
def __init__(self, xmin, xmax, stepsize=0.5):
self.xmin = np.array(xmin)
self.xmax = np.array(xmax)
self.stepsize = stepsize
def __call__(self, x):
"""take a random step but ensure the new position is within the bounds"""
while True:
# this could be done in a much more clever way, but it will work for example purposes
xnew = x + (self.xmax-self.xmin)*np.random.uniform(-self.stepsize,
self.stepsize, np.shape(x))
if np.all(xnew < self.xmax) and np.all(xnew > self.xmin):
break
return xnew
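# Illustrative sketch, not part of the original module: repeated proposals stay
# inside the given bounds, which is what makes the class usable as the
# take_step argument of scipy.optimize.basinhopping (see optimize_sep_fields).
def _demo_random_displacement_bounds(seed=1):
    np.random.seed(seed)
    step = RandomDisplacementBounds(xmin=[0., 0.], xmax=[1., 1.], stepsize=0.1)
    x = np.array([0.5, 0.5])
    for _ in range(100):
        x = step(x)
    return x  # always within [0, 1] x [0, 1]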
def optimize_sep_fields(rate_map,step = 0.04, niter=40, T = 1.0, method = 'SLSQP',
glob=True, x0 = [0.065,0.1],callback=None):
"""Optimizes the separation of the fields by minimizing an error
function
Parameters
----------
rate_map :
method :
valid methods=['L-BFGS-B', 'TNC', 'SLSQP']
x0 : list
        initial values for smoothing and laplace_thrsh
Returns
--------
res :
Result of the optimization. Contains smoothing and laplace_thrsh in
attribute res.x"""
from scipy import optimize
from exana.tracking.tools import separation_error_func as err_func
valid_methods = ['L-BFGS-B', 'TNC', 'SLSQP']
if method not in valid_methods:
raise ValueError('invalid method flag %s' %method)
    rate_map[np.isnan(rate_map)] = 0.
xmin = [0.025, 0]
xmax = [0.2, 1]
bounds = [(low,high) for low,high in zip(xmin,xmax)]
obj_func = lambda args: err_func(args[0], args[1], rate_map)
if glob:
take_step = RandomDisplacementBounds(xmin, xmax,stepsize=step)
minimizer_kwargs = dict(method=method, bounds=bounds)
res = optimize.basinhopping(obj_func, x0, niter=niter, T = T,
minimizer_kwargs=minimizer_kwargs,
take_step=take_step,callback=callback)
else:
res = optimize.minimize(obj_func, x0, method=method, bounds = bounds, options={'disp': True})
return res
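# Illustrative sketch, not part of the original module: a cheap run of the
# optimizer on the synthetic grid map. niter is kept small on the assumption
# that this only needs to demonstrate the call signature; real use would need
# more iterations.
def _demo_optimize_sep_fields():
    from exana.tracking.tools import make_test_grid_rate_map
    rate_map, _ = make_test_grid_rate_map()
    res = optimize_sep_fields(rate_map, niter=5)
    return res.x  # optimized [smoothing, laplace_thrsh]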
if __name__ == "__main__":
import doctest
doctest.testmod()
| gpl-3.0 | 8,174,192,474,725,353,000 | 34.9102 | 101 | 0.591244 | false |