# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('imager_profile', '0004_auto_20150802_0153'),
]
operations = [
migrations.RemoveField(
model_name='imagerprofile',
name='name',
),
migrations.AddField(
model_name='imagerprofile',
name='nickname',
field=models.CharField(max_length=128, null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='address',
field=models.TextField(null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='camera',
field=models.CharField(help_text=b'What is the make and model of your camera?', max_length=128, null=True, blank=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='photography_type',
field=models.CharField(blank=True, max_length=64, null=True, help_text=b'What is your photography type?', choices=[(b'H', b'Hobbist'), (b'A', b'Abstract'), (b'B', b'Black and White'), (b'P', b'Panorama'), (b'J', b'Journalism')]),
),
migrations.AlterField(
model_name='imagerprofile',
name='website_url',
field=models.URLField(null=True, blank=True),
),
]
|
SJ Beale HR Consult offers HR telephone advice and HR support to companies in the manufacturing sector, whether you are a stand-alone organisation or have several sites. We have experience of working with companies in the manufacturing sector and understand the unique issues you may face when managing your staff. We can provide you with flexible, tailored HR solutions. |
# -*- coding: utf-8 -*-
# nexus5-root (c) Ian Dennis Miller
from fabric.api import task, env
import shutil
import requests
import os.path
import time
import glob
from subprocess import call
adb_cmd = os.path.join(os.path.expanduser(env.sdk_path), "platform-tools", "adb")
fastboot_cmd = os.path.join(os.path.expanduser(env.sdk_path), "platform-tools", "fastboot")
def download_url(source_url, destination_filename):
if not os.path.isfile(destination_filename):
print("downloading {0}...".format(source_url))
r = requests.get(source_url, stream=True, headers={'referer': source_url})
if r.status_code == 200:
with open(destination_filename, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
print("image downloaded to {0}".format(destination_filename))
else:
print("already downloaded as {0}".format(destination_filename))
def install_app(apk_filename):
"install an app on the phone"
call([
adb_cmd, "install",
os.path.join(
env.working_path,
"download",
apk_filename)
])
time.sleep(1)
@task
def ensure_paths():
download_path = os.path.join(env.working_path, "download")
build_path = os.path.join(env.working_path, env.nexus_model)
if not os.path.isdir(download_path):
os.mkdir(download_path)
if not os.path.isdir(build_path):
os.mkdir(build_path)
def download_apps():
download_url("http://file.appsapk.com/wp-content/uploads/downloads/BusyBox.apk",
os.path.join(env.working_path, "download", "BusyBox.apk"))
download_url(
"https://drive.google.com/uc?export=download&confirm=no_antivirus&id=0B8muzPZAeiQ6RlFzMWM4ZUZKQ2s",
os.path.join(env.working_path, "download", "TitaniumBackup.apk"))
download_url(
"https://drive.google.com/uc?export=download&confirm=no_antivirus&id=0B8muzPZAeiQ6S293d2lqWE1rRlk",
os.path.join(env.working_path, "download", "AndroidTerminalEmulator.apk"))
@task
def download_sdk():
"download the Android SDK"
download_url(env.sdk_url, os.path.join(env.working_path, "download", "sdk.tgz"))
call(["tar", "-xvzf", os.path.join(env.working_path, "download", "sdk.tgz"),
"-C", os.path.expanduser(env.sdk_path)])
@task
def download_twrp():
"download TWRP"
download_url(env.bootloader_url, os.path.join(env.working_path, "download", "twrp.img"))
call(["cp",
os.path.join(env.working_path, "download", "twrp.img"),
os.path.join(env.working_path, env.nexus_model)
])
@task
def download_nexus_image():
"download the stock Nexus image"
download_url(env.image_url, os.path.join(env.working_path, "download", "nexus-image.tgz"))
call(["tar", "-xvzf", os.path.join(env.working_path, "download", "nexus-image.tgz"),
"-C", env.working_path])
call(["mv",
glob.glob(os.path.join(env.working_path, "{0}-*".format(env.nexus_model)))[0],
os.path.join(env.working_path, "nexus-image")
])
call(["mv",
os.path.join(env.working_path, "nexus-image"),
os.path.join(env.working_path, env.nexus_model)
])
@task
def adb_bootloader():
"reboot the phone into the bootloader using adb"
call([adb_cmd, "reboot", "bootloader"])
raw_input('Press ENTER after your phone has rebooted.')
@task
def fastboot_bootloader():
"reboot the phone into the bootloader using fastboot"
call([fastboot_cmd, "reboot-bootloader"])
raw_input('Press ENTER after your phone has rebooted.')
@task
def fastboot_recovery():
"reboot the phone into the recovery using fastboot"
call([fastboot_cmd, "reboot-recovery"])
raw_input('Press ENTER after your phone has rebooted.')
@task
def reboot():
"reboot the phone"
call([fastboot_cmd, "reboot"])
@task
def unlock():
"unlock the phone's bootloader. NB: This step will wipe all user data."
call([fastboot_cmd, "oem", "unlock"])
print("Now you must select 'yes' to wipe your user data and unlock the bootloader.")
raw_input('Press ENTER after you have unlocked the bootloader.')
reboot()
@task
def backup():
"copy backup from phone to local system"
call([adb_cmd, "pull", env.remote_backup_path, os.path.expanduser(env.local_backup_path)])
@task
def restore():
"restore backup from local system to phone"
call([adb_cmd, "push", os.path.expanduser(env.local_backup_path), env.remote_backup_path])
@task
def flash_bootloader():
"flash the stock bootloader"
call([
fastboot_cmd, "flash", "bootloader",
glob.glob(os.path.join(
env.working_path, env.nexus_model,
"nexus-image",
"bootloader-*.img"))[0]
])
time.sleep(1)
fastboot_bootloader()
time.sleep(5)
@task
def flash_radio():
"flash the radio image"
call([
fastboot_cmd, "flash", "radio",
glob.glob(os.path.join(
env.working_path, env.nexus_model,
"nexus-image",
"radio-*.img"))[0]
])
time.sleep(1)
fastboot_bootloader()
time.sleep(5)
@task
def flash_image():
"flash the nexus image"
call([
fastboot_cmd, "-w", "update",
glob.glob(os.path.join(
env.working_path, env.nexus_model,
"nexus-image",
"image-*.zip"))[0]
])
time.sleep(5)
@task
def flash_recovery():
"flash the recovery image"
call([
fastboot_cmd, "flash", "recovery",
os.path.join(
env.working_path, env.nexus_model,
"twrp.img")
])
time.sleep(1)
fastboot_recovery()
time.sleep(5)
@task
def install_apps():
"install several key APKs: BusyBox and TitaniumBackup"
install_app("TitaniumBackup.apk")
install_app("BusyBox.apk")
install_app("AndroidTerminalEmulator.apk")
|
Valeta Studios - Ikaria. Book direct with apartment owner and save money.
Valeta Studios - Apartments are located just above Livadi beach, in Armenistis, a village offering modern facilities on an island which still remains unaffected by excessive tourism.
Valeta Studios - Apartments consist of studio flats with self-catering facilities as well as a modern en suite bathroom. Each studio can accommodate a family of four, and two studios can be joined to form a larger accommodation.
All studios/apartments have breathtaking sea and mountain views. A ten-minute walk from the apartments will lead you to shops, food stores, traditional cafes, restaurants, and transportation.
The village of Armenistis offers a choice between, or a combination of, a quiet, relaxed atmosphere and a busy nightlife. Night clubs, discos, and bars are located outside the village, by the sea, about a ten-minute walk from Valeta.
Neighboring Armenistis is a vast, mountainous, green countryside and a group of small villages called Raches (hills). The area is ideal terrain, with breathtaking views, for hikers and walkers to explore. The friendly and relaxed manner of the local people will make you feel at home. |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol, \
listenWS
class BroadcastServerProtocol(WebSocketServerProtocol):
def onOpen(self):
self.factory.register(self)
def onMessage(self, payload, isBinary):
if not isBinary:
msg = "{} from {}".format(payload.decode('utf8'), self.peer)
self.factory.broadcast(msg)
def connectionLost(self, reason):
WebSocketServerProtocol.connectionLost(self, reason)
self.factory.unregister(self)
class BroadcastServerFactory(WebSocketServerFactory):
"""
Simple broadcast server broadcasting any message it receives to all
currently connected clients.
"""
def __init__(self, url, debug=False, debugCodePaths=False):
WebSocketServerFactory.__init__(self, url, debug=debug, debugCodePaths=debugCodePaths)
self.clients = []
self.tickcount = 0
self.tick()
def tick(self):
self.tickcount += 1
self.broadcast("tick %d from server" % self.tickcount)
reactor.callLater(1, self.tick)
def register(self, client):
if client not in self.clients:
print("registered client {}".format(client.peer))
self.clients.append(client)
def unregister(self, client):
if client in self.clients:
print("unregistered client {}".format(client.peer))
self.clients.remove(client)
def broadcast(self, msg):
print("broadcasting message '{}' ..".format(msg))
for c in self.clients:
c.sendMessage(msg.encode('utf8'))
print("message sent to {}".format(c.peer))
class BroadcastPreparedServerFactory(BroadcastServerFactory):
"""
Functionally same as above, but optimized broadcast using
prepareMessage and sendPreparedMessage.
"""
def broadcast(self, msg):
print("broadcasting prepared message '{}' ..".format(msg))
preparedMsg = self.prepareMessage(msg)
for c in self.clients:
c.sendPreparedMessage(preparedMsg)
print("prepared message sent to {}".format(c.peer))
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
ServerFactory = BroadcastServerFactory
# ServerFactory = BroadcastPreparedServerFactory
factory = ServerFactory(u"ws://127.0.0.1:9000",
debug=debug,
debugCodePaths=debug)
factory.protocol = BroadcastServerProtocol
factory.setProtocolOptions(allowHixie76=True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
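A minimal client sketch for exercising the broadcast server above, assuming the same Autobahn/Twisted versions. The server address mirrors the one used in the server's `__main__` block; the message text and the client class name are illustrative.
# broadcast_client.py -- hypothetical companion client, not part of the original example.
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketClientFactory, \
    WebSocketClientProtocol, \
    connectWS


class BroadcastClientProtocol(WebSocketClientProtocol):
    """Send one message on connect and print everything the server broadcasts."""

    def onOpen(self):
        self.sendMessage(u"hello from a client".encode('utf8'))

    def onMessage(self, payload, isBinary):
        if not isBinary:
            print("received: {}".format(payload.decode('utf8')))


if __name__ == '__main__':
    factory = WebSocketClientFactory(u"ws://127.0.0.1:9000")
    factory.protocol = BroadcastClientProtocol
    connectWS(factory)
    reactor.run()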
|
On this Tuesday edition of What We're Reading, we bring you ELL-related stories and resources from the last two weeks. For more great content, check out our Highest Aspirations podcast. Season 2 begins on Wednesday, January 23rd!
EurekAlert! reports on a Duke and Stanford study that shows U.S. immigrant children study more STEM subjects in high school and college.
In this uplifting article from The Fresno Bee, Carmen George profiles a Yemeni refugee student who is excelling in computer programming while learning English.
In his Classroom Q & A series from Education Week, Larry Ferlazzo asks 5 experts how to build speaking skills with ELLs.
From New America, Ingrid T. Colón discusses the challenges of students with interrupted formal education (SIFE) and what should be done to best support them.
From The Charlotte Observer, Ann Doss Helms reports on the costs and benefits of Charlotte-Mecklenburg Schools' growing immigrant student population.
From The Hechinger Report, Sarah Butrymowicz reports on a lack of resources that may be preventing refugee girls from reaching their highest aspirations.
Education Week's Corey Mitchell highlights some of the opportunity gaps of the seal of biliteracy and what is being done to overcome them.
The 74 reports that the Gates Foundation will give up to $10 million to support teacher training on high quality curricula, some of which will directly help ELLs.
Also from The 74, Conor Williams analyzes Washington D.C.'s desire to increase dual language programs and how they can use it as a tool to advance equity.
In another important piece from Education Week, Corey Mitchell explores whether or not schools value the bilingualism of ELLs and solicits opinions from educators around the country.
From Ellevation's Specialist Spotlight series, Hillsborough County Schools ESOL Resource Teacher Maria Clarke shares tips and strategies for engaging and coaching classroom teachers of ELLs.
Season 2 of the Highest Aspirations Podcast begins on January 23rd! In the meantime, check out our 5 essential episodes from Season 1.
Want to get this newsletter delivered to your mailbox every other week? Join the ELL Community! |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-28 12:34
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('domain', '0029_meta'),
]
operations = [
migrations.AlterModelOptions(
name='attribute',
options={'permissions': (('view_attribute', 'Can view Attribute'),), 'verbose_name': 'Attribute', 'verbose_name_plural': 'Attributes'},
),
migrations.AlterModelOptions(
name='attributeentity',
options={'ordering': ('uri',), 'permissions': (('view_attributeentity', 'Can view Attribute entity'),), 'verbose_name': 'Attribute entity', 'verbose_name_plural': 'Attribute entities'},
),
migrations.AlterModelOptions(
name='range',
options={'ordering': ('attribute',), 'permissions': (('view_range', 'Can view Range'),), 'verbose_name': 'Range', 'verbose_name_plural': 'Ranges'},
),
migrations.AlterModelOptions(
name='verbosename',
options={'permissions': (('view_verbosename', 'Can view Verbose name'),), 'verbose_name': 'Verbose name', 'verbose_name_plural': 'Verbose names'},
),
]
|
The Great Market Hall in Budapest should be any visitor's first stop in order to get acquainted with the people, goods and grub of Hungary. The market is one of those strange places that caters to both locals and tourists alike without losing its authentic appeal. Under one roof you can buy meats and sausages, Royal Tokaji wine, paprika, hot Hungarian favorites such as goulash, an assortment of pickled vegetables and fresh produce. The upper floor has small eateries, where you can sample a variety of Hungarian favorites for very little money.
Also known as Great Market Hall, Budapest’s largest and oldest indoor market is reminiscent of the inside of a train station and is conveniently located in Pest at the end of Váci utca. Although Great Market Hall is a one-stop shop for everything from fresh produce and paprika to pálinka and Hungarian souvenirs, we were there to try the lángos, a deep-fried delight that can be prepared sweet or savory. Booths selling lángos and several other eateries and souvenir shops can be found on the second floor.
It doesn’t matter if you’re trying to find authentic Hungarian spices, meats, tapestries or even a few snacks for your next train ride: the Central Market has to be a stop while visiting Budapest. On the ground floor you’ll find several vendors offering the best kolbasz, cured meats, fruits and vegetables from across the puszta, the Great Hungarian Plain. Stop here for a taste of the country; the restaurant upstairs is a bit kitschy, but if you are only in town for a day or two, it’s a great chance to try some regional dishes.
If you are fortunate enough to find yourself in Budapest, then hurry on over to Nagycsarnok--The Great Market Hall on Fővám Tér for lunch. Go directly upstairs and have some authentic Hungarian Goulash or Hungarian Stuffed Cabbage--Töltött Káposzta or choose from the many other delectable selections. After lunch, go downstairs and check out the food stalls with fruits, vegetables, meats, cheeses and pastries--you might want to try some of these, too. Enjoy! |
from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext
from rango.models import Bar, Tapa
from rango.forms import TapaForm
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
# Home page
def index(request):
# Query the database for a list of ALL bares currently stored.
# Order the bares by no. likes in descending order.
# Retrieve the top 5 only - or all if less than 5.
# Place the list in our context_dict dictionary which will be passed to the template engine.
bares_list = Bar.objects.order_by('-n_visitas')[:5]
context_dict = {'bares': bares_list}
# Render the response and send it back!
return render(request, 'rango/index.html', context_dict)
# Bar detail page
def bar(request, bar_name_slug):
# Create a context dictionary which we can pass to the template rendering engine.
context_dict = {}
try:
# Can we find a bar name slug with the given name?
# If we can't, the .get() method raises a DoesNotExist exception.
# So the .get() method returns one model instance or raises an exception.
bar = Bar.objects.get(slug=bar_name_slug)
context_dict['bar_name'] = bar.nombre
# Retrieve all of the associated tapas.
# Note that filter returns >= 1 model instance.
tapas = Tapa.objects.filter(bar=bar)
# Adds our results list to the template context under name tapas.
context_dict['tapas'] = tapas
# We also add the bar object from the database to the context dictionary.
# We'll use this in the template to verify that the bar exists.
context_dict['bar'] = bar
        # New: increment the visit count each time the page is requested
bar.n_visitas += 1
bar.save()
except Bar.DoesNotExist:
# We get here if we didn't find the specified bar.
# Don't do anything - the template displays the "no bar" message for us.
pass
# Go render the response and return it to the client.
return render(request, 'rango/bar.html', context_dict)
# About page
def about(request):
# Create a context dictionary which we can pass to the template rendering engine.
context_dict = {}
return render(request, 'rango/about.html', context_dict)
# Add tapa page
@login_required
def add_tapa(request, bar_name_slug):
try:
ba = Bar.objects.get(slug=bar_name_slug)
    except Bar.DoesNotExist:
ba = None
if request.method == 'POST':
form = TapaForm(request.POST)
if form.is_valid():
if ba:
tapa = form.save(commit=False)
tapa.bar = ba
tapa.votos = 0
tapa.save()
# probably better to use a redirect here.
return bar(request, bar_name_slug)
else:
            print(form.errors)
else:
form = TapaForm()
context_dict = {'form':form, 'bar': ba}
return render(request, 'rango/add_tapa.html', context_dict)
def reclama_datos(request):
bares = Bar.objects.order_by('-n_visitas')[:3]
datos={'bares':[bares[0].nombre,bares[1].nombre,bares[2].nombre],
'visitas':[bares[0].n_visitas,
bares[1].n_visitas,
bares[2].n_visitas
]
}
return JsonResponse(datos, safe=False)
def like_tapa(request):
context = RequestContext(request)
tapa_id = None
if request.method == 'GET':
tapa_id = request.GET['tapa_id']
votos = 0
if tapa_id:
tapa = Tapa.objects.get(id=int(tapa_id))
if tapa:
votos = tapa.votos + 1
tapa.votos = votos
tapa.save()
return HttpResponse(votos)
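A hedged sketch of how these views might be wired into the app's URLconf. The view names come from this file; the specific URL patterns and the use of the Django 1.x-era `django.conf.urls.url` helper (consistent with the style of this code) are assumptions.
# urls.py -- hypothetical URLconf for the views above; the patterns are illustrative.
from django.conf.urls import url
from rango import views

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^about/$', views.about, name='about'),
    url(r'^bar/(?P<bar_name_slug>[\w\-]+)/$', views.bar, name='bar'),
    url(r'^bar/(?P<bar_name_slug>[\w\-]+)/add_tapa/$', views.add_tapa, name='add_tapa'),
    url(r'^reclama_datos/$', views.reclama_datos, name='reclama_datos'),
    url(r'^like_tapa/$', views.like_tapa, name='like_tapa'),
]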
|
The NOW Account from Investment Savings Bank provides unlimited check writing and transactions each month, without a per-check charge. Plus, you earn interest on all balances, which means your money is working for you. ISB’s NOW Account is a great way to step up your individual or non-profit’s transactional account. Apply today for an ISB NOW Account and move your banking experience to a whole new level.
Available to any individual, sole proprietorship, or non-profit corporation. Not available to partnerships and for-profit corporate entities. Certain restrictions may apply.
Interest earned on all balances.
Customers may be eligible for a VISA Debit Card.
Minimum deposit to open account is $50.00. A $300.00 minimum balance is required or a $5.00 Service Charge (SC) will be assessed. Service charge may be avoided by simply maintaining a $300.00 daily balance between consolidated deposit accounts (with the exception of an IRA account).
Funds are insured up to $250,000 by FDIC.
*The rate may change after the account is opened.
NOW Accounts with Investment Savings Bank bring individual and non-profit customers the elevated banking experience they desire, without a significant minimum deposit. The modest minimum deposit and minimum balance requirements make ISB’s NOW Account a smart way to deposit funds and manage check writing and transactions. ISB’s NOW Accounts give customers easy and safe access to their money when they need it. |
"""
(C) Copyright 2013 Rob Watson rmawatson [at] hotmail.com and others.
All rights reserved. This program and the accompanying materials
are made available under the terms of the GNU Lesser General Public License
(LGPL) version 2.1 which accompanies this distribution, and is available at
http://www.gnu.org/licenses/lgpl-2.1.html
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
Contributors:
Rob Watson ( rmawatson [at] hotmail )
"""
from PyMT4 import *
from threading import Thread
import time
class OnOrderManager(object):
__tickets = []
__handlers = []
__runthread = None
__shutdown = False
@classmethod
def Initialize(cls):
tickets = [OrderSelect(index,SELECT_BY_POS,MODE_TRADES) for index in range(OrdersTotal())]
for ticket in tickets:
if OrderType(ticket) in (OP_BUY ,OP_SELL):
cls.__tickets.append(ticket)
cls.__runthread = Thread(target=OnOrderManager.__run)
cls.__runthread.start()
@classmethod
def RegisterOnOrderHandler(cls,callback):
cls.__handlers.append(callback)
@classmethod
def __run(cls):
count = 0
while not cls.__shutdown:
if count == 10:
count = 0
tickets = [OrderSelect(index,SELECT_BY_POS,MODE_TRADES) for index in range(OrdersTotal())]
for ticket in tickets:
if OrderType(ticket) in (OP_BUY ,OP_SELL) and ticket not in cls.__tickets:
for handler in cls.__handlers:
#try:
handler(ticket)
#except:pass
cls.__tickets.append(ticket)
count +=1
time.sleep(0.2)
@classmethod
def Shutdown(cls):
if cls.__runthread:
cls.__shutdown = True
cls.__runthread.join()
RegisterOnOrderHandler = OnOrderManager.RegisterOnOrderHandler
def OnOrderHandler(ticket):
orderSize = OrderLots(ticket)
orderStop = OrderStopLoss(ticket)
openPrice = OrderOpenPrice(ticket)
orderType = OrderType(ticket)
orderSymbol = OrderSymbol(ticket)
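    # MODE_POINT is the symbol's point size; 50 * point * 10 amounts to a
    # 50-pip offset on 5-digit quotes, used below as a default stop loss
    # when the order was opened without one.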
lotStepValue = MarketInfo(orderSymbol,MODE_POINT)
if not orderStop:
newStop = openPrice + ((-50*lotStepValue*10) if orderType == OP_BUY else (50*lotStepValue*10))
OrderModify(ticket,0.0,newStop,0.0,0,0)
def OnTickHandler(symbol,bid,ask):
print symbol,bid,ask
if __name__ == "__main__":
print Connect()
OnOrderManager.Initialize()
RegisterOnOrderHandler(OnOrderHandler)
RegisterOnTickHandler("*",OnTickHandler)
try:
while(True):
time.sleep(0.5)
except KeyboardInterrupt:
pass
OnOrderManager.Shutdown()
Disconnect()
|
In an annual ‘fishing festival,’ thousands dive into the water when a Dong Nai dam closes its spillway.
Thousands wait at the Tri An reservoir in the southern Dong Nai Province on Monday morning, bringing nets and even electric rods to catch big fish, in an annual “fishing festival” that happens when the hydroelectric dam closes its spillway.
Different kinds of fish including catfish, carp and barb are left behind by the closing of the spillway.
A huge catfish weighing about 20 kilograms was caught in a small cavity by a man who refused an offer of VND2 million ($85) for it. “It took me quite a long time to catch this one,” he said.
Another man said he will take his fish home and cook it because this only happens once a year.
Two tilapias get caught in the net. According to fishermen, a lot of fish remain in this area when the spillway is closed, including very big ones.
Hung is part of a group of people who’ve come from Bien Hoa, more than an hour away, to participate in the fish festival. “Our group has 10 people. My job is just to keep an eye on the fish. We sell most of the fish we catch and only leave some to take home as gifts for our families,” Hung said.
While trying to catch a catfish, Sang gets punctured in the arm by a hard, sharp spine (seen in his right hand). “Catching a big fish is not easy at all, and when that spine stings you, you’ll get a fever for days,” Sang said.
Sang finally caught this catfish, weighing 10 kilograms.
This year, the prices of fish caught in this lake have gone up. Barbs fetch VND30,000 to 50,000 ($1.29-2.15) a kilogram, while big catfish (weighing over 10 kilograms) are priced at VND800,000 to 1.2 million ($34-52). |
# Copyright (c) 2016 Alex Meade. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NetApp Data ONTAP data motion library.
This library handles transferring data from a source to a destination. Its
responsibility is to handle this as efficiently as possible given the
location of the data's source and destination. This includes cloning,
SnapMirror, and copy-offload as improvements to brute force data transfer.
"""
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from manila import exception
from manila.i18n import _
from manila.share import configuration
from manila.share import driver
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
from manila.share.drivers.netapp.dataontap.client import client_cmode
from manila.share.drivers.netapp import options as na_opts
from manila.share.drivers.netapp import utils as na_utils
from manila.share import utils as share_utils
from manila import utils
LOG = log.getLogger(__name__)
CONF = cfg.CONF
def get_backend_configuration(backend_name):
config_stanzas = CONF.list_all_sections()
if backend_name not in config_stanzas:
msg = _("Could not find backend stanza %(backend_name)s in "
"configuration which is required for replication or migration "
"workflows with the source backend. Available stanzas are "
"%(stanzas)s")
params = {
"stanzas": config_stanzas,
"backend_name": backend_name,
}
raise exception.BadConfigurationException(reason=msg % params)
config = configuration.Configuration(driver.share_opts,
config_group=backend_name)
if config.driver_handles_share_servers:
# NOTE(dviroel): avoid using a pre-create vserver on DHSS == True mode
# when retrieving remote backend configuration.
config.netapp_vserver = None
config.append_config_values(na_opts.netapp_cluster_opts)
config.append_config_values(na_opts.netapp_connection_opts)
config.append_config_values(na_opts.netapp_basicauth_opts)
config.append_config_values(na_opts.netapp_transport_opts)
config.append_config_values(na_opts.netapp_support_opts)
config.append_config_values(na_opts.netapp_provisioning_opts)
config.append_config_values(na_opts.netapp_data_motion_opts)
return config
def get_client_for_backend(backend_name, vserver_name=None):
config = get_backend_configuration(backend_name)
client = client_cmode.NetAppCmodeClient(
transport_type=config.netapp_transport_type,
ssl_cert_path=config.netapp_ssl_cert_path,
username=config.netapp_login,
password=config.netapp_password,
hostname=config.netapp_server_hostname,
port=config.netapp_server_port,
vserver=vserver_name or config.netapp_vserver,
trace=na_utils.TRACE_API)
return client
class DataMotionSession(object):
def _get_backend_volume_name(self, config, share_obj):
"""Return the calculated backend name of the share.
Uses the netapp_volume_name_template configuration value for the
backend to calculate the volume name on the array for the share.
"""
volume_name = config.netapp_volume_name_template % {
'share_id': share_obj['id'].replace('-', '_')}
return volume_name
def _get_backend_qos_policy_group_name(self, share):
"""Get QoS policy name according to QoS policy group name template."""
__, config = self.get_backend_name_and_config_obj(share['host'])
return config.netapp_qos_policy_group_name_template % {
'share_id': share['id'].replace('-', '_')}
def _get_backend_snapmirror_policy_name_svm(self, share_server_id,
backend_name):
config = get_backend_configuration(backend_name)
return (config.netapp_snapmirror_policy_name_svm_template
% {'share_server_id': share_server_id.replace('-', '_')})
def get_vserver_from_share_server(self, share_server):
backend_details = share_server.get('backend_details')
if backend_details:
return backend_details.get('vserver_name')
def get_vserver_from_share(self, share_obj):
share_server = share_obj.get('share_server')
if share_server:
return self.get_vserver_from_share_server(share_server)
def get_backend_name_and_config_obj(self, host):
backend_name = share_utils.extract_host(host, level='backend_name')
config = get_backend_configuration(backend_name)
return backend_name, config
def get_backend_info_for_share(self, share_obj):
backend_name, config = self.get_backend_name_and_config_obj(
share_obj['host'])
vserver = (self.get_vserver_from_share(share_obj) or
config.netapp_vserver)
volume_name = self._get_backend_volume_name(config, share_obj)
return volume_name, vserver, backend_name
def get_client_and_vserver_name(self, share_server):
destination_host = share_server.get('host')
vserver = self.get_vserver_from_share_server(share_server)
backend, __ = self.get_backend_name_and_config_obj(destination_host)
client = get_client_for_backend(backend, vserver_name=vserver)
return client, vserver
def get_snapmirrors(self, source_share_obj, dest_share_obj):
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
snapmirrors = dest_client.get_snapmirrors(
source_vserver=src_vserver, dest_vserver=dest_vserver,
source_volume=src_volume_name, dest_volume=dest_volume_name,
desired_attributes=['relationship-status',
'mirror-state',
'source-vserver',
'source-volume',
'last-transfer-end-timestamp'])
return snapmirrors
def create_snapmirror(self, source_share_obj, dest_share_obj):
"""Sets up a SnapMirror relationship between two volumes.
1. Create SnapMirror relationship
2. Initialize data transfer asynchronously
"""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
# 1. Create SnapMirror relationship
# TODO(ameade): Change the schedule from hourly to a config value
dest_client.create_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name,
schedule='hourly')
# 2. Initialize async transfer of the initial data
dest_client.initialize_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
def delete_snapmirror(self, source_share_obj, dest_share_obj,
release=True):
"""Ensures all information about a SnapMirror relationship is removed.
1. Abort snapmirror
2. Delete the snapmirror
3. Release snapmirror to cleanup snapmirror metadata and snapshots
"""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, src_backend = (
self.get_backend_info_for_share(source_share_obj))
# 1. Abort any ongoing transfers
try:
dest_client.abort_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name,
clear_checkpoint=False)
except netapp_api.NaApiError:
# Snapmirror is already deleted
pass
# 2. Delete SnapMirror Relationship and cleanup destination snapshots
try:
dest_client.delete_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
LOG.info('No snapmirror relationship to delete')
exc_context.reraise = False
if release:
# If the source is unreachable, do not perform the release
try:
src_client = get_client_for_backend(src_backend,
vserver_name=src_vserver)
except Exception:
src_client = None
# 3. Cleanup SnapMirror relationship on source
try:
if src_client:
src_client.release_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
# Handle the case where the snapmirror is already
# cleaned up
exc_context.reraise = False
def update_snapmirror(self, source_share_obj, dest_share_obj):
"""Schedule a snapmirror update to happen on the backend."""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
# Update SnapMirror
dest_client.update_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
def quiesce_then_abort_svm(self, source_share_server, dest_share_server):
source_client, source_vserver = self.get_client_and_vserver_name(
source_share_server)
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
# 1. Attempt to quiesce, then abort
dest_client.quiesce_snapmirror_svm(source_vserver, dest_vserver)
dest_backend = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5
@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors_svm(
source_vserver=source_vserver, dest_vserver=dest_vserver,
desired_attributes=['relationship-status', 'mirror-state']
)[0]
if snapmirror.get('relationship-status') != 'quiesced':
raise exception.ReplicationException(
reason="Snapmirror relationship is not quiesced.")
try:
wait_for_quiesced()
except exception.ReplicationException:
dest_client.abort_snapmirror_svm(source_vserver,
dest_vserver,
clear_checkpoint=False)
def quiesce_then_abort(self, source_share_obj, dest_share_obj):
dest_volume, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
# 1. Attempt to quiesce, then abort
dest_client.quiesce_snapmirror_vol(src_vserver,
src_volume,
dest_vserver,
dest_volume)
config = get_backend_configuration(dest_backend)
retries = config.netapp_snapmirror_quiesce_timeout / 5
@utils.retry(exception.ReplicationException, interval=5,
retries=retries, backoff_rate=1)
def wait_for_quiesced():
snapmirror = dest_client.get_snapmirrors(
source_vserver=src_vserver, dest_vserver=dest_vserver,
source_volume=src_volume, dest_volume=dest_volume,
desired_attributes=['relationship-status', 'mirror-state']
)[0]
if snapmirror.get('relationship-status') != 'quiesced':
raise exception.ReplicationException(
reason="Snapmirror relationship is not quiesced.")
try:
wait_for_quiesced()
except exception.ReplicationException:
dest_client.abort_snapmirror_vol(src_vserver,
src_volume,
dest_vserver,
dest_volume,
clear_checkpoint=False)
def break_snapmirror(self, source_share_obj, dest_share_obj, mount=True):
"""Breaks SnapMirror relationship.
1. Quiesce any ongoing snapmirror transfers
2. Wait until snapmirror finishes transfers and enters quiesced state
3. Break snapmirror
4. Mount the destination volume so it is exported as a share
"""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
# 1. Attempt to quiesce, then abort
self.quiesce_then_abort(source_share_obj, dest_share_obj)
# 2. Break SnapMirror
dest_client.break_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
# 3. Mount the destination volume and create a junction path
if mount:
dest_client.mount_volume(dest_volume_name)
def resync_snapmirror(self, source_share_obj, dest_share_obj):
"""Resync SnapMirror relationship. """
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
dest_client.resync_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
def resume_snapmirror(self, source_share_obj, dest_share_obj):
"""Resume SnapMirror relationship from a quiesced state."""
dest_volume_name, dest_vserver, dest_backend = (
self.get_backend_info_for_share(dest_share_obj))
dest_client = get_client_for_backend(dest_backend,
vserver_name=dest_vserver)
src_volume_name, src_vserver, __ = self.get_backend_info_for_share(
source_share_obj)
dest_client.resume_snapmirror_vol(src_vserver,
src_volume_name,
dest_vserver,
dest_volume_name)
def change_snapmirror_source(self, replica,
orig_source_replica,
new_source_replica, replica_list):
"""Creates SnapMirror relationship from the new source to destination.
1. Delete all snapmirrors involving the replica, but maintain
snapmirror metadata and snapshots for efficiency
        2. For DHSS=True scenarios, create a new vserver peer relationship if
           it does not already exist
3. Ensure a new source -> replica snapmirror exists
4. Resync new source -> replica snapmirror relationship
"""
replica_volume_name, replica_vserver, replica_backend = (
self.get_backend_info_for_share(replica))
replica_client = get_client_for_backend(replica_backend,
vserver_name=replica_vserver)
new_src_volume_name, new_src_vserver, new_src_backend = (
self.get_backend_info_for_share(new_source_replica))
# 1. delete
for other_replica in replica_list:
if other_replica['id'] == replica['id']:
continue
# We need to delete ALL snapmirror relationships
# involving this replica but do not remove snapmirror metadata
# so that the new snapmirror relationship is efficient.
self.delete_snapmirror(other_replica, replica, release=False)
self.delete_snapmirror(replica, other_replica, release=False)
# 2. vserver operations when driver handles share servers
replica_config = get_backend_configuration(replica_backend)
if (replica_config.driver_handles_share_servers
and replica_vserver != new_src_vserver):
            # create vserver peering if it does not already exist
if not replica_client.get_vserver_peers(replica_vserver,
new_src_vserver):
new_src_client = get_client_for_backend(
new_src_backend, vserver_name=new_src_vserver)
# Cluster name is needed for setting up the vserver peering
new_src_cluster_name = new_src_client.get_cluster_name()
replica_cluster_name = replica_client.get_cluster_name()
replica_client.create_vserver_peer(
replica_vserver, new_src_vserver,
peer_cluster_name=new_src_cluster_name)
if new_src_cluster_name != replica_cluster_name:
new_src_client.accept_vserver_peer(new_src_vserver,
replica_vserver)
# 3. create
# TODO(ameade): Update the schedule if needed.
replica_client.create_snapmirror_vol(new_src_vserver,
new_src_volume_name,
replica_vserver,
replica_volume_name,
schedule='hourly')
# 4. resync
replica_client.resync_snapmirror_vol(new_src_vserver,
new_src_volume_name,
replica_vserver,
replica_volume_name)
@na_utils.trace
def remove_qos_on_old_active_replica(self, orig_active_replica):
old_active_replica_qos_policy = (
self._get_backend_qos_policy_group_name(orig_active_replica)
)
replica_volume_name, replica_vserver, replica_backend = (
self.get_backend_info_for_share(orig_active_replica))
replica_client = get_client_for_backend(
replica_backend, vserver_name=replica_vserver)
try:
replica_client.set_qos_policy_group_for_volume(
replica_volume_name, 'none')
replica_client.mark_qos_policy_group_for_deletion(
old_active_replica_qos_policy)
except exception.StorageCommunicationException:
LOG.exception("Could not communicate with the backend "
"for replica %s to unset QoS policy and mark "
"the QoS policy group for deletion.",
orig_active_replica['id'])
def create_snapmirror_svm(self, source_share_server,
dest_share_server):
"""Sets up a SnapMirror relationship between two vServers.
1. Create a SnapMirror policy for SVM DR
2. Create SnapMirror relationship
3. Initialize data transfer asynchronously
"""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)
# 1: Create SnapMirror policy for SVM DR
dest_backend_name = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
policy_name = self._get_backend_snapmirror_policy_name_svm(
dest_share_server['id'],
dest_backend_name,
)
dest_client.create_snapmirror_policy(policy_name)
# 2. Create SnapMirror relationship
dest_client.create_snapmirror_svm(src_vserver,
dest_vserver,
policy=policy_name,
schedule='hourly')
# 2. Initialize async transfer of the initial data
dest_client.initialize_snapmirror_svm(src_vserver,
dest_vserver)
def get_snapmirrors_svm(self, source_share_server, dest_share_server):
"""Get SnapMirrors between two vServers."""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)
snapmirrors = dest_client.get_snapmirrors_svm(
source_vserver=src_vserver, dest_vserver=dest_vserver,
desired_attributes=['relationship-status',
'mirror-state',
'last-transfer-end-timestamp'])
return snapmirrors
def get_snapmirror_destinations_svm(self, source_share_server,
dest_share_server):
"""Get SnapMirrors between two vServers."""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)
snapmirrors = dest_client.get_snapmirror_destinations_svm(
source_vserver=src_vserver, dest_vserver=dest_vserver)
return snapmirrors
def update_snapmirror_svm(self, source_share_server, dest_share_server):
"""Schedule a SnapMirror update to happen on the backend."""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)
# Update SnapMirror
dest_client.update_snapmirror_svm(src_vserver, dest_vserver)
def quiesce_and_break_snapmirror_svm(self, source_share_server,
dest_share_server):
"""Abort and break a SnapMirror relationship between vServers.
1. Quiesce SnapMirror
2. Break SnapMirror
"""
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
src_vserver = self.get_vserver_from_share_server(source_share_server)
# 1. Attempt to quiesce, then abort
self.quiesce_then_abort_svm(source_share_server, dest_share_server)
# 2. Break SnapMirror
dest_client.break_snapmirror_svm(src_vserver, dest_vserver)
def cancel_snapmirror_svm(self, source_share_server, dest_share_server):
"""Cancels SnapMirror relationship between vServers."""
dest_backend = share_utils.extract_host(dest_share_server['host'],
level='backend_name')
dest_config = get_backend_configuration(dest_backend)
server_timeout = (
dest_config.netapp_server_migration_state_change_timeout)
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
snapmirrors = self.get_snapmirrors_svm(source_share_server,
dest_share_server)
if snapmirrors:
# 1. Attempt to quiesce and break snapmirror
self.quiesce_and_break_snapmirror_svm(source_share_server,
dest_share_server)
            # NOTE(dviroel): Let's wait until the destination vserver is
            # promoted to 'default' and state 'running' before starting to
            # shut down the source.
self.wait_for_vserver_state(dest_vserver, dest_client,
subtype='default', state='running',
operational_state='stopped',
timeout=server_timeout)
# 2. Delete SnapMirror
self.delete_snapmirror_svm(source_share_server, dest_share_server)
else:
dest_info = dest_client.get_vserver_info(dest_vserver)
if dest_info is None:
# NOTE(dviroel): Nothing to cancel since the destination does
# not exist.
return
if dest_info.get('subtype') == 'dp_destination':
# NOTE(dviroel): Can be a corner case where no snapmirror
# relationship was found but the destination vserver is stuck
# in DP mode. We need to convert it to 'default' to release
# its resources later.
self.convert_svm_to_default_subtype(dest_vserver, dest_client,
timeout=server_timeout)
def convert_svm_to_default_subtype(self, vserver_name, client,
is_dest_path=True, timeout=300):
interval = 10
retries = (timeout / interval or 1)
@utils.retry(exception.VserverNotReady, interval=interval,
retries=retries, backoff_rate=1)
def wait_for_state():
vserver_info = client.get_vserver_info(vserver_name)
if vserver_info.get('subtype') != 'default':
if is_dest_path:
client.break_snapmirror_svm(dest_vserver=vserver_name)
else:
client.break_snapmirror_svm(source_vserver=vserver_name)
raise exception.VserverNotReady(vserver=vserver_name)
try:
wait_for_state()
except exception.VserverNotReady:
msg = _("Vserver %s did not reach the expected state. Retries "
"exhausted. Aborting.") % vserver_name
raise exception.NetAppException(message=msg)
def delete_snapmirror_svm(self, src_share_server, dest_share_server,
release=True):
"""Ensures all information about a SnapMirror relationship is removed.
1. Abort SnapMirror
2. Delete the SnapMirror
3. Release SnapMirror to cleanup SnapMirror metadata and snapshots
"""
src_client, src_vserver = self.get_client_and_vserver_name(
src_share_server)
dest_client, dest_vserver = self.get_client_and_vserver_name(
dest_share_server)
# 1. Abort any ongoing transfers
try:
dest_client.abort_snapmirror_svm(src_vserver, dest_vserver)
except netapp_api.NaApiError:
# SnapMirror is already deleted
pass
# 2. Delete SnapMirror Relationship and cleanup destination snapshots
try:
dest_client.delete_snapmirror_svm(src_vserver, dest_vserver)
except netapp_api.NaApiError as e:
with excutils.save_and_reraise_exception() as exc_context:
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
LOG.info('No snapmirror relationship to delete')
exc_context.reraise = False
# 3. Release SnapMirror
if release:
src_backend = share_utils.extract_host(src_share_server['host'],
level='backend_name')
src_config = get_backend_configuration(src_backend)
release_timeout = (
src_config.netapp_snapmirror_release_timeout)
self.wait_for_snapmirror_release_svm(src_vserver,
dest_vserver,
src_client,
timeout=release_timeout)
def wait_for_vserver_state(self, vserver_name, client, state=None,
operational_state=None, subtype=None,
timeout=300):
interval = 10
retries = (timeout / interval or 1)
expected = {}
if state:
expected['state'] = state
if operational_state:
expected['operational_state'] = operational_state
if subtype:
expected['subtype'] = subtype
@utils.retry(exception.VserverNotReady, interval=interval,
retries=retries, backoff_rate=1)
def wait_for_state():
vserver_info = client.get_vserver_info(vserver_name)
if not all(item in vserver_info.items() for
item in expected.items()):
raise exception.VserverNotReady(vserver=vserver_name)
try:
wait_for_state()
except exception.VserverNotReady:
msg = _("Vserver %s did not reach the expected state. Retries "
"exhausted. Aborting.") % vserver_name
raise exception.NetAppException(message=msg)
def wait_for_snapmirror_release_svm(self, source_vserver, dest_vserver,
src_client, timeout=300):
interval = 10
retries = (timeout / interval or 1)
@utils.retry(exception.NetAppException, interval=interval,
retries=retries, backoff_rate=1)
def release_snapmirror():
snapmirrors = src_client.get_snapmirror_destinations_svm(
source_vserver=source_vserver, dest_vserver=dest_vserver)
if not snapmirrors:
LOG.debug("No snapmirrors to be released in source location.")
else:
try:
src_client.release_snapmirror_svm(source_vserver,
dest_vserver)
except netapp_api.NaApiError as e:
if (e.code == netapp_api.EOBJECTNOTFOUND or
e.code == netapp_api.ESOURCE_IS_DIFFERENT or
"(entry doesn't exist)" in e.message):
                        LOG.debug('Snapmirror relationship does not exist '
                                  'anymore.')
msg = _('Snapmirror release sent to source vserver. We will '
'wait for it to be released.')
raise exception.NetAppException(vserver=msg)
try:
release_snapmirror()
except exception.NetAppException:
msg = _("Unable to release the snapmirror from source vserver %s. "
"Retries exhausted. Aborting") % source_vserver
raise exception.NetAppException(message=msg)
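A hedged usage sketch of the session object defined above. The method names and the 'host@backend#pool' host format follow this module and Manila conventions; the share dictionaries are illustrative stand-ins for real Manila share objects, showing only the 'id', 'host', and 'share_server' keys this code reads, and the backend names and UUIDs are made up.
# Hypothetical driver-side usage; the share dicts below are illustrative, not real Manila objects.
session = DataMotionSession()

source_share = {'id': '11111111-2222-3333-4444-555555555555',
                'host': 'host@backend_a#pool1',
                'share_server': None}
dest_share = {'id': '66666666-7777-8888-9999-000000000000',
              'host': 'host@backend_b#pool1',
              'share_server': None}

# Set up replication, then later inspect it and promote the destination.
session.create_snapmirror(source_share, dest_share)
snapmirrors = session.get_snapmirrors(source_share, dest_share)
if snapmirrors and snapmirrors[0].get('mirror-state') == 'snapmirrored':
    session.break_snapmirror(source_share, dest_share)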
|
The rider of the scooter, a boy, received serious injuries and was taken to the Queen’s Medical Centre in Nottingham.
OFFICERS from Leicestershire Police’s road policing unit are appealing for witnesses after a child riding a scooter received serious injuries in a collision with a car.
The collision occurred at 5.20pm on Thursday, September 25 on Halstead Road in Mountsorrel between a child’s scooter and a grey Honda Jazz.
The rider of the scooter, a boy, received serious injuries and was taken to the Queen’s Medical Centre in Nottingham where he has subsequently been discharged following treatment.
Pc 1281 Kev March, who is investigating the collision, said: “The Honda Jazz was travelling along Halstead Road towards the direction of Swithland Lane and the rider of the two-wheeled child’s scooter was attempting to cross the road when the collision occurred.”
Anyone with information should call the police on 101 or Crimestoppers, which is free and anonymous, on 0800 555111. |
from urllib import request
from html.parser import HTMLParser
NAME = "HD-Jiggly"
COOKIE = 'cookie.txt'
class parseLinks(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.data = []
self.flag_tbody = False
self.flag_span = False
self.flag_a = False
self.flag_td = False
self.flag_a2 = False
self.link = ''
self.title = ''
self.name = ''
def handle_starttag(self, tag, attrs):
if tag == 'tbody':
for name, value in attrs:
if name == 'id' and value.find("normalthread_") == 0:
self.flag_tbody = True
        if tag == 'span' and self.flag_tbody == True:
for name, value in attrs:
if name == 'id' and value.find("thread_") == 0:
self.flag_span = True
if tag == 'a' and self.flag_span == True:
for name, value in attrs:
if name == 'href' and value.find("thread-") == 0:
self.link = "http://174.127.195.166/forum/" + value
self.flag_a = True
        if tag == 'td' and self.flag_tbody == True:
for name, value in attrs:
if name == 'class' and value.find("author") == 0:
self.flag_td = True
if tag == 'a' and self.flag_td == True:
self.flag_a2 = True
def handle_data(self, data):
if self.flag_span == True and self.flag_a == True:
self.title = data
self.flag_span = False
self.flag_a = False
if self.flag_td == True and self.flag_a2 == True:
self.name = data
if(self.name == NAME):
self.data.append(self.title)
self.data.append(self.link)
self.flag_td = False
self.flag_a2 = False
self.flag_tbody = False
class crawlerSISID():
def __init__(self, index):
url = "http://174.127.195.166/forum/forum-463-" + str(index) + ".html"
self.parse = parseLinks()
self.request = request.urlopen(url)
self.parse.feed(self.request.read().decode('gbk'))
def crawlerData(self):
self.request.close()
return self.parse.data
# print(crawlerSISID(1).crawlerData())
|
29 Nov 2015 . The soothing feeling of wood decking and artificial turf under my feet as I relax . article on Outdoor Flooring Options for Singapore Balconies and Balcony . Lastly, setting a budget is the most important part of your renovation.
25 Feb 2014 . Durable, Safe, Easy to clean flooring for Singapore balconies . make a considered decision when renovating your balcony for your new home.
2 Jan 2015 . In Singapore, most of the renovation works are expensive. .. If you happen to be looking to do some decking in your house or balcony/patio or.
Balcony decking - which material works best? | Home & Decor .
9 Jan 2016 . What material works best for your balcony decking and what else do you need to consider?
Best Deck, Patio & Outdoor Enclosure Professionals in Singapore .
Search 12 Singapore deck, patio & outdoor enclosure professionals to find the best deck, . Design & Renovation . At Greenwood Timber, our mission is to provide an affordable, durable, environmental friendly timber flooring to. . for your deck, patio, balcony or verandah, your hired contractor will survey your lot to come.
21 Jan 2015 . Residential homes nowadays have balconies that allow . Evorich Flooring, is one of the most reputable flooring contractors in Singapore and.
cheapest #manufacturer wood balcony deck flooring | balcony decoration wpc floor. . #Flooring #Balcony #veranda #Decoration #FloorBoards #Terrace #renovation .. lightweight wood floor in Singapore , ecological wood floor for external.
13 balcony designs that'll put you at ease instantly | Balcony design .
See More. The Interarch Design - Photo 2 of 12 | Home & Decor Singapore · Singapore ... Wood decking that looks like parquet flooring is a perfect indoor/outdoor solution for a. Balcony .. 10 Beautiful Home Renovations Under $50,000.
Wood Decking & Flooring Contractor Archives - Renovation and .
Looking for an outdoor decking contractor for your balcony or planter decking? Get quality services for your outdoor decking. Singapore leading flooring expert.
25 Dec 2016 . Thanks to Absolut Outdoors, I built my balcony into a private . As many of you know, I finally got the keys to my new house and we spent a lot of time renovating it. . wood decking and they have many designs for you to choose from. . in landed properties and high-rise apartments in Singapore and are.
18 Jun 2016 . In Singapore, besides renovation guidelines for HDB flats or private . When it comes to flooring options, hardwood (solid wood) flooring can be.
Exterior renovation or outdoor renovation is becoming more popular. . For Condo Balcony, Exterior Renovation For HDB Balcony In Light Wood Floor Deck.
www.hdb.gov.sg under Living in HDB flats > Home Renovation > “Looking For .. e) Laying of floor finishes to balcony and bathrooms over existing finishes.
The wood flooring specialist in Singapore. . #weatherresistant #wpc #powerdekor #balcony #patios #sgproperty #condorenovation #sgproperty #outdoor.
#07-32 Singapore 757322. Email: [email protected] . in all our construction work ranging from Roofing & Decking to Fencing and General Renovation work. |
#!/usr/bin/env python3
"""
Handle logging
All logging is done through Qt signals/slots, so it can be used from other threads.
Copyright (C) 2020 Anders Lowinger, anders@abundo.se
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import datetime
import threading
import PyQt5.QtCore as QtCore
import PyQt5.QtWidgets as QtWidgets
class Log(QtCore.QObject):
"""
Log handler. Uses signals to be thread safe
Modeled so stdout/stderr can be directed to this class
"""
# Possible debug subsystems
DEBUG_FILES = 1 << 0
DEBUG_SETTINGS = 1 << 1
DEBUG_ACTIVITYMGR = 1 << 3
DEBUG_REPORTMGR = 1 << 4
DEBUG_MAINWIN = 1 << 5
DEBUG_OPTIONS = 1 << 6
DEBUG_SYSTRAY = 1 << 7
# Setup debug bitmask
DEBUG_LEVEL = 0
DEBUG_LEVEL |= DEBUG_FILES * 0
DEBUG_LEVEL |= DEBUG_SETTINGS * 0
DEBUG_LEVEL |= DEBUG_ACTIVITYMGR * 1
DEBUG_LEVEL |= DEBUG_REPORTMGR * 1
DEBUG_LEVEL |= DEBUG_MAINWIN * 1
DEBUG_LEVEL |= DEBUG_OPTIONS * 1
DEBUG_LEVEL |= DEBUG_SYSTRAY * 1
logTrigger = QtCore.pyqtSignal(int, str, str)
INFO = 0
WARNING = 1
ERROR = 2
DEBUG = 3
CONSOLE = 4
# Map from string to log level
level_dict = {
"info": INFO,
"warning": WARNING,
"error": ERROR,
"debug": DEBUG,
"console": CONSOLE,
}
def __init__(self):
super().__init__()
self.out = None # QT Widget for log output
self.levels = ["INFO", "WARNING", "ERROR", "DEBUG", "CONSOLE"]
self.level = self.CONSOLE
self.logTrigger.connect(self.log)
self._lines = [] # temp buffer until we have an output device
def add_row(self, line):
c = self.out.rowCount()
self.out.setRowCount(c + 1)
self.out.setItem(c, 0, QtWidgets.QTableWidgetItem(line[0]))
self.out.setItem(c, 1, QtWidgets.QTableWidgetItem(line[1]))
self.out.setItem(c, 2, QtWidgets.QTableWidgetItem(line[2]))
self.out.setItem(c, 3, QtWidgets.QTableWidgetItem(line[3]))
if c > 500:
self.out.removeRow(0)
self.out.resizeColumnsToContents()
self.out.scrollToBottom()
def setOut(self, out):
self.out = out
for line in self._lines:
self.add_row(line)
self._lines = []
def setLevel(self, level):
if isinstance(level, str):
level = self.level_dict[level]
self.level = level
def log(self, level, threadname, msg):
if level <= self.level:
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
msg = str(msg).replace("\n", ", ")
line = [now, threadname, self.levels[level], msg]
if self.out is None:
self._lines.append(line)
print(" ".join(line))
else:
self.add_row(line)
def info(self, msg):
self.logTrigger.emit(self.INFO, threading.current_thread().getName(), msg)
def warning(self, msg):
self.logTrigger.emit(self.WARNING, threading.current_thread().getName(), msg)
def error(self, msg):
self.logTrigger.emit(self.ERROR, threading.current_thread().getName(), msg)
def debug(self, msg):
self.logTrigger.emit(self.DEBUG, threading.current_thread().getName(), msg)
def debugf(self, mask, msg):
"""
Show debug message, if debug for this type is enabled
"""
if self.DEBUG_LEVEL & mask:
self.logTrigger.emit(self.DEBUG, threading.current_thread().getName(), msg)
def write(self, msg):
msg = msg.strip()
if msg:
self.logTrigger.emit(self.CONSOLE, threading.current_thread().getName(), msg.strip())
def flush(self):
# this is defined so we can redirect stdout/stderr here without warnings
pass
log = Log()
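# Minimal usage sketch (assumption: the real application attaches a QTableWidget via
# setOut(); without one, rows simply fall back to print()). Guarded so importing the
# module is unaffected.
if __name__ == "__main__":
    log.setLevel("debug")
    log.info("application started")
    log.warning("disk space is low")
    log.debugf(Log.DEBUG_ACTIVITYMGR, "only shown when the ACTIVITYMGR debug bit is set")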
|
Launch yourself into the market with this great 3 bedroom townhome at a fantastic price. Bright, sun-filled living and dining room and a large eat-in kitchen with ample cupboards and cabinets. A bright picture window overlooks the fenced-in backyard. Easy-care laminate runs throughout the main floor. The carpeted upstairs offers 3 bedrooms and a 4-piece bathroom. The large master suite enjoys a wall of closets. The basement is partially developed with a spacious finished family room. Updated windows and a newer washer and dryer. Woodbine offers both public and separate K-6 schools within the community, as well as local shopping at the nearby Woodbine Square, where you will find Safeway, Shoppers, and numerous other stores and services, including the coveted Patisserie du Soleil Bakery Cafe. Quick and easy access in and out of the community via Anderson Road and the soon-to-be-completed Ring Road. This home needs some TLC and updating, but at this price, this is your opportunity to build your own equity and add your unique, personal touch.
# -*- coding: utf-8 -*-
##########################################################################################
# Developer: Luan,Jingchao Project: HuMaIN (http://humain.acis.ufl.edu)
# Description:
# Script to invoke the OCRopus Segmentation microservice. Given the binarized images'
# directory or an image, for each image return a folder containing all segmented
# single-line images.
##########################################################################################
# Copyright 2017 Advanced Computing and Information Systems (ACIS) Lab - UF
# (https://www.acis.ufl.edu/)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################################
import requests, zipfile, StringIO
import sys, time, argparse, os, subprocess
import multiprocessing as mp
# Segmentation service URL
IP = "10.5.146.92"
PORT = "8102"
URL_SEG = "http://" + IP + ":" + PORT + "/segmentationapi"
SESSION = requests.Session()
def str2bool(v):
"""Transfer String to Boolean.
    Normalizes all affirmative strings to True and all negative strings to False.
Args:
v: original string.
Returns:
        The boolean corresponding to the original string. For example, returns True if the original string is "yes".
"""
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def arg_parse():
"""Parse argumentes input by user.
Returns:
        A dictionary-like variable 'args' which contains all arguments input by the user.
"""
parser = argparse.ArgumentParser("Call OCRopy Segmentation Service")
parser.add_argument('input', help="The path of an image file, or a folder containing all pre-process images.")
# output parameters
parser.add_argument('-o', '--output', default=None, help="output directory, without the last slash")
# limits
group_limits = parser.add_argument_group('limits')
group_limits.add_argument('--minscale',type=float,default=argparse.SUPPRESS, help='minimum scale permitted')
group_limits.add_argument('--maxlines',type=float,default=argparse.SUPPRESS, help='maximum # lines permitted')
# scale parameters
group_scale = parser.add_argument_group('scale parameters')
group_scale.add_argument('--scale',type=float,default=argparse.SUPPRESS, help='the basic scale of the document (roughly, xheight) 0=automatic')
group_scale.add_argument('--hscale',type=float,default=argparse.SUPPRESS, help='non-standard scaling of horizontal parameters')
group_scale.add_argument('--vscale',type=float,default=argparse.SUPPRESS, help='non-standard scaling of vertical parameters')
# line parameters
group_line = parser.add_argument_group('line parameters')
group_line.add_argument('--threshold',type=float,default=argparse.SUPPRESS, help='baseline threshold')
group_line.add_argument('--noise',type=int,default=argparse.SUPPRESS, help="noise threshold for removing small components from lines")
group_line.add_argument('--usegauss', type=str2bool, default=argparse.SUPPRESS, help='use gaussian instead of uniform')
# column parameters
group_column = parser.add_argument_group('column parameters')
group_column.add_argument('--maxseps',type=int,default=argparse.SUPPRESS, help='maximum black column separators')
group_column.add_argument('--sepwiden',type=int,default=argparse.SUPPRESS, help='widen black separators (to account for warping)')
group_column.add_argument('--maxcolseps',type=int,default=argparse.SUPPRESS, help='maximum # whitespace column separators')
group_column.add_argument('--csminheight',type=float,default=argparse.SUPPRESS, help='minimum column height (units=scale)')
# output parameters
group_column = parser.add_argument_group('output parameters')
    group_column.add_argument('--pad',type=int,default=argparse.SUPPRESS, help='padding for extracted lines')
group_column.add_argument('--expand',type=int,default=argparse.SUPPRESS, help='expand mask for grayscale extraction')
args = parser.parse_args()
# Set the default output folder
default_output = ""
if os.path.isfile(args.input):
default_output = os.path.dirname(args.input)
elif os.path.isdir(args.input):
default_output = args.input
else:
parser.print_help()
sys.exit(0)
# Verify or create the output folder
if args.output is None:
args.output = default_output
else:
if not os.path.isdir(args.output):
subprocess.call(["mkdir -p " + args.output], shell=True)
if not os.path.isdir(args.output):
print("Error: Destination folder %s could not be created" % (args.output))
sys.exit(0)
    args = vars(args) # Convert the Namespace object "args" to a dict-like object
return args
def call_seg(job):
"""Call Segmentation Service.
Call the Segmentation service, and store the segmented result locally.
Args:
        job: a tuple (image path, local path to store the result, parameters customized by the user).
"""
imagepath, dst_dir, parameters = job
    # Image file to upload
image = {'image': open(imagepath, 'rb')}
# Call segmentation service and get response
resp = SESSION.get(URL_SEG, files=image, data=parameters)
    # Decompress the zip file returned by the segmentation service and save it locally
if resp.status_code == 200:
# For python 3+, replace with io.BytesIO(resp.content)
z = zipfile.ZipFile(StringIO.StringIO(resp.content))
z.extractall(dst_dir)
print("[OK] '%s' segmentation success!" % os.path.basename(imagepath))
else:
print("[ERROR] '%s' segmentation error!" % os.path.basename(imagepath))
def main(args):
"""Main function.
    Call the Segmentation service for each image sequentially or in parallel.
"""
input_ = args['input']
output = args['output']
# Only keep the setable parameters
del args['input']
del args['output']
# Call segmentation service
if os.path.isfile(input_):
# one image using a single process
call_seg((input_, output, args))
SESSION.close()
elif os.path.isdir(input_):
        # multiple images: use multiple processes to call the segmentation service in parallel
jobs = []
for img in os.listdir(input_):
img_path = os.path.join(input_, img)
jobs.append((img_path, output, args))
pool = mp.Pool(processes=8) # #processes = #CPU by default
pool.map(call_seg, jobs)
# Close processes pool after it is finished
pool.close()
pool.join()
SESSION.close()
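# Example invocations (illustrative; the script name, paths and parameter values below
# are assumptions, not part of the original project):
#   python call_segmentation.py ./binarized_pages -o ./segmented_lines
#   python call_segmentation.py page_0001.bin.png --maxcolseps 3 --pad 3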
if __name__ == '__main__':
args = arg_parse()
main(args) |
The Kane Garden Club is displaying scarecrows in 10 flower planters along Fraley Street in Uptown Kane. Some of the club members involved with the project include, left to right: Sunne Gregg (vice president), Barb Woll (president), Wayne Gregg, June Ross, Janet Bard, Linda Rich and Jeanne Iannuzzi. Bard is chairman of the club's Carved Pumpkin Walk, which is scheduled Oct. 5-6 in Evergreen Park in Kane. |
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 11 09:04:55 2018
@author: AmatVictoriaCuramIII
"""
#Developed in Python 3.5
#R Multiple Finder; Trade Tracking
import numpy as np
import random as rand
import pandas as pd
import time as t
from DatabaseGrabber import DatabaseGrabber
from YahooGrabber import YahooGrabber
#Inputs - OHLC data
Ticker1 = 'VXX'
Asset1 = YahooGrabber(Ticker1)
##Tasty OHLC; ***ATTN*** insert path for OHLC data
#Asset1 = pd.read_pickle('C:\\Users\\Tasty\\Desktop\\WorkingDirectory\\UVXY')
#Declaration/Assignment
#Empty list
Empty = []
#Empty dataframe
Trades = pd.DataFrame()
##Timing statistics and iteration counter for optimization
#Start = t.time()
#Counter = 0
#start = t.time()
##The next 4 declarations are for use in fixed profit and loss based exits
##Exit stop loss - in percentages --------- however, looking to use ATR based stops
#LongStopLoss = .005
#ShortStopLoss = .005
##Exit profit take -------- However, looking to use other exits, time based, trailing, ATR, etc.
#LongProfitTake = .01
#ShortProfitTake = .01
#Constraints in percentages
Commission = .01
Slippage = .01
#Time series trimmer for in/out sample data
#Asset1a = Asset1[-1250:] #Out
Asset1 = Asset1[:] #In
#
#Numbered subindex
Asset1['SubIndex'] = range(1,len(Asset1)+1)
#Variable windows
donchianwindow = 15
exitwindow = 13
ATRwindow = 20
stopwindow = 13
Counter = 0
#Log Returns
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset1['Method1'] = Asset1['High'] - Asset1['Low']
Asset1['Method2'] = abs((Asset1['High'] - Asset1['Close'].shift(1)))
Asset1['Method3'] = abs((Asset1['Low'] - Asset1['Close'].shift(1)))
Asset1['Method1'] = Asset1['Method1'].fillna(0)
Asset1['Method2'] = Asset1['Method2'].fillna(0)
Asset1['Method3'] = Asset1['Method3'].fillna(0)
Asset1['TrueRange'] = Asset1[['Method1','Method2','Method3']].max(axis = 1)
Asset1['ATR'] = Asset1['TrueRange'].rolling(window = ATRwindow,
center=False).mean()
##Market top and bottom calculation
Asset1['RollingMax'] = Asset1['High'].rolling(window=donchianwindow, center=False).max()
Asset1['RollingMin'] = Asset1['Low'].rolling(window=donchianwindow, center=False).min()
#Asset1[['RollingMax','RollingMin','Adj Close']].plot()
##Signal = price breaking the Donchian channel
##if the high breaks above the rolling max, go long (+1)
##if the low breaks below the rolling min, go short (-1)
Asset1['Signal'] = np.where(Asset1['High'] >= Asset1['RollingMax'].shift(1) , 1, 0)
Asset1['Signal'] = np.where(Asset1['Low'] <= Asset1['RollingMin'].shift(1) , -1, Asset1['Signal'])
#if Rolling Min/Max is still being computed, stay out of market
Asset1['Signal'] = np.where(Asset1['RollingMax'].isnull(), 0, Asset1['Signal'])  # comparing with == np.nan is always False; use isnull()
#To help identify "regime changes" i.e. last signal switches from short to long, vice versa
#Asset1['FilledSignal'] = np.where(Asset1['Signal'] == 0, np.nan, Asset1['Signal'] )
#Asset1['FilledSignal'] = Asset1['FilledSignal'].ffill(inplace = False)
#Asset1['FilledSignal'] = Asset1['FilledSignal'].fillna(0)
#Signal sub index numbers for segmenting data for trade analysis
SignalDates = Asset1['SubIndex'].loc[((Asset1['Signal'] != 0))]
#Trade ATR for signal
Asset1['TradeATR'] = np.where(Asset1['Signal'] != 0, Asset1['ATR'].shift(1), np.nan)
#experimental exits
Asset1['LimitExitPrice'] = np.nan
Asset1['ShortExitPrice'] = Asset1['High'].rolling(window=stopwindow, center=False).max()
Asset1['LongExitPrice'] = Asset1['Low'].rolling(window=stopwindow, center=False).min()
#Find the first trade of the signal period, so we can document entry prices
#Declare columns to record entry price and stop for unit one
Asset1['EntryPriceUnitOne'] = np.nan
Asset1['StopPriceUnitOne'] = np.nan
#Be sure to check for gaps on first unit entry and later on exits.
#Default stops and entries
#Long entry first unit
Asset1['EntryPriceUnitOne'] = np.where(Asset1['Signal'] == 1,
Asset1['RollingMax'].shift(1) + .01, np.nan)
#Long gap entry first unit
Asset1['EntryPriceUnitOne'].loc[(Asset1['Signal'] == 1) & (
Asset1['Open'] > Asset1['EntryPriceUnitOne'])] = Asset1['Open']
#Short entry first unit
Asset1['EntryPriceUnitOne'] = np.where(Asset1['Signal'] == -1,
Asset1['RollingMin'].shift(1) - .01, Asset1['EntryPriceUnitOne'])
#Short gap entry first unit
Asset1['EntryPriceUnitOne'].loc[(Asset1['Signal'] == -1) & (
Asset1['Open'] < Asset1['EntryPriceUnitOne'])] = Asset1['Open']
#Long stop first unit
Asset1['StopPriceUnitOne'] = np.where(Asset1['Signal'] == 1,
Asset1['EntryPriceUnitOne'] - (Asset1['TradeATR'] * 2), np.nan)
#Short stop first unit
Asset1['StopPriceUnitOne'] = np.where(Asset1['Signal'] == -1,
Asset1['EntryPriceUnitOne'] + (Asset1['TradeATR'] * 2), Asset1['StopPriceUnitOne'])
#Experimental exits
Asset1['HybridShortExitPrice'] = np.where(Asset1['ShortExitPrice'] < Asset1['StopPriceUnitOne'],
Asset1['ShortExitPrice'], Asset1['StopPriceUnitOne'])
Asset1['HybridLongExitPrice'] = np.where(Asset1['LongExitPrice'] > Asset1['StopPriceUnitOne'],
Asset1['LongExitPrice'], Asset1['StopPriceUnitOne'])
Asset1['HybridShortExitPrice'] = Asset1['HybridShortExitPrice'].ffill()
Asset1['HybridLongExitPrice'] = Asset1['HybridLongExitPrice'].ffill()
#This is a profit target for long trades
Asset1['LimitExitPrice'] = np.where(Asset1['Signal'] == 1,
Asset1['EntryPriceUnitOne'] + (5 * Asset1['TradeATR']), np.nan)
#This is a profit target for short trades
Asset1['LimitExitPrice'] = np.where(Asset1['Signal'] == -1,
Asset1['EntryPriceUnitOne'] - (5 * Asset1['TradeATR']), Asset1['LimitExitPrice'])
#Begin loops for individual trade examination
#Novice indexing abilities
TradeRanger = range(0,len(SignalDates))
#for r in TradeRanger:
TradeSubset = Asset1.loc[(Asset1['SubIndex'] >= SignalDates[0])]
#TradeSubset = Asset1.loc[(Asset1['SubIndex'] >= 59) & (Asset1['SubIndex'] <= 87)]
TradeDirection = TradeSubset['Signal'][0]
TradeSubset['Exit'] = 0
#
#Short exit, 1 = yes, 0 = no
TradeSubset['ShortExit'] = 0
#Long exit, 1 = yes, 0 = no
TradeSubset['LongExit'] = 0
#Did the exit gap overnight? or hit after open
TradeSubset['GapShortExit'] = 0
#Did the exit gap overnight? or hit after open
TradeSubset['GapLongExit'] = 0
##Experimental exits
#TradeSubset['HybridShortExitPrice'] = np.where(TradeSubset['ShortExitPrice'] < TradeSubset['StopPriceUnitOne'],
# TradeSubset['ShortExitPrice'], TradeSubset['StopPriceUnitOne'])
#TradeSubset['HybridLongExitPrice'] = np.where(TradeSubset['LongExitPrice'] > TradeSubset['StopPriceUnitOne'],
# TradeSubset['LongExitPrice'], TradeSubset['StopPriceUnitOne'])
#TradeSubset['HybridShortExitPrice'] = TradeSubset['HybridShortExitPrice'].ffill()
#TradeSubset['HybridLongExitPrice'] = TradeSubset['HybridLongExitPrice'].ffill()
#
#
#
#
#
#1 = Short exit being hit starting the day of the signal.
if TradeDirection == -1:
TradeSubset['ShortExit'].loc[(TradeSubset['High'] > TradeSubset['HybridShortExitPrice'])] = 1
if TradeDirection == 1:
#1 = Long exit being hit starting the day of the signal.
TradeSubset['LongExit'].loc[(TradeSubset['Low'] < TradeSubset['HybridLongExitPrice'])] = 1
#Assess Gaps on days where trade closes
TradeSubset['GapShortExit'].loc[(TradeSubset['ShortExit'] == 1) & (
TradeSubset['Open'] > TradeSubset['HybridShortExitPrice'])] = 1
TradeSubset['GapLongExit'].loc[(TradeSubset['LongExit'] == 1) & (
TradeSubset['Open'] < TradeSubset['HybridLongExitPrice'])] = 1
#Types of exit
TradeSubset['Exit'].loc[(TradeSubset['ShortExit'] == 1)] = 1 #1 indicating short exit
TradeSubset['Exit'].loc[(TradeSubset['LongExit'] == 1)] = 2 #1 indicating long exit
TradeSubset['Exit'].loc[(TradeSubset['GapShortExit'] == 1)] = 3 #1 indicating short exit w/ gap
TradeSubset['Exit'].loc[(TradeSubset['GapLongExit'] == 1)] = 4 #1 indicating long exit w/ gap
while sum(abs(TradeSubset['Exit'])) != 0:
#while Counter < 1:
EntryPriceUnitOne = TradeSubset['EntryPriceUnitOne'][0]
StopPriceUnitOne = TradeSubset['StopPriceUnitOne'][0]
#TradeDirection
TradeDirection = TradeSubset['Signal'][0]
#I have to figure out how to add units..
#TradeSubset['Units'] = 1
#
TradeSubset['Exit'] = 0
#
#Short exit, 1 = yes, 0 = no
TradeSubset['ShortExit'] = 0
#Long exit, 1 = yes, 0 = no
TradeSubset['LongExit'] = 0
#Did the exit gap overnight? or hit after open
TradeSubset['GapShortExit'] = 0
#Did the exit gap overnight? or hit after open
TradeSubset['GapLongExit'] = 0
##Experimental exits
#TradeSubset['HybridShortExitPrice'] = np.where(TradeSubset['ShortExitPrice'] < TradeSubset['StopPriceUnitOne'],
# TradeSubset['ShortExitPrice'], TradeSubset['StopPriceUnitOne'])
#TradeSubset['HybridLongExitPrice'] = np.where(TradeSubset['LongExitPrice'] > TradeSubset['StopPriceUnitOne'],
# TradeSubset['LongExitPrice'], TradeSubset['StopPriceUnitOne'])
#TradeSubset['HybridShortExitPrice'] = TradeSubset['HybridShortExitPrice'].ffill()
#TradeSubset['HybridLongExitPrice'] = TradeSubset['HybridLongExitPrice'].ffill()
#
#
#
#
#
#1 = Short exit being hit starting the day of the signal.
if TradeDirection == -1:
TradeSubset['ShortExit'].loc[(TradeSubset['High'] > TradeSubset['HybridShortExitPrice'])] = 1
if TradeDirection == 1:
#1 = Long exit being hit starting the day of the signal.
TradeSubset['LongExit'].loc[(TradeSubset['Low'] < TradeSubset['HybridLongExitPrice'])] = 1
#Assess Gaps on days where trade closes
TradeSubset['GapShortExit'].loc[(TradeSubset['ShortExit'] == 1) & (
TradeSubset['Open'] > TradeSubset['HybridShortExitPrice'])] = 1
TradeSubset['GapLongExit'].loc[(TradeSubset['LongExit'] == 1) & (
TradeSubset['Open'] < TradeSubset['HybridLongExitPrice'])] = 1
#Types of exit
TradeSubset['Exit'].loc[(TradeSubset['ShortExit'] == 1)] = 1 #1 indicating short exit
TradeSubset['Exit'].loc[(TradeSubset['LongExit'] == 1)] = 2 #1 indicating long exit
TradeSubset['Exit'].loc[(TradeSubset['GapShortExit'] == 1)] = 3 #1 indicating short exit w/ gap
TradeSubset['Exit'].loc[(TradeSubset['GapLongExit'] == 1)] = 4 #1 indicating long exit w/ gap
#
#
#
#
    #Generator expression to find the first exit taken for the subset.
#The next function gives a position on the TradeSubset index
ExitTaken = TradeSubset['Exit'][next((n for n, x in enumerate(TradeSubset['Exit']) if x), 0)]
#The length of the trade
LengthOfTrade = int(next((n for n, x in enumerate(TradeSubset['Exit']) if x), 0))
    #The SubIndex of the exit date is for continuing to look for re-entry in a new subset
SubIndexOfEntry = TradeSubset['SubIndex'][0]
SubIndexOfExit = TradeSubset['SubIndex'][next((n for n, x in enumerate(TradeSubset['Exit']) if x), 0)]
OpenPriceOnGap = TradeSubset['Open'][LengthOfTrade]
if ExitTaken == 1: # if exiting short trade, exit during market day
TradeReturn = (EntryPriceUnitOne - StopPriceUnitOne)/EntryPriceUnitOne
    elif ExitTaken == 2: # if exiting long trade, exit during market day
TradeReturn = (StopPriceUnitOne - EntryPriceUnitOne)/EntryPriceUnitOne
elif ExitTaken == 3: # if exiting short trade with gap
TradeReturn = (EntryPriceUnitOne - OpenPriceOnGap)/EntryPriceUnitOne
elif ExitTaken == 4: # if exiting long trade with gap
TradeReturn = (OpenPriceOnGap - EntryPriceUnitOne)/EntryPriceUnitOne
#Log Trade details in Trade dataframe
Empty.append(ExitTaken)
Empty.append(LengthOfTrade)
Empty.append(EntryPriceUnitOne)
Empty.append(StopPriceUnitOne)
Empty.append(SubIndexOfEntry)
Empty.append(SubIndexOfExit)
Empty.append(TradeDirection)
Empty.append(OpenPriceOnGap)
Empty.append(TradeReturn)
#Empty.append(RMultiple)
Emptyseries = pd.Series(Empty)
Trades[Counter] = Emptyseries.values
Empty[:] = []
#This trimmer trims the Trade out of the TradeSubset, then trims to the next signal!
TradeSubset = TradeSubset[(LengthOfTrade + 1):]
SignalTrim = next((n for n, x in enumerate(TradeSubset['Signal']) if x), 0)
TradeSubset = TradeSubset[SignalTrim:]
#
Counter = Counter + 1
print(Counter)
#The last trade that is still open
if sum(abs(TradeSubset['Signal'])) != 0:
EntryPriceUnitOne = TradeSubset['EntryPriceUnitOne'][0]
StopPriceUnitOne = TradeSubset['StopPriceUnitOne'][0]
ExitTaken = 0
LengthOfTrade = len(TradeSubset)
TradeDirection = TradeSubset['Signal'][0]
if TradeDirection == 1:
TradeReturn = (TradeSubset['HybridLongExitPrice'][-1] - EntryPriceUnitOne)/EntryPriceUnitOne
# etc..
elif TradeDirection == -1:
        TradeReturn = (EntryPriceUnitOne - TradeSubset['HybridShortExitPrice'][-1])/EntryPriceUnitOne  # short trades mark to the short exit price
SubIndexOfEntry = TradeSubset['SubIndex'][0]
SubIndexOfExit = np.nan
OpenPriceOnGap = np.nan
Empty.append(ExitTaken)
Empty.append(LengthOfTrade)
Empty.append(EntryPriceUnitOne)
Empty.append(StopPriceUnitOne)
Empty.append(SubIndexOfEntry)
Empty.append(SubIndexOfExit)
Empty.append(TradeDirection)
Empty.append(OpenPriceOnGap)
Empty.append(TradeReturn)
Emptyseries = pd.Series(Empty)
Trades[Counter] = Emptyseries.values
Empty[:] = []
Trades = Trades.rename(index={0: "ExitTaken", 1: "LengthOfTrade", 2: "EntryPriceUnitOne",
3: "StopPriceUnitOne", 4: "SubIndexOfEntry", 5: "SubIndexOfExit",
6: "TradeDirection", 7: "OpenPriceOnGap", 8: "TradeReturn"})
Asset1['Brackets'] = 1
for d in Trades:
Asset1['Brackets'].loc[(Asset1['SubIndex'] == Trades[d]['SubIndexOfExit'])] = 1 + Trades[d]['TradeReturn']
NumWinningTrades = len(Asset1['Brackets'][Asset1['Brackets'] > 1])
NumLosingTrades = len(Asset1['Brackets'][Asset1['Brackets'] < 1])
AvgWin = Asset1['Brackets'][Asset1['Brackets'] > 1].mean()
AvgLoss = Asset1['Brackets'][Asset1['Brackets'] < 1].mean()
RewardRisk = AvgWin/AvgLoss
WinRate = NumWinningTrades / (NumWinningTrades + NumLosingTrades)
LossRate = NumLosingTrades / (NumWinningTrades + NumLosingTrades)
Expectancy = (WinRate * RewardRisk) - (LossRate)
Asset1['Multiplier'] = Asset1['Brackets'].cumprod()
Asset1['Multiplier'].plot()
print(Expectancy)
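#Illustrative arithmetic only (assumed numbers, not results from this data set):
#with NumWinningTrades = 40 and NumLosingTrades = 60, WinRate = 0.40 and LossRate = 0.60;
#with AvgWin = 1.08 and AvgLoss = 0.96, RewardRisk = 1.08/0.96 = 1.125,
#so Expectancy = (0.40 * 1.125) - 0.60 = -0.15, i.e. a losing system despite decent winners.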
#for a in Trades:
# if Trades[a][8] == np.nan:
# Trades = Trades.drop(a)
#In the event that we are stopped out, we want to continue to look for rentry.
#while sum(abs(TradeSubset['Signal'])) != 0:
# EntryPriceUnitOne = TradeSubset['EntryPriceUnitOne'][0]
# StopPriceUnitOne = TradeSubset['StopPriceUnitOne'][0]
#
# #I have to figure out how to add units..
# TradeSubset['Units'] = 1
#
# TradeSubset['Exit'] = 0
#
# #Short exit, 1 = yes, 0 = no
# TradeSubset['ShortExit'] = 0
# #Long exit, 1 = yes, 0 = no
# TradeSubset['LongExit'] = 0
# #Did the exit gap overnight? or hit after open
# TradeSubset['GapShortExit'] = 0
# #Did the exit gap overnight? or hit after open
# TradeSubset['GapLongExit'] = 0
#
# #1 = Short exit being hit starting the day of the signal.
# TradeSubset['ShortExit'].loc[(Asset1['FilledSignal'] == -1) & (
# TradeSubset['High'] > TradeSubset['HybridShortExitPrice'])] = 1
# #1 = Long exit being hit starting the day of the signal.
# TradeSubset['LongExit'].loc[(Asset1['FilledSignal'] == 1) & (
# TradeSubset['Low'] < TradeSubset['HybridLongExitPrice'])] = 1
#
#
# #Assess Gaps on days where trade closes
# TradeSubset['GapShortExit'].loc[(TradeSubset['ShortExit'] == 1) & (
# TradeSubset['Open'] > TradeSubset['HybridShortExitPrice'])] = 1
#
# TradeSubset['GapLongExit'].loc[(TradeSubset['LongExit'] == 1) & (
# TradeSubset['Open'] < TradeSubset['HybridLongExitPrice'])] = 1
#
# #Types of exit
# TradeSubset['Exit'].loc[(TradeSubset['ShortExit'] == 1)] = 1 #1 indicating short exit
# TradeSubset['Exit'].loc[(TradeSubset['LongExit'] == 1)] = 2 #1 indicating long exit
# TradeSubset['Exit'].loc[(TradeSubset['GapShortExit'] == 1)] = 3 #1 indicating short exit w/ gap
# TradeSubset['Exit'].loc[(TradeSubset['GapLongExit'] == 1)] = 4 #1 indicating long exit w/ gap
#
# #List comprehension to find exit taken for subset.
# #The next function gives a position on the TradeSubset index
# ExitTaken = TradeSubset['Exit'][next((n for n, x in enumerate(TradeSubset['Exit']) if x), 0)]
# #The length of the trade
# LengthOfTrade = int(next((n for n, x in enumerate(TradeSubset['Exit']) if x), 0))
# #The SubIndex of the exit date is for continuing looking for rentry in new subset
# SubIndexOfExit = TradeSubset['SubIndex'][next((n for n, x in enumerate(TradeSubset['Exit']) if x), 0)]
# SubIndexOfEntry = TradeSubset['SubIndex'][0]
# #TradeDirection
# TradeDirection = TradeSubset['Signal'][0]
# OpenPriceOnGap = TradeSubset['Open'][LengthOfTrade]
# if ExitTaken == 1: # if exiting short trade, exit during market day
# TradeReturn = (EntryPriceUnitOne - StopPriceUnitOne)/EntryPriceUnitOne
# elif ExitTaken == 2: # if exiting long trade, exitduring market day
# TradeReturn = (StopPriceUnitOne - EntryPriceUnitOne)/EntryPriceUnitOne
# elif ExitTaken == 3: # if exiting short trade with gap
# TradeReturn = (EntryPriceUnitOne - OpenPriceOnGap)/EntryPriceUnitOne
# elif ExitTaken == 4: # if exiting long trade with gap
# TradeReturn = (OpenPriceOnGap - EntryPriceUnitOne)/EntryPriceUnitOne
#
# #In the event that we are stopped out, we want to continue to look for rentry.
# TradeSubset = TradeSubset[(LengthOfTrade + 1):]
#Create individual trade subsets for examination
#TradeSubIndex = Asset1['SubIndex'].loc[(Asset1['OriginalSignal'] != 0)]
#TradeDates = pd.DataFrame()
#try:
# for i in range(0, len(TradeSubIndex)):
# TradeDates[i] = TradeSubIndex[i]-1,TradeSubIndex[i+1]
#except IndexError:
# pass
#quick reference matrix for exits
#ExitReturns = pd.Series(index=range(0,10))
#ExitReturns[0] = 0
#ExitReturns[1] = 1 + LongProfitTake
#ExitReturns[2] = 1 + ShortProfitTake
#ExitReturns[3] = 0
#ExitReturns[4] = 0
#ExitReturns[5] = 1 - LongStopLoss
#ExitReturns[6] = 1 - ShortStopLoss
#ExitReturns[7] = 1 - LongStopLoss
#ExitReturns[8] = 1 - ShortStopLoss
#ExitReturns[9] = 0
#ExitReturns[10] = 0
#Short, units added
#TradeSubset['Units'].loc[(TradeSubset['FilledSignal'][0] == -1) & (Asset1['Low'] < EntryPriceUnitTwo)] = 2
#TradeSubset['Units'].loc[(TradeSubset['FilledSignal'][0] == -1) & (Asset1['Low'] < EntryPriceUnitThree)] = 3
#TradeSubset['Units'].loc[(TradeSubset['FilledSignal'][0] == -1) & (Asset1['Low'] < EntryPriceUnitFour)] = 4
#Long, units added
#TradeSubset['Units'].loc[(TradeSubset['FilledSignal'][0] == 1) & (Asset1['High'] > EntryPriceUnitTwo)] = 2
#TradeSubset['Units'].loc[(TradeSubset['FilledSignal'][0] == 1) & (Asset1['High'] > EntryPriceUnitThree)] = 3
#TradeSubset['Units'].loc[(TradeSubset['FilledSignal'][0] == 1) & (Asset1['High'] > EntryPriceUnitFour)] = 4
#for l in range(0,len(TradeSubset['Units'])):
# TradeSubset['Units'].loc[(TradeSubset['Units'] < TradeSubset['Units'].shift(1))] = TradeSubset['Units'].shift(1)
#TradeSubset['Units'].loc[(TradeSubset['Units'] < TradeSubset['Units'].shift(1))] = TradeSubset['Units'].shift(1)
##If it's the original signal, record entry price
#Asset1['EntryPrice'].loc[(Asset1['OriginalSignal'] != 0)] = Asset1['Adj Close']
#
##Assess spread/unfavorable fills here!
##Asset1['EntryPriceSlippage'] = Asset1['EntryPrice']
##Long slippage
##Asset1['EntryPriceSlippage'].loc[(Asset1['EntryPrice'] != 0) & (
## Asset1['Signal'] == 1)] = Asset1['EntryPrice'] * (1 + Slippage)
##Short slippage
##Asset1['EntryPriceSlippage'].loc[(Asset1['EntryPrice'] != 0) & (
## Asset1['Signal'] == -1)] = Asset1['EntryPrice'] * (1 - Slippage)
##
##Run the entry price DOWN the column until new position is taken
##Asset1['EntryPriceSlippage'] = Asset1['EntryPriceSlippage'].ffill(inplace=False)
##Fill nan with 0 for entry price
##Asset1['EntryPriceSlippage'] = Asset1['EntryPriceSlippage'].fillna(0)
#
##Declare StopPrice column
#Asset1['StopPrice'] = np.nan
##Long stop calculation
#Asset1['StopPrice'].loc[(Asset1['EntryPrice'] != 0) & (
# Asset1['OriginalSignal'] == 1)] = Asset1['EntryPrice'] * (1 - LongStopLoss)
##Short stop calculation
#Asset1['StopPrice'].loc[(Asset1['EntryPrice'] != 0) & (
# Asset1['OriginalSignal'] == -1)] = Asset1['EntryPrice'] * (1 + ShortStopLoss)
##Forward fill
#Asset1['StopPrice'] = Asset1['StopPrice'].ffill(inplace=False)
#Asset1['StopPrice'] = Asset1['StopPrice'].fillna(0)
#
##Declare ProfitPrice column
#Asset1['ProfitPrice'] = np.nan
##Long stop calculation
#Asset1['ProfitPrice'].loc[(Asset1['EntryPrice'] != 0) & (
# Asset1['OriginalSignal'] == 1)] = Asset1['EntryPrice'] * (1 + LongProfitTake)
##Short stop calculation
#Asset1['ProfitPrice'].loc[(Asset1['EntryPrice'] != 0) & (
# Asset1['OriginalSignal'] == -1)] = Asset1['EntryPrice'] * (1 - ShortProfitTake)
##Forward fill
#Asset1['ProfitPrice'] = Asset1['ProfitPrice'].ffill(inplace=False)
#Asset1['ProfitPrice'] = Asset1['ProfitPrice'].fillna(0)
#
#Asset1['Exit'] = 0
##This will be the final return stream. Generally I use a regime of
##(-1, or 0, or +1) multiplied by the next day's log return to get equity curve
#Asset1['BracketReturns'] = 1
#
##Short Take Gain exit, 1 = yes, 0 = no
#Asset1['STG'] = 0
##Short Take Gain exit, 1 = yes, 0 = no
#Asset1['SSL'] = 0
##Short Stop Loss exit, 1 = yes, 0 = no
#Asset1['LTG'] = 0
##Long Stop Loss exit, 1 = yes, 0 = no
#Asset1['LSL'] = 0
#
##For initial exits
#Asset1['OriginalSTG'] = 0
#Asset1['OriginalSSL'] = 0
#Asset1['OriginalLTG'] = 0
#Asset1['OriginalLSL'] = 0
#
#Asset1['GapSTG'] = 0
#Asset1['GapSSL'] = 0
#Asset1['GapLTG'] = 0
#Asset1['GapLSL'] = 0
#
##1 = STG being hit starting the day after the signal. After initial hit, 1s
##will run down the column even though the trade should be closed
#Asset1['STG'].loc[(Asset1['Signal'] == -1) & (
# Asset1['OriginalSignal'] == 0) & (Asset1['Low'] < Asset1['ProfitPrice'])] = 1
##find initial exit
##Asset1['OriginalSTG'].loc[Asset1['STG'] != Asset1['STG'].shift(1)] = Asset1['STG']
#
#Asset1['LTG'].loc[(Asset1['Signal'] == 1) & (
# Asset1['OriginalSignal'] == 0) & (Asset1['High'] > Asset1['ProfitPrice'])] = 1
##Asset1['OriginalLTG'].loc[Asset1['LTG'] != Asset1['LTG'].shift(1)] = Asset1['LTG']
#
#Asset1['SSL'].loc[(Asset1['Signal'] == -1) & (
# Asset1['OriginalSignal'] == 0) & (Asset1['High'] > Asset1['StopPrice'])] = 1
##Asset1['OriginalSSL'].loc[Asset1['STG'] != Asset1['SSL'].shift(1)] = Asset1['SSL']
#
#Asset1['LSL'].loc[(Asset1['Signal'] == 1) & (
# Asset1['OriginalSignal'] == 0) & (Asset1['Low'] < Asset1['StopPrice'])] = 1
##Asset1['OriginalLSL'].loc[Asset1['LSL'] != Asset1['LSL'].shift(1)] = Asset1['LSL']
#
##Assess Gaps on days where trade closes
#Asset1['GapSTG'].loc[(Asset1['STG'] == 1) & (
# Asset1['Open'] < Asset1['ProfitPrice'])] = 1
#Asset1['GapSSL'].loc[(Asset1['SSL'] == 1) & (
# Asset1['Open'] > Asset1['StopPrice'])] = 1
#Asset1['GapLTG'].loc[(Asset1['LTG'] == 1) & (
# Asset1['Open'] > Asset1['ProfitPrice'])] = 1
#Asset1['GapLSL'].loc[(Asset1['LSL'] == 1) & (
# Asset1['Open'] < Asset1['StopPrice'])] = 1
#
##Days where StopPrice and ProfitPrice are both touched
#Asset1['LongDD'] = np.where((Asset1['LTG'] + Asset1['LSL']) == 2, 1, 0)
#Asset1['ShortDD'] = np.where((Asset1['STG'] + Asset1['SSL']) == 2, 1, 0)
#Asset1['DoubleDay'] = Asset1['LongDD'] + Asset1['ShortDD']
#
##Exit on DoubleDays - 1 & 2; LTG - 3; LSL - 4; STG - 5, SSL - 6.
##Preference given to stoploss on 'expensive' days
#Asset1['Exit'].loc[(Asset1['LTG'] == 1)] = 1 #exit as gain
#Asset1['Exit'].loc[(Asset1['STG'] == 1)] = 2 #exit as gain
#Asset1['Exit'].loc[(Asset1['GapSTG'] == 1)] = 3 #exit as gain
#Asset1['Exit'].loc[(Asset1['GapLTG'] == 1)] = 4 #exit as gain
#Asset1['Exit'].loc[(Asset1['LSL'] == 1)] = 5 #exit as loss
#Asset1['Exit'].loc[(Asset1['SSL'] == 1)] = 6 #exit as loss
#Asset1['Exit'].loc[(Asset1['LongDD'] == 1)] == 7 #exit long position at loss
#Asset1['Exit'].loc[(Asset1['ShortDD'] == 1)] == 8 #exit as short position at loss
#Asset1['Exit'].loc[(Asset1['GapSSL'] == 1)] = 9 #exit as loss
#Asset1['Exit'].loc[(Asset1['GapLSL'] == 1)] = 10 #exit as loss
#
##Create individual trade subsets for examination
#TradeSubIndex = Asset1['SubIndex'].loc[(Asset1['OriginalSignal'] != 0)]
#TradeDates = pd.DataFrame()
#try:
# for i in range(0, len(TradeSubIndex)):
# TradeDates[i] = TradeSubIndex[i]-1,TradeSubIndex[i+1]
#except IndexError:
# pass
#
##quick reference matrix for exits
#ExitReturns = pd.Series(index=range(0,10))
#ExitReturns[0] = 0
#ExitReturns[1] = 1 + LongProfitTake
#ExitReturns[2] = 1 + ShortProfitTake
#ExitReturns[3] = 0
#ExitReturns[4] = 0
#ExitReturns[5] = 1 - LongStopLoss
#ExitReturns[6] = 1 - ShortStopLoss
#ExitReturns[7] = 1 - LongStopLoss
#ExitReturns[8] = 1 - ShortStopLoss
#ExitReturns[9] = 0
#ExitReturns[10] = 0
#
##Trade Analysis from 0th trade
#for ii in TradeDates.columns:
# TradeData = Asset1[TradeDates[ii][0]:TradeDates[ii][1]]
# #the 'next' function yields index position of first non 0 exit
# ExitTaken = TradeData['Exit'][next((n for n, x in enumerate(TradeData['Exit']) if x), 0)]
# SubIndexOfExit = TradeData['SubIndex'][next((n for n, x in enumerate(TradeData['Exit']) if x), 0)]
# TradeDuration = len(TradeData) - 1
# TradeDirection = TradeData['Signal'][0]
# TradeReturn = ExitReturns[ExitTaken]
# RMultiple = (1 - TradeReturn)/ShortStopLoss
##If no stops are hit and there is a signal change, take P/L and switch position
# if ExitTaken == 0:
# SubIndexOfExit = TradeData['SubIndex'][-1]
# if TradeDirection == 1:
# TradeReturn = 1 + ((TradeData['Adj Close'][-1] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
# elif TradeDirection == -1:
# TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Adj Close'][-1])/TradeData['Adj Close'][0])
# else:
# pass
##Assess Gaps
# #GAP STG
# if ExitTaken == 3:
# TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Open'][TradeDuration])/TradeData['Adj Close'][0])
# else:
# pass
# #GAP LTG
# if ExitTaken == 4:
# TradeReturn = 1 + ((TradeData['Open'][TradeDuration] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
# else:
# pass
# #GAP SSL
# if ExitTaken == 9:
# TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Open'][TradeDuration])/TradeData['Adj Close'][0])
# else:
# pass
# #GAP LSL
# if ExitTaken == 10:
# TradeReturn = 1 + ((TradeData['Open'][TradeDuration] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
# else:
# pass
# RMultiple = (TradeReturn - 1)/ShortStopLoss
# Empty.append(ExitTaken)
# Empty.append(SubIndexOfExit)
# Empty.append(TradeDuration)
# Empty.append(TradeDirection)
# Empty.append(TradeReturn)
# Empty.append(RMultiple)
# Emptyseries = pd.Series(Empty)
# Dataset[ii] = Emptyseries.values
# Empty[:] = []
##
#Dataset = Dataset.rename(index={0: "ExitTaken", 1: "SubIndex", 2: "TradeDuration",
# 3: "TradeDirection", 4: "TradeReturn", 5: "RMultiple"})
#
#Asset1['Brackets'] = 1
#Asset1['SlippageCommissionBrackets'] = 1
#for d in Dataset:
# Asset1['SlippageCommissionBrackets'].loc[(Asset1['SubIndex'] == Dataset[d]['SubIndex'])] = Dataset[d]['TradeReturn'] - Slippage - Commission
#for d in Dataset:
# Asset1['Brackets'].loc[(Asset1['SubIndex'] == Dataset[d]['SubIndex'])] = Dataset[d]['TradeReturn']
#NumWinningTrades = len(Asset1['Brackets'][Asset1['Brackets'] > 1])
#NumLosingTrades = len(Asset1['Brackets'][Asset1['Brackets'] < 1])
#AvgWin = Asset1['Brackets'][Asset1['Brackets'] > 1].mean()
#AvgLoss = Asset1['Brackets'][Asset1['Brackets'] < 1].mean()
#RewardRisk = AvgWin/AvgLoss
#WinRate = NumWinningTrades / (NumWinningTrades + NumLosingTrades)
#LossRate = NumLosingTrades / (NumWinningTrades + NumLosingTrades)
#Expectancy = (WinRate * RewardRisk) - (LossRate)
#
#Asset1['Multiplier'] = Asset1['Brackets'].cumprod().plot()
#print(Expectancy)
#
##TradeData = Asset1[TradeDates[0][0]:TradeDates[0][1]]
###the 'next' function yields index position of first non 0 exit
##TradeData['ReIndex'] = range(0,len(TradeData))
##ExitTaken = TradeData['Exit'][next((n for n, x in enumerate(TradeData['Exit']) if x), 0)]
##SubIndexOfExit = TradeData['SubIndex'][next((n for n, x in enumerate(TradeData['Exit']) if x), 0)]
##TradeDuration = TradeData['ReIndex'][next((n for n, x in enumerate(TradeData['Exit']) if x), 0)]
##TradeDirection = TradeData['Signal'][0]
##TradeReturn = ExitReturns[ExitTaken]
##
###If no stops are hit and there is a signal change, take P/L and switch position
##if ExitTaken == 0:
## SubIndexOfExit = TradeData['SubIndex'][-1]
## if TradeDirection == 1:
## TradeReturn = 1 + ((TradeData['Adj Close'][-1] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
## elif TradeDirection == -1:
## TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Adj Close'][-1])/TradeData['Adj Close'][0])
##else:
## pass
###Assess Gaps
###GAP STG
##if ExitTaken == 3:
## TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Open'][TradeDuration])/TradeData['Adj Close'][0])
##else:
## pass
###GAP LTG
##if ExitTaken == 4:
## TradeReturn = 1 + ((TradeData['Open'][TradeDuration] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
##else:
## pass
###GAP SSL
##if ExitTaken == 9:
## TradeReturn = 1 + ((TradeData['Adj Close'][0] - TradeData['Open'][TradeDuration])/TradeData['Adj Close'][0])
##else:
## pass
###GAP LSL
##if ExitTaken == 10:
## TradeReturn = 1 + ((TradeData['Open'][TradeDuration] - TradeData['Adj Close'][0])/TradeData['Adj Close'][0])
##else:
## pass
##Empty.append(ExitTaken)
##Empty.append(SubIndexOfExit)
##Empty.append(TradeDuration)
##Empty.append(TradeDirection)
##Empty.append(TradeReturn)
##Emptyseries = pd.Series(Empty)
###Dataset[ii] = Emptyseries.values
###Empty[:] = []
##print(Emptyseries) |
Michaela Morard of Huntsville, Alabama, notched her fifth AJGA tournament title at the Randy Wise Junior Open. The University of Alabama commit was dominant from the outset, as she fired a first-round 5-under-par 67 to break the competitive course record previously held by LPGA pro Morgan Pressel. From there, the three-time Rolex Junior All-American continued to cruise to earn a four-stroke wire-to-wire victory. Morard is currently No. 8 in the Rolex AJGA Rankings.
David Ford of Peachtree Corners, Georgia, fired a 10-under-par 206 at the AJGA Junior All-Star at Butte Creek to earn his first AJGA title in only his fourth event. Throughout the tournament, Ford holed 12 birdies and an eagle en route to taking the wire-to-wire two-stroke victory. The win is Ford’s second top-10 finish in 2018.
Kaleiya Romero of San Jose, California, went wire-to-wire at the Eagle Crest Junior Championship to earn her second AJGA tournament title of 2018. Romero holed 13 birdies and two eagles over the course of the weather-shortened tournament to earn a nine-stroke victory. Along with being a four-time AJGA tournament champion, Romero is also a four-time winner with the Junior Golf Association of Northern California.
In his first-career AJGA event, Sang Ha Park of Cheongju, South Korea, took home his first tournament title at the KJ Choi Foundation Junior Championship presented by SK Telecom. Park entered the third round tied for the lead, but he broke the tie by turning in a dominant final-round 3-under-par 69 in which he holed five birdies and did not score over par through the first 15 holes.
"""
Custom message viewer with support for switching between HTML and plain
text rendering, HTML sanitization, lazy rendering (as you scroll down),
zoom and URL click warning popup
"""
from PyQt4 import QtCore, QtGui
from safehtmlparser import SafeHTMLParser
from tr import _translate
class MessageView(QtGui.QTextBrowser):
"""Message content viewer class, can switch between plaintext and HTML"""
MODE_PLAIN = 0
MODE_HTML = 1
def __init__(self, parent=0):
super(MessageView, self).__init__(parent)
self.mode = MessageView.MODE_PLAIN
self.html = None
self.setOpenExternalLinks(False)
self.setOpenLinks(False)
self.anchorClicked.connect(self.confirmURL)
self.out = ""
self.outpos = 0
self.document().setUndoRedoEnabled(False)
self.rendering = False
self.defaultFontPointSize = self.currentFont().pointSize()
self.verticalScrollBar().valueChanged.connect(self.lazyRender)
self.setWrappingWidth()
def resizeEvent(self, event):
"""View resize event handler"""
super(MessageView, self).resizeEvent(event)
self.setWrappingWidth(event.size().width())
def mousePressEvent(self, event):
"""Mouse press button event handler"""
if event.button() == QtCore.Qt.LeftButton and self.html and self.html.has_html and self.cursorForPosition(
event.pos()).block().blockNumber() == 0:
if self.mode == MessageView.MODE_PLAIN:
self.showHTML()
else:
self.showPlain()
else:
super(MessageView, self).mousePressEvent(event)
def wheelEvent(self, event):
"""Mouse wheel scroll event handler"""
# super will actually automatically take care of zooming
super(MessageView, self).wheelEvent(event)
if (
QtGui.QApplication.queryKeyboardModifiers() & QtCore.Qt.ControlModifier
) == QtCore.Qt.ControlModifier and event.orientation() == QtCore.Qt.Vertical:
zoom = self.currentFont().pointSize() * 100 / self.defaultFontPointSize
QtGui.QApplication.activeWindow().statusBar().showMessage(_translate(
"MainWindow", "Zoom level %1%").arg(str(zoom)))
def setWrappingWidth(self, width=None):
"""Set word-wrapping width"""
self.setLineWrapMode(QtGui.QTextEdit.FixedPixelWidth)
if width is None:
width = self.width()
self.setLineWrapColumnOrWidth(width)
def confirmURL(self, link):
"""Show a dialog requesting URL opening confirmation"""
if link.scheme() == "mailto":
window = QtGui.QApplication.activeWindow()
window.ui.lineEditTo.setText(link.path())
if link.hasQueryItem("subject"):
window.ui.lineEditSubject.setText(
link.queryItemValue("subject"))
if link.hasQueryItem("body"):
window.ui.textEditMessage.setText(
link.queryItemValue("body"))
window.setSendFromComboBox()
window.ui.tabWidgetSend.setCurrentIndex(0)
window.ui.tabWidget.setCurrentIndex(
window.ui.tabWidget.indexOf(window.ui.send)
)
window.ui.textEditMessage.setFocus()
return
reply = QtGui.QMessageBox.warning(
self,
QtGui.QApplication.translate(
"MessageView",
"Follow external link"),
QtGui.QApplication.translate(
"MessageView",
"The link \"%1\" will open in a browser. It may be a security risk, it could de-anonymise you"
" or download malicious data. Are you sure?").arg(unicode(link.toString())),
QtGui.QMessageBox.Yes,
QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
QtGui.QDesktopServices.openUrl(link)
def loadResource(self, restype, name):
"""
        Callback for loading referenced objects, such as an image. For security reasons it
        currently does nothing.
"""
pass
def lazyRender(self):
"""
Partially render a message. This is to avoid UI freezing when loading huge messages. It continues loading as
you scroll down.
"""
if self.rendering:
return
self.rendering = True
position = self.verticalScrollBar().value()
cursor = QtGui.QTextCursor(self.document())
while self.outpos < len(self.out) and self.verticalScrollBar().value(
) >= self.document().size().height() - 2 * self.size().height():
startpos = self.outpos
self.outpos += 10240
# find next end of tag
if self.mode == MessageView.MODE_HTML:
pos = self.out.find(">", self.outpos)
if pos > self.outpos:
self.outpos = pos + 1
cursor.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
cursor.insertHtml(QtCore.QString(self.out[startpos:self.outpos]))
self.verticalScrollBar().setValue(position)
self.rendering = False
def showPlain(self):
"""Render message as plain text."""
self.mode = MessageView.MODE_PLAIN
out = self.html.raw
if self.html.has_html:
out = "<div align=\"center\" style=\"text-decoration: underline;\"><b>" + unicode(
QtGui.QApplication.translate(
"MessageView", "HTML detected, click here to display")) + "</b></div><br/>" + out
self.out = out
self.outpos = 0
self.setHtml("")
self.lazyRender()
def showHTML(self):
"""Render message as HTML"""
self.mode = MessageView.MODE_HTML
out = self.html.sanitised
out = "<div align=\"center\" style=\"text-decoration: underline;\"><b>" + unicode(
QtGui.QApplication.translate("MessageView", "Click here to disable HTML")) + "</b></div><br/>" + out
self.out = out
self.outpos = 0
self.setHtml("")
self.lazyRender()
def setContent(self, data):
"""Set message content from argument"""
self.html = SafeHTMLParser()
self.html.reset()
self.html.reset_safe()
self.html.allow_picture = True
self.html.feed(data)
self.html.close()
self.showPlain()
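# Minimal usage sketch (assumption: PyQt4 and safehtmlparser are importable and the message
# body is already decoded text; in the full application this widget is embedded in the main
# window rather than shown on its own). Guarded so importing the module is unaffected.
if __name__ == "__main__":
    import sys
    app = QtGui.QApplication(sys.argv)
    viewer = MessageView()
    viewer.setContent("Plain text is shown first. <b>Click the top line to switch to HTML.</b>")
    viewer.show()
    sys.exit(app.exec_())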
|
We've saved a record of the stream from this event for you, so you won't miss information about new features and other important details of the updated Visual Studio. You can also browse and try the trial versions of Devart products compatible with Visual Studio 2019.
dotConnect is an enhanced data connectivity solution built over ADO.NET architecture and a development framework with a number of innovative technologies.
LinqConnect is a fast, lightweight, and easy to use LINQ to SQL compatible ORM solution with visual model designer, data providers, and SQL monitoring tool.
A leading tool for comparing files and folders. It integrates with TFS, SVN, Git, Mercurial, and Perforce. Use Code Compare as an add-in for Visual Studio or as a standalone solution.
Review Assistant is a powerful plugin for reviewing code in Visual Studio. It integrates with TFS, Git, SVN, and Mercurial, and supports multiple reviews.
A smart SQL autocompletion and formatting tool with a wide range of code completion features that save you from having to remember complex object names, SQL operators, etc.
Devart ODBC Drivers provide high-performance, feature-rich, and secure connectivity solutions to access the most popular databases directly. |
import datetime
from ming.orm.ormsession import ThreadLocalORMSession
from mock import patch
from alluratest.controller import TestController
from allura import model as M
#---------x---------x---------x---------x---------x---------x---------x
# RootController methods exposed:
# index, new_page, search
# PageController methods exposed:
# index, edit, history, diff, raw, revert, update
# CommentController methods exposed:
# reply, delete
class TestRootController(TestController):
def _post(self, slug='', **kw):
d = {
'title':'My Post',
'text':'Nothing to see here',
'labels':'',
'state':'published'}
d.update(kw)
r = self.app.post('/blog%s/save' % slug, params=d)
return r
def _blog_date(self):
return datetime.datetime.utcnow().strftime('%Y/%m')
@patch('forgeblog.model.blog.g.director.create_activity')
def test_activity(self, create_activity):
self._post(state='draft')
assert create_activity.call_count == 0
slug = '/%s/my-post' % self._blog_date()
self._post(slug)
assert create_activity.call_count == 1, create_activity.call_count
assert create_activity.call_args[0][1] == 'created'
create_activity.reset_mock()
self._post(slug, text='new text')
assert create_activity.call_count == 1
assert create_activity.call_args[0][1] == 'modified'
create_activity.reset_mock()
self._post(slug, title='new title')
assert create_activity.call_count == 1
assert create_activity.call_args[0][1] == 'renamed'
def test_root_index(self):
self._post()
d = self._blog_date()
response = self.app.get('/blog/')
assert 'Recent posts' in response
assert 'Nothing to see here' in response
assert '/blog/%s/my-post/edit' % d in response
anon_r = self.app.get('/blog/',
extra_environ=dict(username='*anonymous'))
# anonymous user can't see Edit links
assert 'Nothing to see here' in anon_r
assert '/blog/%s/my-post/edit' % d not in anon_r
def test_root_index_draft(self):
self._post(state='draft')
d = self._blog_date()
response = self.app.get('/blog/')
assert 'Recent posts' in response
assert 'Nothing to see here' in response
assert 'Draft' in response
assert '/blog/%s/my-post/edit' % d in response
anon_r = self.app.get('/blog/',
extra_environ=dict(username='*anonymous'))
# anonymous user can't see draft posts
assert 'Nothing to see here' not in anon_r
def test_root_new_post(self):
response = self.app.get('/blog/new')
assert '<option selected value="published">Published</option>' in response
assert 'Enter your title here' in response
def test_validation(self):
r = self._post(title='')
assert 'You must provide a Title' in r
def test_root_new_search(self):
self._post()
response = self.app.get('/blog/search?q=see')
assert 'Search' in response
def test_paging(self):
[self._post() for i in range(3)]
r = self.app.get('/blog/?limit=1&page=0')
assert 'Newer Entries' not in r
assert 'Older Entries' in r
r = self.app.get('/blog/?limit=1&page=1')
assert 'Newer Entries' in r
assert 'Older Entries' in r
r = self.app.get('/blog/?limit=1&page=2')
assert 'Newer Entries' in r
assert 'Older Entries' not in r
def test_discussion_admin(self):
r = self.app.get('/blog/')
r = self.app.get('/admin/blog/options', validate_chunk=True)
assert 'Allow discussion/commenting on posts' in r
# Turn discussion on
r = self.app.post('/admin/blog/set_options',
params=dict(show_discussion='1'))
self._post()
d = self._blog_date()
r = self.app.get('/blog/%s/my-post/' % d)
assert '<div class="markdown_edit">' in r
# Turn discussion off
r = self.app.post('/admin/blog/set_options')
r = self.app.get('/blog/%s/my-post/' % d)
assert '<div class="markdown_edit">' not in r
def test_post_index(self):
self._post()
d = self._blog_date()
response = self.app.get('/blog/%s/my-post/' % d)
assert 'Nothing to see here' in response
assert '/blog/%s/my-post/edit' % d in response
anon_r = self.app.get('/blog/%s/my-post/' % d,
extra_environ=dict(username='*anonymous'))
# anonymous user can't see Edit links
assert 'Nothing to see here' in anon_r
assert '/blog/%s/my-post/edit' % d not in anon_r
self.app.get('/blog/%s/no-my-post' % d, status=404)
def test_post_index_draft(self):
self._post(state='draft')
d = self._blog_date()
response = self.app.get('/blog/%s/my-post/' % d)
assert 'Nothing to see here' in response
assert 'Draft' in response
assert '/blog/%s/my-post/edit' % d in response
anon_r = self.app.get('/blog/%s/my-post/' % d,
extra_environ=dict(username='*anonymous'))
# anonymous user can't get to draft posts
assert 'Nothing to see here' not in anon_r
def test_post_edit(self):
self._post()
d = self._blog_date()
response = self.app.get('/blog/%s/my-post/edit' % d)
assert 'Nothing' in response
# anon users can't edit
response = self.app.get('/blog/%s/my-post/edit' % d,
extra_environ=dict(username='*anonymous'))
assert 'Nothing' not in response
def test_post_history(self):
self._post()
d = self._blog_date()
self._post('/%s/my-post' % d)
self._post('/%s/my-post' % d)
response = self.app.get('/blog/%s/my-post/history' % d)
assert 'My Post' in response
# two revisions are shown
assert '2 by Test Admin' in response
assert '1 by Test Admin' in response
self.app.get('/blog/%s/my-post?version=1' % d)
self.app.get('/blog/%s/my-post?version=foo' % d, status=404)
def test_post_diff(self):
self._post()
d = self._blog_date()
self._post('/%s/my-post' % d, text='sometext')
self.app.post('/blog/%s/my-post/revert' % d, params=dict(version='1'))
response = self.app.get('/blog/%s/my-post/' % d)
response = self.app.get('/blog/%s/my-post/diff?v1=0&v2=0' % d)
assert 'My Post' in response
def test_feeds(self):
self.app.get('/blog/feed.rss')
self.app.get('/blog/feed.atom')
def test_post_feeds(self):
self._post()
d = self._blog_date()
response = self.app.get('/blog/%s/my-post/feed.rss' % d)
assert 'Nothing to see' in response
response = self.app.get('/blog/%s/my-post/feed.atom' % d)
assert 'Nothing to see' in response
def test_related_artifacts(self):
self._post(title='one')
d = self._blog_date()
self._post(title='two', text='[blog:%s/one]' % d)
M.MonQTask.run_ready()
ThreadLocalORMSession.flush_all()
        r = self.app.get('/blog/%s/one/' % d)
assert 'Related' in r
assert 'Blog Post: %s/two' % d in r
|
Below are the nutrition facts and Weight Watchers points for the Seared Steak, Egg & Tomatillo Wrap from Starbucks Coffee.
There are 410 calories in a Seared Steak, Egg & Tomatillo Wrap from Starbucks. Most of those calories come from fat (38%) and carbohydrates (41%).
Is the Seared Steak, Egg & Tomatillo Wrap good for you?
Does the Seared Steak, Egg & Tomatillo Wrap taste good?
homestyle tortilla (enriched unbleached wheat flour (wheat flour, enzyme (added for improved baking), niacin, reduced iron, thiamine mononitrate, riboflavin, folic acid), water, palm oil, expeller pressed canola oil, leavening (wheat starch, sodium acid pyrophosphate, sodium bicarbonate, monocalcium phosphate), cane sugar, salt, yeast, sodium bicarbonate); precooked scrambled eggs (whole eggs, water, soybean oil, modified food starch, salt, xanthan gum, guar gum, citric acid); fully cooked seasoned beef steak and modified food starch product (beef, beef broth, dextrose, modified food starch, beef flavor (maltodextrin, natural flavor, beef extract, salt, beef tallow, modified corn starch, yeast extract), salt, sodium phosphate, spice extractives); citrus tomatillo salsa (tomatillos, water, onions, garlic, dried chile peppers, contains less than 2% of canola oil/olive oil blend, spices, lemon juice concentrate, salt, xanthan gum and guar gum blend); caramelized onions (onions, caramelized sugar, canola oil/olive oil blend, spice, modified food starch, salt). Contains: wheat, eggs.
# coding=utf-8
"""
Every project needs its trash heap of miscellaneous functions and classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import copy
import csv
import fnmatch
import ipaddress
import itertools
import json
import locale
import logging
import mimetypes
import operator
import os
import platform
import random
import re
import shlex
import shutil
import signal
import subprocess
import sys
import tarfile
import tempfile
import time
import traceback
import webbrowser
import zipfile
from abc import abstractmethod
from collections import defaultdict, Counter
from contextlib import contextmanager
from distutils.version import LooseVersion
from subprocess import CalledProcessError, PIPE, check_output, STDOUT
from webbrowser import GenericBrowser
import math
import psutil
import requests
import requests.adapters
from io import IOBase
from lxml import etree
from progressbar import ProgressBar, Percentage, Bar, ETA
from urllib import parse
from urllib.request import url2pathname
from urwid import BaseScreen
from bzt import TaurusInternalException, TaurusNetworkError, ToolError, TaurusConfigError
LOG = logging.getLogger("")
CALL_PROBLEMS = (CalledProcessError, OSError)
numeric_types = (int, float, complex)
viewvalues = operator.methodcaller("values")
def unicode_decode(string, errors="strict"):
if isinstance(string, bytes):
return string.decode("utf-8", errors)
else:
return string
def communicate(proc): # todo: replace usage of it with sync_run()
out, err = proc.communicate()
out = unicode_decode(out, errors="ignore")
err = unicode_decode(err, errors="ignore")
return out, err
def iteritems(dictionary, **kw):
return iter(dictionary.items(**kw))
def b(string):
return string.encode("latin-1")
def get_stacktrace(exc):
return ''.join(traceback.format_tb(exc.__traceback__)).rstrip()
def reraise(exc_info, exc=None):
_type, message, stacktrace = exc_info
if exc is None:
exc = _type(message)
exc.__traceback__ = stacktrace
raise exc
def stream_decode(string):
if not isinstance(string, str):
return string.decode()
else:
return string
def sync_run(args, env=None):
output = check_output(args, env=env, stderr=STDOUT)
return stream_decode(output).rstrip()
def temp_file(suffix="", prefix="tmp", dir=None):
""" Creates temporary file, returns name of it. User is responsible for deleting the file """
fd, fname = tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir)
os.close(fd)
return fname
def simple_body_dict(dic):
""" body dict must have just one level for sending with form params"""
if isinstance(dic, dict):
for key in dic:
if not isinstance(dic[key], (str, numeric_types)):
return False
return True
return False
def get_full_path(path, default=None, step_up=0):
"""
Expands '~' and prepends cwd to the path if it's relative (not absolute).
The target doesn't have to exist.
:param path:
:param default:
:param step_up:
:return:
"""
if not path:
return default
res = os.path.abspath(os.path.expanduser(path))
for _ in range(step_up):
res = os.path.dirname(res)
return res
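# Illustrative usage sketch for get_full_path() (not part of the original module; paths are assumptions):
#   get_full_path("~/work/../report.txt")  -> absolute path with "~" expanded and ".." resolved
#   get_full_path(__file__, step_up=1)     -> directory that contains this file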
BZT_DIR = get_full_path(__file__, step_up=1)
RESOURCES_DIR = os.path.join(BZT_DIR, "resources")
def get_files_recursive(dir_name, exclude_mask=''):
for root, _, files in os.walk(dir_name):
for _file in files:
if not fnmatch.fnmatch(_file, exclude_mask):
yield os.path.join(root, _file)
def parse_java_version(versions):
if versions:
version = versions[0]
if LooseVersion(version) > LooseVersion("6"): # start of openjdk naming
major = re.findall("^([\d]*)", version)
else:
major = re.findall("\.([\d]*)", version)
if major:
return major[0]
def run_once(func):
"""
A decorator to run a function only once
:type func: callable
:return:
"""
def wrapper(*args, **kwargs):
"""
:param kwargs:
:param args:
"""
if not wrapper.has_run:
wrapper.has_run = True
return func(*args, **kwargs)
wrapper.has_run = False
return wrapper
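# Minimal sketch of the decorator above (illustrative only, names are made up):
#   @run_once
#   def announce():
#       print("started")
#   announce()  # prints "started"
#   announce()  # second call is a no-op, wrapper.has_run is already True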
def replace_in_config(config, samples, substitutes, log=None):
def file_replacer(value, key, container):
if value in samples:
container[key] = substitutes[samples.index(value)]
if container[key] != value and log:
log.debug("Replaced %s with %s", value, container[key])
BetterDict.traverse(config, file_replacer)
def dehumanize_time(str_time):
"""
Convert value like 1d4h33m12s103ms into seconds
Also, incidentally translates strings like "inf" into float("inf")
:param str_time: string to convert
:return: float value in seconds
:raise TaurusInternalException: in case of unsupported unit
"""
if not str_time:
return 0
parser = re.compile(r'([\d\.\-infa]+)([a-zA-Z]*)')
parts = parser.findall(str(str_time).replace(' ', ''))
if len(parts) == 0:
msg = "String format not supported: %s"
raise TaurusInternalException(msg % str_time)
result = 0.0
for value, unit in parts:
try:
value = float(value)
except ValueError:
raise TaurusInternalException("Unsupported float string: %r" % value)
unit = unit.lower()
if unit == 'ms':
result += value / 1000.0
continue
elif unit == 's' or unit == '':
result += value
continue
elif unit == 'm':
result += value * 60
continue
elif unit == 'h':
result += value * 60 * 60
continue
elif unit == 'd':
result += value * 60 * 60 * 24
continue
else:
msg = "String contains unsupported unit %s: %s"
raise TaurusInternalException(msg % (unit, str_time))
return result
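# Usage sketch for dehumanize_time() (illustrative values based on the units handled above):
#   dehumanize_time("1m30s")  -> 90.0
#   dehumanize_time("150ms")  -> 0.15
#   dehumanize_time("2h")     -> 7200.0
#   dehumanize_time("5x")     -> raises TaurusInternalException (unsupported unit "x")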
def get_bytes_count(str_bytes):
if not str_bytes:
return 0
parser = re.compile(r'([\d\.]+)([a-zA-Z]*)')
parts = parser.findall(str(str_bytes).replace(' ', ''))
if len(parts) != 1:
msg = "String format not supported: %s"
raise TaurusConfigError(msg % str_bytes)
value, unit = parts[0]
try:
value = float(value)
except ValueError:
raise TaurusConfigError("Unsupported float string: %r" % value)
unit = unit.lower()
if unit in ('', 'b'):
result = value
elif unit in ('k', 'kb', 'kib'):
result = value * 1024
elif unit in ('m', 'mb', 'mib'):
result = value * 1024 * 1024
else:
msg = "String contains unsupported unit %s: %s"
raise TaurusConfigError(msg % (unit, str_bytes))
return result
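# Usage sketch for get_bytes_count() (illustrative values based on the units handled above):
#   get_bytes_count("512")   -> 512.0
#   get_bytes_count("4kb")   -> 4096.0
#   get_bytes_count("2mib")  -> 2097152.0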
class BetterDict(defaultdict):
"""
Wrapper for defaultdict that is able to deep-merge other dicts into itself
"""
@classmethod
def from_dict(cls, orig):
"""
# https://stackoverflow.com/questions/50013768/how-can-i-convert-nested-dictionary-to-defaultdict/50013806
"""
if isinstance(orig, dict):
return cls(lambda: None, {k: cls.from_dict(v) for k, v in orig.items()})
elif isinstance(orig, list):
return [cls.from_dict(e) for e in orig]
else:
return orig
def get(self, key, default=defaultdict, force_set=False):
"""
get() that can also behave like setdefault() (see force_set)
:param force_set:
:type key: object
:type default: object
"""
if default == defaultdict:
default = BetterDict()
if isinstance(default, BaseException) and key not in self:
raise default
if force_set:
value = self.setdefault(key, default)
else:
value = defaultdict.get(self, key, default)
return value
def merge(self, src):
"""
Deep merge other dict into current
:type src: dict
"""
if not isinstance(src, dict):
raise TaurusInternalException("Loaded object is not dict [%s]: %s" % (src.__class__, src))
for key, val in iteritems(src):
prefix = ""
if key[0] in ("^", "~", "$"): # modificator found
prefix = key[0]
key = key[1:]
if prefix == "^": # eliminate flag
# TODO: improve logic - use val contents to see what to eliminate
if key in self:
self.pop(key)
continue
elif prefix == "~": # overwrite flag
if key in self:
self.pop(key)
if isinstance(val, dict):
self.__add_dict(key, val)
elif isinstance(val, list):
self.__add_list(key, val, merge_list_items=(prefix == "$"))
else:
self[key] = val
return self
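# Merge modifier sketch (illustrative, based on the prefixes handled above):
#   base = BetterDict.from_dict({"a": 1, "b": [1], "c": {"x": 1}})
#   base.merge({"~b": [2]})   # "~" overwrites: b becomes [2]
#   base.merge({"^c": None})  # "^" eliminates: key "c" is removed
#   base.merge({"b": [3]})    # plain list values are extended: b becomes [2, 3]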
def __add_dict(self, key, val):
dst = self.get(key, force_set=True)
if isinstance(dst, BetterDict):
dst.merge(val)
elif isinstance(dst, Counter):
self[key] += val
elif isinstance(dst, dict):
raise TaurusInternalException("Mix of DictOfDict and dict is forbidden")
else:
self[key] = BetterDict.from_dict(val)
def __add_list(self, key, val, merge_list_items):
self.__ensure_list_type(val)
if key not in self:
self[key] = []
if not isinstance(self[key], list):
self[key] = val
return
if merge_list_items:
left = self[key]
right = val
for index, righty in enumerate(right):
if index < len(left):
lefty = left[index]
if isinstance(lefty, BetterDict) and isinstance(righty, BetterDict):
lefty.merge(righty)
else:
# todo: should we log all overwriting cases?
LOG.warning("Overwriting the value of %r when merging configs", key)
left[index] = righty
else:
left.insert(index, righty)
else:
self[key].extend(val)
def __ensure_list_type(self, values):
"""
Convert dict elements of the list to BetterDict, recursively
:param values: list
:return:
"""
for idx, obj in enumerate(values):
if isinstance(obj, dict):
values[idx] = BetterDict.from_dict(obj)
elif isinstance(obj, list):
self.__ensure_list_type(obj)
@classmethod
def traverse(cls, obj, visitor):
"""
Deep-traverse dict/list with visitor. If the visitor returns a truthy value, don't traverse into that item
:type obj: list or dict or object
:type visitor: callable
"""
if isinstance(obj, dict):
for key, val in iteritems(obj):
if not visitor(val, key, obj):
cls.traverse(obj[key], visitor)
elif isinstance(obj, list):
for idx, val in enumerate(obj):
if not visitor(val, idx, obj):
cls.traverse(obj[idx], visitor)
def filter(self, rules, black_list=False):
keys = set(self.keys())
for key in keys:
ikey = "!" + key
if (key in rules) or (ikey in rules): # we have rule for this key
current_black_list = black_list if key in rules else not black_list
rkey = key if key in rules else ikey
if isinstance(rules.get(rkey), dict):
if isinstance(self.get(key), BetterDict): # need to go deeper
self.get(key).filter(rules[rkey], black_list=current_black_list)
elif not current_black_list:
del self[key]
elif current_black_list:
del self[key] # must be blacklisted
elif not black_list:
del self[key] # remove unknown key
current = self.get(key, None)
if isinstance(current, (dict, list)) and not current:
del self[key] # clean empty
def __repr__(self):
return dict(self).__repr__()
def get_uniq_name(directory, prefix, suffix="", forbidden_names=()):
base = os.path.join(directory, prefix)
diff = ""
num = 0
while os.path.exists(base + diff + suffix) or base + diff + suffix in forbidden_names:
num += 1
diff = "-%s" % num
return base + diff + suffix
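# Usage sketch for get_uniq_name() (illustrative): if "/tmp/report.csv" already exists,
#   get_uniq_name("/tmp", "report", ".csv")  -> "/tmp/report-1.csv"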
class TaurusCalledProcessError(CalledProcessError):
def __init__(self, *args, **kwargs):
""" join output and stderr for compatibility """
output = ""
if "output" in kwargs:
output += u"\n>>> {out_start} >>>\n{out}\n<<< {out_end} <<<\n".format(
out_start="START OF STDOUT", out=kwargs["output"], out_end="END OF STDOUT")
if "stderr" in kwargs:
output += u"\n>>> {err_start} >>>\n{err}\n<<< {err_end} <<<\n".format(
err_start="START OF STDERR", err=kwargs.pop("stderr"), err_end="END OF STDERR")
if output:
kwargs["output"] = output
super(TaurusCalledProcessError, self).__init__(*args, **kwargs)
def __str__(self):
base_str = super(TaurusCalledProcessError, self).__str__()
if self.output:
base_str += '\n' + self.output
return base_str
def exec_and_communicate(*args, **kwargs):
process = shell_exec(*args, **kwargs)
out, err = communicate(process)
if process.returncode != 0:
raise TaurusCalledProcessError(process.returncode, cmd=args[0], output=out, stderr=err)
return out, err
def shell_exec(args, cwd=None, stdout=PIPE, stderr=PIPE, stdin=PIPE, shell=False, env=None, pgrp=True):
"""
Wrapper for subprocess starting
"""
if stdout and not isinstance(stdout, (int, IOBase)):
LOG.warning("stdout is not IOBase: %s", stdout)
stdout = None
if stderr and not isinstance(stderr, (int, IOBase)):
LOG.warning("stderr is not IOBase: %s", stderr)
stderr = None
if isinstance(args, str) and not shell:
args = shlex.split(args, posix=not is_windows())
LOG.debug("Executing shell: %s at %s", args, cwd or os.curdir)
kwargs = {
"stdout": stdout,
"stderr": stderr,
"stdin": stdin,
"bufsize": 0,
"cwd": cwd,
"shell": shell,
"env": env
}
if is_windows():
if pgrp:
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
return psutil.Popen(args, **kwargs)
else:
kwargs["close_fds"] = True
if pgrp:
kwargs["preexec_fn"] = os.setpgrp
return psutil.Popen(args, **kwargs)
# FIXME: shouldn't we bother closing opened descriptors?
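# Usage sketch for shell_exec() (illustrative; the command is an assumption):
#   proc = shell_exec("java -version")
#   out, err = communicate(proc)  # decoded stdout/stderr once the process exits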
class Environment(object):
def __init__(self, log=None, parent=None):
self.data = {}
self._queue = []
log = log or LOG
self.log = log.getChild(self.__class__.__name__)
if parent:
self._queue.extend(
[(self.__getattribute__(method), args, kwargs) for method, args, kwargs in parent.get_queue()])
def get_queue(self):
return [(method.__name__, args, kwargs) for method, args, kwargs in self._queue]
def set(self, *args, **kwargs):
self._add_to_queue(self._set, *args, **kwargs)
def add_path(self, *args, **kwargs):
self._add_to_queue(self._add_path, *args, **kwargs)
def add_java_param(self, *args, **kwargs):
self._add_to_queue(self._add_java_param, *args, **kwargs)
def update(self, *args, **kwargs):
self._add_to_queue(self._update, *args, **kwargs)
def _add_to_queue(self, *args, **kwargs):
self._queue.append((args[0], args[1:], kwargs))
def _set(self, env):
"""
:type env: dict
"""
for key in env:
key = str(key)
val = env[key]
if is_windows():
key = key.upper()
if key in self.data:
if val is None:
self.log.debug("Remove '%s' from environment", key)
self.data.pop(key)
else:
self.log.debug("Replace '%s' in environment", key)
self.data[key] = str(val)
else:
self._add({key: val}, '', finish=False)
def _add_path(self, pair, finish=False):
self._add(pair, os.pathsep, finish)
def _add_java_param(self, pair, finish=False):
self._add(pair, " ", finish)
def _update(self, env): # compatibility with taurus-server
self.set(env)
def _add(self, pair, separator, finish):
for key in pair:
val = pair[key]
key = str(key)
if is_windows():
key = key.upper()
if val is None:
self.log.debug("Skip empty variable '%s'", key)
return
val = str(val)
if key in self.data:
if finish:
self.data[key] += separator + val # add to the end
else:
self.data[key] = val + separator + self.data[key] # add to the beginning
else:
self.data[key] = str(val)
def get(self, key=None):
self._apply_queue()
if key:
key = str(key)
if is_windows():
key = key.upper()
return self.data.get(key, None)
else:
# full environment
return copy.deepcopy(self.data)
def _apply_queue(self):
self.data = {}
self._set(os.environ)
for method, args, kwargs in self._queue:
method(*args, **kwargs)
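# Environment usage sketch (illustrative; calls are queued and only applied when get() is invoked):
#   env = Environment()
#   env.set({"FOO": "bar"})
#   env.add_path({"PATH": "/opt/tool/bin"})  # joined with os.pathsep, prepended by default
#   resolved = env.get()                     # copy of os.environ with the queued changes applied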
class FileReader(object):
SYS_ENCODING = locale.getpreferredencoding()
def __init__(self, filename="", file_opener=None, parent_logger=None):
self.fds = None
if parent_logger:
self.log = parent_logger.getChild(self.__class__.__name__)
else:
self.log = logging.getLogger(self.__class__.__name__)
if file_opener:
self.file_opener = file_opener # external method for opening of file
else:
self.file_opener = lambda f: open(f, mode='rb') # default mode is binary
# for non-trivial openers filename must be empty (more complicated than just open())
# it turns all regular file checks off, see is_ready()
self.name = filename
self.cp = 'utf-8' # default code page is utf-8
self.decoder = codecs.lookup(self.cp).incrementaldecoder()
self.fallback_decoder = codecs.lookup(self.SYS_ENCODING).incrementaldecoder(errors='ignore')
self.offset = 0
def _readlines(self, hint=None):
# get generator instead of list (in regular readlines())
length = 0
for line in self.fds:
yield line
if hint and hint > 0:
length += len(line)
if length >= hint:
return
def is_ready(self):
if not self.fds:
if self.name:
if not os.path.isfile(self.name):
self.log.debug("File not appeared yet: %s", self.name)
return False
if not os.path.getsize(self.name):
self.log.debug("File is empty: %s", self.name)
return False
self.log.debug("Opening file: %s", self.name)
# call opener regardless of the name value as it can use empty name as flag
self.fds = self.file_opener(self.name)
if self.fds:
self.name = self.fds.name
return True
def _decode(self, line, last_pass=False):
try:
return self.decoder.decode(line, final=last_pass)
except UnicodeDecodeError:
self.log.warning("Content encoding of '%s' doesn't match %s", self.name, self.cp)
self.cp = self.SYS_ENCODING
self.decoder = self.fallback_decoder
self.decoder.reset()
self.log.warning("Proposed code page: %s", self.cp)
return self.decoder.decode(line, final=last_pass)
def get_lines(self, size=-1, last_pass=False):
if self.is_ready():
if last_pass:
size = -1
self.fds.seek(self.offset)
for line in self._readlines(hint=size):
self.offset += len(line)
yield self._decode(line, last_pass)
def get_line(self):
line = ""
if self.is_ready():
self.fds.seek(self.offset)
line = self.fds.readline()
self.offset += len(line)
return self._decode(line)
def get_bytes(self, size=-1, last_pass=False, decode=True):
if self.is_ready():
if last_pass:
size = -1
self.fds.seek(self.offset)
_bytes = self.fds.read(size)
self.offset += len(_bytes)
if decode:
return self._decode(_bytes, last_pass)
else:
return _bytes
def __del__(self):
self.close()
def close(self):
if self.fds:
self.fds.close()
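# FileReader usage sketch (illustrative; the file name and handler are assumptions):
#   reader = FileReader("results.jtl")
#   for line in reader.get_lines():  # resumes from the last offset on each call
#       handle(line)                 # "handle" is a placeholder for caller code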
def ensure_is_dict(container, key, sub_key):
"""
Ensure that dict item is dict, convert if needed
:type container: dict or list
:type key: basestring or int
:type sub_key: basestring
:return:
"""
if isinstance(container, BetterDict):
container.get(key, force_set=True)
elif isinstance(container, dict): # todo: remove after fixing merge
container[key] = BetterDict()
if not isinstance(container[key], dict): # todo: replace dict with BetterDict after fixing merge
container[key] = BetterDict.from_dict({sub_key: container[key]})
return container[key]
class MultiPartForm(object):
"""
Accumulate the data to be used when posting a form.
http://blog.doughellmann.com/2009/07/
pymotw-urllib2-library-for-opening-urls.html
:type form_fields: list[str,str]
"""
def __init__(self):
self.form_fields = []
self.files = []
self.boundary = make_boundary()
def get_content_type(self):
""" returns content type """
return 'multipart/form-data; boundary=%s' % self.boundary
def add_field(self, name, value):
"""
Add a simple field to the form data.
:type name: str
:type value: str
"""
self.form_fields.append((name, value))
def add_file_as_string(self, fieldname, filename, body, mimetype=None):
""" add raw string file
:type fieldname: str
:type filename: str
:type body: str | bytes
:type mimetype: str
"""
default = 'application/octet-stream'
if mimetype is None:
mimetype = mimetypes.guess_type(filename)[0] or default
self.files.append((fieldname, filename, mimetype, body))
def add_file(self, fieldname, filename, file_handle=None, mimetype=None):
"""Add a file to be uploaded.
:type mimetype: str
:type file_handle: file
:type filename: str
:type fieldname: str
"""
if not file_handle:
with open(filename, 'rb') as fds:
body = fds.read()
filename = os.path.basename(filename)
else:
body = file_handle.read()
self.add_file_as_string(fieldname, filename, body, mimetype)
def __convert_to_list(self):
"""Return a string representing the form, including attached files."""
# Build a list of lists, each containing "lines" of the
# request. Each part is separated by a boundary string.
# Once the list is built, return a string where each
# line is separated by '\r\n'.
parts = []
part_boundary = '--' + self.boundary
# Add the form fields
parts.extend(
[part_boundary, 'Content-Disposition: form-data; name="%s"' % name, '', value, ]
for name, value in self.form_fields
)
# Add the files to upload
parts.extend(
[part_boundary,
'Content-Disposition: file; name="%s"; filename="%s"' % (field_name, filename),
'Content-Type: %s' % content_type, '', body]
for field_name, filename, content_type, body in self.files
)
# Flatten the list and add closing boundary marker,
# then return CR+LF separated data
flattened = list(itertools.chain(*parts))
flattened.append('--' + self.boundary + '--')
return flattened
def form_as_bytes(self):
"""
represents form contents as bytes
"""
result_list = []
for item in self.__convert_to_list():
# bytes are appended as-is, str values are encoded to bytes first
if isinstance(item, bytes):
result_list.append(item)
elif isinstance(item, str):
result_list.append(item.encode())
else:
raise TaurusInternalException("Unhandled form data type: %s" % type(item))
res_bytes = b("\r\n").join(result_list)
res_bytes += b("\r\n")
return res_bytes
def to_json(obj, indent=True):
"""
Convert object into indented json
:param indent: whether to generate indented JSON
:param obj: object to convert
:return:
"""
# NOTE: you can set allow_nan=False to fail when serializing NaN/Infinity
return json.dumps(obj, indent=indent, cls=ComplexEncoder)
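# to_json sketch (illustrative): produces an indented JSON string; objects implementing
# JSONConvertible (below) are serialized via __json__(), private fields are filtered out.
#   to_json({"a": 1, "b": [2, 3]})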
class JSONDumpable(object):
"""
Marker class for json dumpable classes
"""
pass
class JSONConvertible(object):
@abstractmethod
def __json__(self):
"Convert class instance into JSON-dumpable structure (e.g. dict)"
pass
class ComplexEncoder(json.JSONEncoder):
"""
Magic class to help serialize in JSON any object.
"""
# todo: should we add complex type?
TYPES = (dict, list, tuple, str, int, float, bool, type(None))
def default(self, obj): # pylint: disable=method-hidden
"""
Filters out protected and private fields
:param obj:
:return:
"""
if self.__dumpable(obj):
res = {}
for key, val in iteritems(obj.__dict__):
if not self.__dumpable(val):
# logging.debug("Filtered out: %s.%s", key, val)
pass
elif key.startswith('_'):
# logging.debug("Filtered out: %s", key)
pass
else:
res[key] = val
return res
elif ComplexEncoder.__convertible(obj):
return obj.__json__()
else:
return None
@classmethod
def __dumpable(cls, obj):
"""
Return True if obj is of a JSON-dumpable type
:param obj:
:rtype: bool
"""
dumpable_types = tuple(cls.TYPES + (JSONDumpable,))
return isinstance(obj, dumpable_types)
@staticmethod
def __convertible(obj):
return isinstance(obj, JSONConvertible)
@classmethod
def of_basic_type(cls, val):
"""
Returns true if val is of basic type
:param val:
:return:
"""
return isinstance(val, cls.TYPES)
def humanize_time(secs):
"""
taken from http://testingreflections.com/node/6534
:param secs:
:return:
"""
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
return '%02d:%02d:%02d' % (hours, mins, secs)
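# humanize_time sketch (illustrative): humanize_time(3661) -> '01:01:01'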
def guess_csv_dialect(header, force_doublequote=False):
""" completely arbitrary fn to detect the delimiter
:param force_doublequote: bool
:type header: str
:rtype: csv.Dialect
"""
possible_delims = ",;\t"
dialect = csv.Sniffer().sniff(header, delimiters=possible_delims)
if force_doublequote:
dialect.doublequote = True
return dialect
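# guess_csv_dialect sketch (illustrative header sample):
#   guess_csv_dialect("label;timestamp;elapsed").delimiter  -> ';'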
def load_class(full_name):
"""
Load class by its full name like bzt.cli.CLI
:type full_name: str
:return:
:rtype: callable
"""
module_name = full_name[:full_name.rfind('.')]
class_name = full_name[full_name.rfind('.') + 1:]
LOG.debug("Importing module: %s", module_name)
module = __import__(module_name)
for mod in module_name.split('.')[1:]:
module = getattr(module, mod)
LOG.debug("Loading class: '%s' from %s", class_name, module)
return getattr(module, class_name)
def unzip(source_filename, dest_dir, rel_path=None):
"""
:param source_filename:
:param dest_dir:
:param rel_path:
:return:
"""
LOG.debug("Extracting %s to %s", source_filename, dest_dir)
with zipfile.ZipFile(source_filename) as zfd:
for member in zfd.infolist():
if rel_path:
if not member.filename.startswith(rel_path):
continue
else:
member.filename = member.filename[len(rel_path) + 1:]
if not member.filename:
continue
# Path traversal defense copied from
# http://hg.python.org/cpython/file/tip/Lib/http/server.py#l789
LOG.debug("Writing %s%s%s", dest_dir, os.path.sep, member.filename)
zfd.extract(member, dest_dir)
def untar(source_filename, dest_dir, rel_path=None):
with tarfile.open(source_filename, "r|*") as tar:
for member in tar:
if member.isfile():
if member.name is None:
continue
if rel_path is not None and not member.name.startswith(rel_path):
continue
filename = os.path.basename(member.name)
destination = os.path.join(dest_dir, filename)
with open(destination, "wb") as output:
shutil.copyfileobj(tar.extractfile(member), output, member.size)
def make_boundary(text=None):
"""
Generate boundary id
:param text:
:return:
"""
_width = len(repr(sys.maxsize - 1))
_fmt = '%%0%dd' % _width
token = random.randrange(sys.maxsize)
boundary = ('=' * 15) + (_fmt % token) + '=='
if text is None:
return boundary
bnd = boundary
counter = 0
while True:
cre = re.compile(r'^--' + re.escape(bnd) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
bnd = boundary + '.' + str(counter)
counter += 1
return bnd
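# make_boundary sketch (illustrative): returns a token such as '===============0123456789012345678=='
# that is guaranteed not to clash with the optional text argument.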
def is_int(str_val):
"""
Check if str_val is int type
:param str_val: str
:return: bool
"""
if str_val.startswith('-') and str_val[1:].isdigit():
return True
elif str_val.isdigit():
return True
else:
return False
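# is_int sketch (illustrative): is_int("-42") -> True, is_int("4.2") -> False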
def shutdown_process(process_obj, log_obj):
count = 60
while process_obj and process_obj.poll() is None:
time.sleep(1)
count -= 1
kill_signal = signal.SIGTERM if count > 0 else signal.SIGKILL
log_obj.info("Terminating process PID %s with signal %s (%s tries left)", process_obj.pid, kill_signal, count)
try:
if is_windows():
cur_pids = psutil.pids()
if process_obj.pid in cur_pids:
jm_proc = psutil.Process(process_obj.pid)
for child_proc in jm_proc.children(recursive=True):
log_obj.debug("Terminating child process: %d", child_proc.pid)
child_proc.send_signal(kill_signal)
os.kill(process_obj.pid, kill_signal)
else:
os.killpg(process_obj.pid, kill_signal)
except OSError as exc:
log_obj.debug("Failed to terminate process: %s", exc)
class LocalFileAdapter(requests.adapters.BaseAdapter):
"""
Protocol Adapter to allow HTTPClient to GET file:// URLs
"""
@staticmethod
def _chkpath(method, path):
"""Return an HTTP status for the given filesystem path."""
if method.lower() in ('put', 'delete'):
return 501, "Not Implemented" # TODO
elif method.lower() not in ('get', 'head'):
return 405, "Method Not Allowed"
elif os.path.isdir(path):
return 400, "Path Not A File"
elif not os.path.isfile(path):
return 404, "File Not Found"
elif not os.access(path, os.R_OK):
return 403, "Access Denied"
else:
return 200, "OK"
def send(self, req, **kwargs): # pylint: disable=unused-argument
"""Return the file specified by the given request
"""
path = os.path.normcase(os.path.normpath(url2pathname(req.path_url)))
response = requests.Response()
response.status_code, response.reason = self._chkpath(req.method, path)
if response.status_code == 200 and req.method.lower() != 'head':
try:
response.raw = open(path, 'rb')
except (OSError, IOError) as err:
response.status_code = 500
response.reason = str(err)
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
response.request = req
response.connection = self
return response
def close(self):
pass
class HTTPClient(object):
def __init__(self):
self.session = requests.Session()
self.session.mount('file://', LocalFileAdapter())
self.log = logging.getLogger(self.__class__.__name__)
self.proxy_settings = None
def add_proxy_settings(self, proxy_settings):
if proxy_settings and proxy_settings.get("address"):
self.proxy_settings = proxy_settings
proxy_addr = proxy_settings.get("address")
self.log.info("Using proxy %r", proxy_addr)
proxy_url = parse.urlsplit(proxy_addr)
self.log.debug("Using proxy settings: %s", proxy_url)
username = proxy_settings.get("username")
pwd = proxy_settings.get("password")
scheme = proxy_url.scheme if proxy_url.scheme else 'http'
if username and pwd:
proxy_uri = "%s://%s:%s@%s" % (scheme, username, pwd, proxy_url.netloc)
else:
proxy_uri = "%s://%s" % (scheme, proxy_url.netloc)
self.session.proxies = {"https": proxy_uri, "http": proxy_uri}
self.session.verify = proxy_settings.get('ssl-cert', True)
self.session.cert = proxy_settings.get('ssl-client-cert', None)
def get_proxy_props(self):
props = {}
if not self.proxy_settings or not self.proxy_settings.get("address"):
return props
proxy_url = parse.urlsplit(self.proxy_settings.get("address"))
username = self.proxy_settings.get("username")
pwd = self.proxy_settings.get("password")
for protocol in ["http", "https"]:
props[protocol + '.proxyHost'] = proxy_url.hostname
props[protocol + '.proxyPort'] = proxy_url.port or 80
if username and pwd:
props[protocol + '.proxyUser'] = username
props[protocol + '.proxyPass'] = pwd
return props
@staticmethod
def _save_file_from_connection(conn, filename, reporthook=None):
if not conn.ok:
raise TaurusNetworkError("Connection failed, status code %s" % conn.status_code)
total = int(conn.headers.get('content-length', 0))
block_size = 1024
count = 0
with open(filename, 'wb') as f:
for chunk in conn.iter_content(chunk_size=block_size):
if chunk:
f.write(chunk)
count += 1
if reporthook:
reporthook(count, block_size, total)
def download_file(self, url, filename, reporthook=None, data=None, timeout=None):
headers = None
try:
with self.session.get(url, stream=True, data=data, timeout=timeout) as conn:
self._save_file_from_connection(conn, filename, reporthook=reporthook)
headers = conn.headers
except requests.exceptions.RequestException as exc:
resp = exc.response
self.log.debug("File download resulted in exception: %s", traceback.format_exc())
msg = "Unsuccessful download from %s" % url
if resp is not None:
msg += ": %s - %s" % (resp.status_code, resp.reason)
raise TaurusNetworkError(msg)
except BaseException:
self.log.debug("File download resulted in exception: %s", traceback.format_exc())
raise TaurusNetworkError("Unsuccessful download from %s" % url)
return filename, headers
def request(self, method, url, *args, **kwargs):
self.log.debug('Making HTTP request %s %s', method, url)
try:
return self.session.request(method, url, *args, **kwargs)
except requests.exceptions.RequestException as exc:
resp = exc.response
self.log.debug("Request resulted in exception: %s", traceback.format_exc())
msg = "Request to %s failed" % url
if resp is not None:
msg += ": %s - %s" % (resp.status_code, resp.reason)
raise TaurusNetworkError(msg)
class ExceptionalDownloader(object):
def __init__(self, http_client):
"""
:type http_client: HTTPClient
"""
super(ExceptionalDownloader, self).__init__()
self.http_client = http_client
def get(self, url, filename=None, reporthook=None, data=None, suffix="", timeout=5.0):
if os.getenv("TAURUS_DISABLE_DOWNLOADS", ""):
raise TaurusInternalException("Downloads are disabled by TAURUS_DISABLE_DOWNLOADS env var")
try:
if not filename:
filename = temp_file(suffix)
result = self.http_client.download_file(url, filename, reporthook=reporthook, data=data, timeout=timeout)
except BaseException:
os.remove(filename)
raise
return result
class RequiredTool(object):
"""
Abstract required tool
"""
def __init__(self, log=None, tool_path="", download_link="", http_client=None,
env=None, version=None, installable=True, mandatory=True):
self.http_client = http_client
self.tool_path = os.path.expanduser(tool_path)
self.download_link = download_link
self.mirror_manager = None
self.mandatory = mandatory
self.version = None
if version is not None:
self.version = str(version)
self.installable = installable
self.tool_name = self.__class__.__name__
# for browsermobproxy compatibility, remove it later
if not isinstance(log, logging.Logger):
log = None
log = log or LOG
self.log = log.getChild(self.tool_name)
self.env = env or Environment(self.log)
def _get_version(self, output):
return
def call(self, *args, **kwargs):
mixed_env = self.env.get()
mixed_env.update(kwargs.get("env", {}))
kwargs["env"] = mixed_env
return exec_and_communicate(*args, **kwargs)
def check_if_installed(self):
if os.path.exists(self.tool_path):
return True
self.log.debug("File not exists: %s", self.tool_path)
return False
def install(self):
if not self.installable:
msg = "%s isn't found, automatic installation isn't implemented" % self.tool_name
if self.mandatory:
raise ToolError(msg)
else:
self.log.warning(msg)
return
with ProgressBarContext() as pbar:
if not os.path.exists(os.path.dirname(self.tool_path)):
os.makedirs(os.path.dirname(self.tool_path))
downloader = ExceptionalDownloader(self.http_client)
self.log.info("Downloading %s", self.download_link)
downloader.get(self.download_link, self.tool_path, reporthook=pbar.download_callback)
if self.check_if_installed():
return self.tool_path
else:
raise ToolError("Unable to run %s after installation!" % self.tool_name)
def _download(self, suffix=".zip", use_link=False):
if use_link:
links = [self.download_link]
else:
links = self.mirror_manager.mirrors()
downloader = ExceptionalDownloader(self.http_client)
for link in links:
self.log.info("Downloading: %s", link)
with ProgressBarContext() as pbar:
try:
return downloader.get(link, reporthook=pbar.download_callback, suffix=suffix)[0]
except KeyboardInterrupt:
raise
except BaseException as exc:
self.log.error("Error while downloading %s: %s" % (link, exc))
raise TaurusInternalException("%s download failed: No more links to try" % self.tool_name)
class JavaVM(RequiredTool):
def __init__(self, **kwargs):
if "mandatory" not in kwargs:
kwargs["mandatory"] = False
super(JavaVM, self).__init__(installable=False, tool_path="java", **kwargs)
def _get_version(self, output):
versions = re.findall("version\ \"([_\d\.]*)", output)
version = parse_java_version(versions)
if not version:
self.log.warning("Tool version parsing error: %s", output)
return version
def check_if_installed(self):
cmd = [self.tool_path, '-version']
self.log.debug("Trying %s: %s", self.tool_name, cmd)
try:
out, err = self.call(cmd)
except CALL_PROBLEMS as exc:
self.log.debug("Failed to check %s: %s", self.tool_name, exc)
return False
self.version = self._get_version(err)
if err:
out += err
self.log.debug("%s output: %s", self.tool_name, out)
return True
class ProgressBarContext(ProgressBar):
def __init__(self, maxval=0):
widgets = [Percentage(), ' ', Bar(marker='=', left='[', right=']'), ' ', ETA()]
super(ProgressBarContext, self).__init__(widgets=widgets, maxval=maxval, fd=sys.stdout)
def __enter__(self):
if not sys.stdout.isatty():
LOG.debug("No progressbar for non-tty output: %s", sys.stdout)
self.start()
return self
def update(self, value=None):
if sys.stdout.isatty():
super(ProgressBarContext, self).update(value)
def __exit__(self, exc_type, exc_val, exc_tb):
del exc_type, exc_val, exc_tb
if sys.stdout.isatty():
self.finish()
def download_callback(self, block_count, blocksize, totalsize):
if totalsize > 0:
self.maxval = totalsize
progress = block_count * blocksize
self.update(progress if progress <= totalsize else totalsize)
class IncrementableProgressBar(ProgressBarContext):
def __init__(self, maxval):
super(IncrementableProgressBar, self).__init__(maxval=maxval)
def increment(self):
incremented = self.currval + 1
if incremented < self.maxval:
super(IncrementableProgressBar, self).update(incremented)
def catchup(self, started_time=None, current_value=None):
super(IncrementableProgressBar, self).start()
if started_time:
self.start_time = started_time
if current_value and current_value < self.maxval:
self.update(current_value)
class TclLibrary(RequiredTool):
ENV_NAME = "TCL_LIBRARY"
INIT_TCL = "init.tcl"
FOLDER = "tcl"
def check_if_installed(self):
"""
Check if tcl is available
:return:
"""
if is_windows():
self.log.debug("Checking if %s variable is present in environment", TclLibrary.ENV_NAME)
if not os.environ.get(TclLibrary.ENV_NAME, None):
self.log.debug("%s environment variable is not present", TclLibrary.ENV_NAME)
return False
else:
self.log.debug("%s environment variable is present", TclLibrary.ENV_NAME)
return True
else:
self.log.debug("We don't need to check tcl library on this platform")
return True
@staticmethod
def _find_tcl_dir():
lib_dirs = [os.path.dirname(_x) for _x in sys.path if _x.lower().endswith('lib')]
for lib_dir in lib_dirs:
base_dir = os.path.join(lib_dir, TclLibrary.FOLDER)
if os.path.exists(base_dir):
for root, _, files in os.walk(base_dir):
if TclLibrary.INIT_TCL in files:
return root
def _set_env_variable(self, value):
self.log.debug("Setting environment %s=%s", TclLibrary.ENV_NAME, value)
os.environ[TclLibrary.ENV_NAME] = value
def install(self):
"""
:return:
"""
tcl_dir = self._find_tcl_dir()
if tcl_dir:
self.log.debug("Tcl directory was found: %s", tcl_dir)
self._set_env_variable(tcl_dir)
if not self.check_if_installed():
self.log.warning("No Tcl library was found")
class Node(RequiredTool):
def __init__(self, **kwargs):
super(Node, self).__init__(installable=False, **kwargs)
def check_if_installed(self):
node_candidates = ["node", "nodejs"]
for candidate in node_candidates:
try:
self.log.debug("Trying '%r' as Node Tool...", candidate)
out, err = self.call([candidate, '--version'])
except CALL_PROBLEMS as exc:
self.log.debug("%r is not installed: %s", candidate, exc)
continue
if err:
out += err
self.log.debug("%s output: %s", candidate, out)
self.tool_path = candidate
return True
return False
class MirrorsManager(object):
def __init__(self, http_client, base_link, parent_logger):
"""
:type base_link: str
:type http_client: HTTPClient
"""
self.base_link = base_link
self.log = parent_logger.getChild(self.__class__.__name__)
self.http_client = http_client
self.page_source = None
@abstractmethod
def _parse_mirrors(self):
return []
def mirrors(self):
self.log.debug("Retrieving mirrors from page: %s", self.base_link)
downloader = ExceptionalDownloader(self.http_client)
try:
tmp_file = downloader.get(self.base_link)[0]
with open(tmp_file) as fds:
self.page_source = fds.read()
except BaseException:
self.log.debug("Exception: %s", traceback.format_exc())
self.log.error("Can't fetch %s", self.base_link)
return self._parse_mirrors()
@contextmanager
def log_std_streams(logger=None, stdout_level=logging.DEBUG, stderr_level=logging.DEBUG):
"""
redirect standard output/error to taurus logger
"""
out_descriptor = os.dup(1)
err_descriptor = os.dup(2)
stdout = tempfile.SpooledTemporaryFile(mode='w+')
stderr = tempfile.SpooledTemporaryFile(mode='w+')
sys.stdout = stdout
sys.stderr = stderr
os.dup2(stdout.fileno(), 1)
os.dup2(stderr.fileno(), 2)
try:
yield
finally:
stdout.seek(0)
stderr.seek(0)
stdout_str = stdout.read().strip()
stderr_str = stderr.read().strip()
stdout.close()
stderr.close()
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
os.dup2(out_descriptor, 1)
os.dup2(err_descriptor, 2)
os.close(out_descriptor)
os.close(err_descriptor)
if logger:
if stdout_str:
logger.log(stdout_level, "STDOUT: " + stdout_str)
if stderr_str:
logger.log(stderr_level, "STDERR: " + stderr_str)
def open_browser(url):
try:
browser = webbrowser.get()
if type(browser) != GenericBrowser: # pylint: disable=unidiomatic-typecheck
with log_std_streams(logger=LOG):
webbrowser.open(url)
except BaseException as exc:
LOG.warning("Can't open link in browser: %s", exc)
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return 'linux' in sys.platform.lower()
def is_mac():
return 'darwin' in sys.platform.lower()
def platform_bitness():
return 64 if sys.maxsize > 2 ** 32 else 32
EXE_SUFFIX = ".bat" if is_windows() else ".sh"
class DummyScreen(BaseScreen):
"""
Null-object for Screen on non-tty output
"""
def __init__(self, rows=120, cols=40):
super(DummyScreen, self).__init__()
self.size = (rows, cols)
self.ansi_escape = re.compile(r'\x1b[^m]*m')
def get_cols_rows(self):
"""
Dummy cols and rows
:return:
"""
return self.size
def draw_screen(self, size, canvas):
"""
:param size:
:type canvas: urwid.Canvas
"""
data = ""
for char in canvas.content():
line = ""
for part in char:
if isinstance(part[2], str):
line += part[2]
else:
line += part[2].decode()
data += "%s│\n" % line
data = self.ansi_escape.sub('', data)
LOG.info("Screen %sx%s chars:\n%s", size[0], size[1], data)
def which(filename):
"""unix-style `which` implementation"""
locations = os.environ.get("PATH").split(os.pathsep)
candidates = []
for location in locations:
candidate = os.path.join(location, filename)
if os.path.isfile(candidate):
candidates.append(candidate)
return candidates
class PythonGenerator(object):
IMPORTS = ''
INDENT_STEP = 4
def __init__(self, scenario):
self.root = etree.Element("PythonCode")
self.tree = etree.ElementTree(self.root)
self.log = scenario.engine.log.getChild(self.__class__.__name__)
self.scenario = scenario
def add_imports(self):
imports = etree.Element("imports")
imports.text = self.IMPORTS
return imports
@abstractmethod
def build_source_code(self):
pass
@staticmethod
def gen_class_definition(class_name, inherits_from, indent=0):
def_tmpl = "class {class_name}({inherits_from}):"
class_def_element = etree.Element("class_definition", indent=str(indent))
class_def_element.text = def_tmpl.format(class_name=class_name, inherits_from="".join(inherits_from))
return class_def_element
@staticmethod
def gen_method_definition(method_name, params, indent=None):
if indent is None:
indent = PythonGenerator.INDENT_STEP
def_tmpl = "def {method_name}({params}):"
method_def_element = etree.Element("method_definition", indent=str(indent))
method_def_element.text = def_tmpl.format(method_name=method_name, params=",".join(params))
return method_def_element
@staticmethod
def gen_decorator_statement(decorator_name, indent=None):
if indent is None:
indent = PythonGenerator.INDENT_STEP
def_tmpl = "@{decorator_name}"
decorator_element = etree.Element("decorator_statement", indent=str(indent))
decorator_element.text = def_tmpl.format(decorator_name=decorator_name)
return decorator_element
@staticmethod
def gen_statement(statement, indent=None):
if indent is None:
indent = PythonGenerator.INDENT_STEP * 2
statement_elem = etree.Element("statement", indent=str(indent))
statement_elem.text = statement
return statement_elem
def gen_comment(self, comment, indent=None):
return self.gen_statement("# %s" % comment, indent=indent)
def save(self, filename):
with codecs.open(filename, 'w', encoding='utf-8') as fds:
for child in self.root.iter():
if child.text is not None:
indent = int(child.get('indent', "0"))
fds.write(" " * indent + child.text + "\n")
def gen_new_line(self, indent=0):
return self.gen_statement("", indent=indent)
def str_representer(dumper, data):
""" Representer for PyYAML that dumps multiline strings as | scalars """
if len(data.splitlines()) > 1:
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
return dumper.represent_scalar('tag:yaml.org,2002:str', data)
def humanize_bytes(byteval):
# from http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size/
# 25613067#25613067
_suffixes = [' ', 'K', 'M', 'G', 'T', 'P']
# determine binary order in steps of size 10
# (coerce to int, // still returns a float)
order = int(math.log(byteval, 2) / 10.0) if byteval else 0
# format file size
# (.4g results in rounded numbers for exact matches and max 3 decimals,
# should never resort to exponent values)
return '{:.4g}{}'.format(byteval / (1 << (order * 10)), _suffixes[order])
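# humanize_bytes sketch (illustrative values derived from the formula above):
#   humanize_bytes(2048)     -> '2K'
#   humanize_bytes(1048576)  -> '1M'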
class LDJSONReader(object):
def __init__(self, filename, parent_log):
self.log = parent_log.getChild(self.__class__.__name__)
self.file = FileReader(filename=filename,
file_opener=lambda f: open(f, 'rb'),
parent_logger=self.log)
self.partial_buffer = ""
def read(self, last_pass=False):
lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)
for line in lines:
if not last_pass and not line.endswith("\n"):
self.partial_buffer += line
continue
line = "%s%s" % (self.partial_buffer, line)
self.partial_buffer = ""
yield json.loads(line)
def get_host_ips(filter_loopbacks=True):
"""
Returns a list of all IP addresses assigned to this host.
:param filter_loopbacks: filter out loopback addresses
"""
ips = []
for _, interfaces in iteritems(psutil.net_if_addrs()):
for iface in interfaces:
addr = str(iface.address)
try:
ip = ipaddress.ip_address(addr)
if filter_loopbacks and ip.is_loopback:
continue
except ValueError:
continue
ips.append(iface.address)
return ips
def is_url(url):
return parse.urlparse(url).scheme in ["https", "http"]
def guess_delimiter(path):
with open(path) as fhd:
header = fhd.read(4096) # 4KB is enough for header
try:
delimiter = guess_csv_dialect(header).delimiter
except BaseException as exc:
LOG.debug(traceback.format_exc())
LOG.warning('CSV dialect detection failed (%s), default delimiter selected (",")', exc)
delimiter = "," # default value
return delimiter
def get_assembled_value(configs, key, protect=False):
"""
Joins values from several configs, "the last is the most important" (strings, lists or dictionaries).
:param configs: list of dicts with target configs
:param key: name of target config
:param protect: use safely, make deepcopy
"""
target_configs = []
for config in configs:
target_config = config.get(key)
if target_config:
if protect:
target_config = copy.deepcopy(target_config)
target_configs.append(target_config)
if not target_configs:
return
res = target_configs.pop(0)
if all(isinstance(config, dict) for config in target_configs):
for config in target_configs:
res.merge(config)
elif all(isinstance(config, list) for config in target_configs):
for config in target_configs:
res.extend(config)
elif all(isinstance(config, (numeric_types, str)) for config in target_configs):
res = target_configs[-1]
else:
raise TaurusConfigError("Incorrect type of '%s' found." % key)
return res
def parse_think_time(think_time, full=False):
distributions = ["uniform", "gaussian", "poisson"]
format_str = "^(%s)\(([\wd.]+)[,\s]+([\wd.]+)\)$"
expr = re.compile(format_str % '|'.join(distributions), re.IGNORECASE)
res = expr.match(str(think_time))
if not res: # constant timer
return think_time
if not full:
return res.group(2).lower() # make it simple!
else:
return [res.group(i + 1).lower() for i in range(3)]
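# parse_think_time sketch (illustrative):
#   parse_think_time("3s")                         -> '3s'  (constant timer, returned as-is)
#   parse_think_time("uniform(5s, 1s)")            -> '5s'  (simplified: the first argument only)
#   parse_think_time("uniform(5s, 1s)", full=True) -> ['uniform', '5s', '1s']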
class SoapUIScriptConverter(object):
NAMESPACES = dict(con="http://eviware.com/soapui/config")
def __init__(self, parent_log):
self.log = parent_log.getChild(self.__class__.__name__)
self.tree = None
self.interface = None
def load(self, path):
try:
self.tree = etree.ElementTree()
self.tree.parse(path)
except BaseException as exc:
msg = "XML parsing failed for file %s: %s"
raise TaurusInternalException(msg % (path, exc))
def _extract_headers(self, config_elem):
headers_settings = config_elem.find(
'.//con:settings/con:setting[@id="com.eviware.soapui.impl.wsdl.WsdlRequest@request-headers"]',
namespaces=self.NAMESPACES)
if headers_settings is None:
return None
headers = etree.fromstring(headers_settings.text)
if "{" + self.NAMESPACES['con'] + "}" + "entry" == headers.tag:
entries = [headers]
else:
entries = headers.findall(".//con:entry", namespaces=self.NAMESPACES)
headers = {entry.get('key'): entry.get('value')
for entry in entries}
return headers
def _extract_assertions(self, config_elem):
assertions = []
assertion_tags = config_elem.findall('.//con:assertion', namespaces=self.NAMESPACES)
for assertion in assertion_tags:
# TODO: XPath assertions / JSONPath assertions ?
if assertion.get('type') in ('Simple Contains', 'Simple NotContains'):
subject = assertion.findtext('./con:configuration/token', namespaces=self.NAMESPACES)
use_regex = assertion.findtext('./con:configuration/useRegEx', namespaces=self.NAMESPACES)
negate = assertion.get('type') == 'Simple NotContains'
assertions.append({"contains": [subject],
"subject": "body",
"regexp": use_regex == "true",
"not": negate,
})
return assertions
def _extract_http_request(self, test_step):
config = test_step.find('./con:config', namespaces=self.NAMESPACES)
request = {
"label": test_step.get('name'),
"url": config.find('.//con:endpoint', namespaces=self.NAMESPACES).text}
method = config.get('method')
if method is not None and method != "GET":
request["method"] = method
headers = self._extract_headers(config)
assertions = self._extract_assertions(config)
if headers:
request["headers"] = headers
if assertions:
request["assert"] = assertions
body = config.findtext('./con:request', namespaces=self.NAMESPACES)
if body is not None:
request["body"] = body
params = config.findall('./con:parameters/con:parameter', namespaces=self.NAMESPACES)
if params:
body = {}
for param in params:
key = param.findtext("./con:name", namespaces=self.NAMESPACES)
value = param.findtext("./con:value", namespaces=self.NAMESPACES)
body[key] = value
request["body"] = body
return request
def _extract_soap_endpoint(self, interface_name, operation_name):
interface = self.tree.find("//con:interface[@name='%s']" % interface_name, namespaces=self.NAMESPACES)
if interface is None:
self.log.warning("Can't find intreface %s for operation %s, skipping", interface_name, operation_name)
return None
interface_endpoint = interface.findtext("./con:endpoints/con:endpoint", namespaces=self.NAMESPACES)
operation = interface.find(".//con:operation[@name='%s']" % operation_name, namespaces=self.NAMESPACES)
if operation is None:
self.log.warning("Can't find operation %s for interface %s, skipping", operation_name, interface_name)
return None
operation_endpoint = operation.findtext(".//con:endpoint", namespaces=self.NAMESPACES)
if operation_endpoint is not None:
return operation_endpoint
elif interface_endpoint is not None:
return interface_endpoint
else:
self.log.warning("Can't find endpoint for %s:%s", interface_name, operation_name)
return None
def _extract_soap_request(self, test_step):
label = test_step.get('name')
config = test_step.find('./con:config', namespaces=self.NAMESPACES)
body = config.findtext('./con:request/con:request', namespaces=self.NAMESPACES)
interface = config.findtext('./con:interface', namespaces=self.NAMESPACES)
operation = config.findtext('./con:operation', namespaces=self.NAMESPACES)
self.log.debug("Extracting SOAP request, interface=%r, operation=%r", interface, operation)
endpoint = self._extract_soap_endpoint(interface, operation)
if endpoint is None:
return
request = {
"url": endpoint,
"label": label,
"method": "POST",
"headers": {
"Content-Type": "text/xml; charset=utf-8",
}
}
if body:
request["body"] = body
return request
def _calc_base_address(self, test_step):
config = test_step.find('./con:config', namespaces=self.NAMESPACES)
service = config.get('service')
interfaces = self.tree.xpath('//con:interface', namespaces=self.NAMESPACES)
for interface in interfaces:
if interface.get("name") == service:
endpoint = interface.find('.//con:endpoints/con:endpoint', namespaces=self.NAMESPACES)
if endpoint is not None:
service = endpoint.text
break
return service
def _extract_rest_request(self, test_step):
config = test_step.find('./con:config', namespaces=self.NAMESPACES)
method = config.get('method')
params = self._parse_parent_resources(config)
url = self._calc_base_address(test_step) + config.get('resourcePath')
for param_name in copy.copy(list(params.keys())):
template = "{" + param_name + "}"
if template in url:
param_value = params.pop(param_name)
url = url.replace(template, param_value)
request = {"url": url, "label": test_step.get('name')}
if method is not None and method != "GET":
request["method"] = method
headers = self._extract_headers(config)
assertions = self._extract_assertions(config)
if headers:
request["headers"] = headers
if assertions:
request["assert"] = assertions
body = {}
for key, value in iteritems(params):
body[key] = value
if body:
request["body"] = body
return request
def _parse_parent_resources(self, config):
method_name = config.get('methodName')
for interface in self.interface:
method_obj = interface.find('.//con:method[@name="%s"]' % method_name, namespaces=self.NAMESPACES)
if method_obj is not None:
break
params = BetterDict()
if method_obj is not None:
parent = method_obj.getparent()
while parent.tag.endswith('resource'):
for param in parent.findall('./con:parameters/con:parameter', namespaces=self.NAMESPACES):
param_name = param.findtext('./con:name', namespaces=self.NAMESPACES)
param_value = param.findtext('./con:value', namespaces=self.NAMESPACES)
def_value = param.findtext('./con:default', namespaces=self.NAMESPACES)
if param_value:
params[param_name] = param_value
elif def_value:
params[param_name] = def_value
parent = parent.getparent()
for entry in config.findall('./con:restRequest/con:parameters/con:entry', namespaces=self.NAMESPACES):
params.merge({entry.get("key"): entry.get("value")})
return params
def _extract_properties(self, block, key_prefix=""):
properties = block.findall('./con:properties/con:property', namespaces=self.NAMESPACES)
prop_map = {}
for prop in properties:
key = key_prefix + prop.findtext('./con:name', namespaces=self.NAMESPACES)
value = prop.findtext('./con:value', namespaces=self.NAMESPACES)
prop_map[key] = value
return prop_map
def _extract_execution(self, test_case):
load_exec = {}
load_test = test_case.find('./con:loadTest', namespaces=self.NAMESPACES)
if load_test is not None:
load_exec['concurrency'] = int(load_test.find('./con:threadCount', self.NAMESPACES).text)
load_exec['hold-for'] = int(load_test.find('./con:testLimit', self.NAMESPACES).text)
else:
load_exec['concurrency'] = 1
return load_exec
def _validate_transfer(self, source_type, source_step_name, transfer_type, target_step_name):
source_step = self.tree.find("//con:testStep[@name='%s']" % source_step_name, namespaces=self.NAMESPACES)
if source_step is None:
self.log.warning("Can't find source step (%s) for Property Transfer. Skipping", source_step_name)
return False
source_step_type = source_step.get("type")
if source_step_type not in ["httprequest", "restrequest", "request"]:
self.log.warning("Unsupported source step type for Property Transfer (%s). Skipping", source_step_type)
return False
if source_type != "Response":
self.log.warning("Found Property Transfer with non-response source (%s). Skipping", source_type)
return False
if transfer_type not in ["JSONPATH", "XPATH"]:
self.log.warning("Found Property Transfer with unsupported type (%s). Skipping", transfer_type)
return False
target_step = self.tree.find("//con:testStep[@name='%s']" % target_step_name, namespaces=self.NAMESPACES)
if target_step is None:
self.log.warning("Can't find target step (%s) for Property Transfer. Skipping", target_step_name)
return False
target_step_type = target_step.get("type")
if target_step_type != "properties":
self.log.warning("Unsupported target step type for Property Transfer (%s). Skipping", target_step_type)
return False
return True
def _extract_transfer(self, transfer):
source_type = transfer.findtext('./con:sourceType', namespaces=self.NAMESPACES)
source_step_name = transfer.findtext('./con:sourceStep', namespaces=self.NAMESPACES)
query = transfer.findtext('./con:sourcePath', namespaces=self.NAMESPACES)
transfer_type = transfer.findtext('./con:type', namespaces=self.NAMESPACES)
target_step_name = transfer.findtext('./con:targetStep', namespaces=self.NAMESPACES)
target_prop = transfer.findtext('./con:targetType', namespaces=self.NAMESPACES)
if source_step_name.startswith("#") and source_step_name.endswith("#"):
source_step_name = source_step_name[1:-1]
if not self._validate_transfer(source_type, source_step_name, transfer_type, target_step_name):
return None
extractor = BetterDict()
if transfer_type == "JSONPATH":
extractor.merge({
'extract-jsonpath': {
target_prop: {
'jsonpath': query,
'default': 'NOT_FOUND',
}
}
})
elif transfer_type == "XPATH":
extractor.merge({
'extract-xpath': {
target_prop: {
'xpath': query,
'default': 'NOT_FOUND',
}
}
})
return {source_step_name: extractor}
def _extract_property_transfers(self, test_step):
extractors = BetterDict() # label -> {extract-xpath: ..., extract-jsonpath: ...}
transfers = test_step.findall('./con:config/con:transfers', namespaces=self.NAMESPACES)
if not transfers:
return None
for transfer in transfers:
extracted_transfer = self._extract_transfer(transfer)
if extracted_transfer is not None:
extractors.merge(extracted_transfer)
return extractors
def _extract_scenario(self, test_case, case_level_props):
variables = BetterDict.from_dict(case_level_props)
requests = []
extractors = BetterDict()
steps = test_case.findall('.//con:testStep', namespaces=self.NAMESPACES)
for step in steps:
request = None
if step.get("type") == "httprequest":
request = self._extract_http_request(step)
elif step.get("type") == "restrequest":
request = self._extract_rest_request(step)
elif step.get("type") == "request":
request = self._extract_soap_request(step)
elif step.get("type") == "properties":
config_block = step.find('./con:config', namespaces=self.NAMESPACES)
if config_block is not None:
props = self._extract_properties(config_block)
variables.merge(props)
elif step.get("type") == "transfer":
extracted_extractors = self._extract_property_transfers(step) # label -> extractor
if extracted_extractors:
extractors.merge(extracted_extractors)
elif step.get("type") == "groovy":
request = self._extract_script(step)
if request is not None:
requests.append(request)
for request in requests:
label = request["label"]
if label in extractors:
request.update(extractors[label])
scenario = {
"test-case": test_case.get("name"),
"requests": requests
}
if variables:
scenario["variables"] = variables
return scenario
def _extract_script(self, test_step):
label = test_step.get("name", "Script")
script = test_step.find('./con:config/script', namespaces=self.NAMESPACES).text
if script is not None:
script = script.strip()
return {
"label": label,
"action": "pause",
"target": "current-thread",
"pause-duration": "0ms",
"jsr223": [{
"language": "groovy",
"script-text": script,
}]
}
def _extract_test_case(self, test_case, test_suite, suite_level_props):
case_name = test_case.get("name")
scenario_name = test_suite.get("name") + "-" + case_name
case_properties = self._extract_properties(test_case)
case_properties = {
"#TestCase#" + key: value
for key, value in iteritems(case_properties)
}
case_level_props = BetterDict.from_dict(suite_level_props)
case_level_props.merge(case_properties)
scenario = self._extract_scenario(test_case, case_level_props)
scenario['test-suite'] = test_suite.get("name")
return scenario_name, scenario
def _extract_config(self, project, test_suites, target_test_case=None):
execution = []
scenarios = {}
project_properties = self._extract_properties(project, key_prefix="#Project#")
project_name = project.get("name")
interface_exec, interface_scen = self._extract_interface(project_name, self.interface)
execution.append(interface_exec)
scenarios.update(interface_scen)
for suite in test_suites:
suite_props = BetterDict.from_dict(project_properties)
suite_props.merge(self._extract_properties(suite, key_prefix="#TestSuite#"))
test_cases = suite.findall('.//con:testCase', namespaces=self.NAMESPACES)
for case in test_cases:
case_name = case.get("name")
scenario_name, scenario = self._extract_test_case(case, suite, suite_props)
load_exec = self._extract_execution(case)
load_exec['scenario'] = scenario_name
self.log.debug("Extracted execution for scenario %s", scenario_name)
if not scenario["requests"]:
self.log.warning("No requests extracted for scenario %s, skipping it" % scenario_name)
continue
if target_test_case is None or target_test_case == case_name:
self.log.debug("Extracted scenario: %s", scenario_name)
scenarios[scenario_name] = scenario
execution.append(load_exec)
return {
"execution": execution,
"scenarios": scenarios,
}
def convert_script(self, script_path, target_test_case=None):
if not os.path.exists(script_path):
raise ValueError("SoapUI script %s doesn't exist" % script_path)
self.load(script_path)
self.log.debug("Found namespaces: %s", self.NAMESPACES)
projects = self.tree.xpath('//con:soapui-project', namespaces=self.NAMESPACES)
self.log.debug("Found projects: %s", projects)
project = projects[0]
self.interface = project.findall('.//con:interface', namespaces=self.NAMESPACES)
self.log.debug("Found interface: %s", self.interface)
test_suites = project.findall('.//con:testSuite', namespaces=self.NAMESPACES)
self.log.debug("Found test suites: %s", test_suites)
config = self._extract_config(project, test_suites, target_test_case=target_test_case)
if not config["scenarios"]:
self.log.warning("No scenarios were extracted")
if not config["execution"]:
self.log.warning("No load tests were extracted")
return config
def _extract_interface(self, project_name, interfaces):
execution = {
"concurrency": 1,
"iterations": 1,
"ramp-up": "10s",
"scenario": project_name
}
scenarios = {}
interface_requests = []
for interface in interfaces:
try:
endpoint = interface.find('.//con:endpoint', namespaces=self.NAMESPACES).text
resources = interface.findall('.//con:resource', namespaces=self.NAMESPACES)
if not resources:
interface_requests.append({
"url": endpoint
})
continue
except AttributeError:
continue
for resource in resources:
path = resource.get("path")
url = endpoint + path
methods = resource.findall('.//con:method', namespaces=self.NAMESPACES)
for method in methods:
method_type = method.get("method")
requests = method.findall('con:request', namespaces=self.NAMESPACES)
for request in requests:
request_body = request.find('.//con:request', namespaces=self.NAMESPACES).text
interface_requests.append({
"body": request_body,
"method": method_type,
"url": url
})
scenarios.update({project_name: {"requests": interface_requests}})
return execution, scenarios
def find_soapui_test_case(self, test_case, scenarios):
matching_scenarios = [
(name, scen)
for name, scen in iteritems(scenarios)
if scen.get("test-case") == test_case
]
if len(matching_scenarios) == 0:
sorted_scenarios = sorted((name, scen) for name, scen in iteritems(scenarios))
scenario_name, scenario = next(iter(sorted_scenarios))
if test_case is None:
self.log.warning("No `test-case` specified for SoapUI project, will use '%s'",
scenario.get("test-case"))
else:
msg = "No matching test cases found for name '%s', using the '%s'"
self.log.warning(msg, test_case, scenario.get("test-case"))
elif len(matching_scenarios) > 1:
scenario_name, scenario = next(iter(matching_scenarios))
msg = "Multiple test cases found for name '%s', using case '%s' from suite '%s'"
self.log.warning(msg, test_case, scenario.get('test-case'), scenario.get('test-suite'))
else:
scenario_name, scenario = next(iter(matching_scenarios))
return scenario_name, scenario
|
Brighter than the sun | someone els.
Hang in there, Els… any major illness is tough to deal with in a relationship! I know firsthand. Take care of yourself! |
# -*- coding: utf-8 -*-
from api_managers import api_builder
from tools import loader
import logging
logger = logging.getLogger(__name__)
class Comparator:
def __init__(self, criteria, apis, scorer, fallback_name):
self.criteria = criteria
self.apis = apis
self.scorer = scorer
self.fallback_name = fallback_name
self.results = {}
def compare(self):
results = {}
logger.info('Comparator :')
for language in self.criteria:
logger.info('\tlanguage: {}'.format(language))
results[language] = {}
for criterion in self.criteria[language]:
logger.info('\t\tcriterion: {}'.format(criterion))
# get data_frame
df = loader.load(language, criterion)
logger.info('\t\t\tdata ready')
results[language][criterion] = {}
for api in self.apis:
api_manager = api_builder.build_api(api_name=api, fallback_name=self.fallback_name,
language=language, params={})
self.scorer.fit(api_manager, df)
logger.info('\t\t\tscoring {}'.format(api))
self.scorer.score()
results[language][criterion][str(api_manager)] = {'scores': self.scorer.scores,
'risk_rate': self.scorer.risk_rate}
self.results = results
|
The Mayo family has a problem that has not been resolved in over 119 years. Actually, it may have been understood years ago but the knowledge did not filter down to their descendants.
John Mayo came to Maine before the detailed records of border crossings came into being about 1895. George Lessor came here before that date, too. |
import os
from hashlib import sha256
from hmac import HMAC
import random
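# NOTE: this snippet targets Python 2 -- it relies on xrange() and on
# str.encode("base64") / str.decode("base64"), which are not available in Python 3.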
# def random_bytes(num_bytes):
# return "".join(chr(random.randrange(256)) for i in xrange(num_bytes))
def random_bytes(num_bytes):
return os.urandom(num_bytes)
def pbkdf_sha256(password, salt, iterations):
result = password
for i in xrange(iterations):
result = HMAC(result, salt, sha256).digest() # use HMAC to apply the salt
return result
NUM_ITERATIONS = 5000
def hash_password(plain_password, salt=None):
if not salt:
salt = random_bytes(8) # 64 bits
plain_password = str(plain_password)
hashed_password = pbkdf_sha256(plain_password, salt, NUM_ITERATIONS)
# return the salt and hashed password, encoded in base64 and split with ","
return salt.encode("base64").strip() + "," + hashed_password.encode("base64").strip()
def check_password(saved_password_entry, plain_password):
salt, hashed_password = saved_password_entry.split(",")
salt = salt.decode("base64")
return saved_password_entry == hash_password(plain_password, salt)
# password_entry = hash_password("mysecret")
# print password_entry # will print, for example: 8Y1ZO8Y1pi4=,r7Acg5iRiZ/x4QwFLhPMjASESxesoIcdJRSDkqWYfaA=
# check_password(password_entry, "mysecret") # returns True
|
|
import sys
import urllib
from numpy import genfromtxt
import numpy as np
def get_teams():
f = urllib.urlopen("http://www.masseyratings.com/scores.php?s=199229&sub=199229&all=1&mode=3&exhib=on&format=2")
s = f.read().split()
my_teamnames = {}
for i in range(0,len(s)/2):
my_teamnames.update({i : s[i*2 + 1]})
return my_teamnames
def get_games(year,exhibition):
if year == 2013:
if exhibition == True:
f = urllib.urlopen('http://www.masseyratings.com/scores.php?s=199229&sub=199229&all=1&mode=3&exhib=on&format=1')
elif exhibition == False:
f = urllib.urlopen('http://www.masseyratings.com/scores.php?s=199229&sub=199229&all=1&mode=3&format=1')
else:
sys.exit('"exhibition" must be "True" or "False"')
elif year == 2012:
if exhibition == True:
f = urllib.urlopen('http://www.masseyratings.com/scores.php?s=181613&sub=181613&all=1&mode=3&exhib=on&format=1')
elif exhibition == False:
f = urllib.urlopen('http://www.masseyratings.com/scores.php?s=181613&sub=181613&all=1&mode=3&format=1')
else:
sys.exit('"exhibition" must be "True" or "False"')
else:
sys.exit('Not a valid year')
s = f.read()
if exhibition == False:
file_name = str('games_'+str(year)+'.txt')
elif exhibition == True:
file_name = str('games_'+str(year)+'_exhib.txt')
k = open(file_name,'w')
k.write(s)
k.close()
f.close()
my_games = genfromtxt(file_name, dtype = None, delimiter=',')
return my_games
|
This listing is for payment 2 of 2. $80. To be paid Friday 3/15/19. |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ControllersOperations(object):
"""ControllersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~dev_spaces_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Controller"
"""Gets an Azure Dev Spaces Controller.
Gets the properties for an Azure Dev Spaces Controller.
:param resource_group_name: Resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the resource.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Controller, or the result of cls(response)
:rtype: ~dev_spaces_management_client.models.Controller
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Controller"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'name': self._serialize.url("name", name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]([_-]*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DevSpacesErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Controller', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevSpaces/controllers/{name}'} # type: ignore
def _create_initial(
self,
resource_group_name, # type: str
name, # type: str
controller, # type: "_models.Controller"
**kwargs # type: Any
):
# type: (...) -> "_models.Controller"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Controller"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'name': self._serialize.url("name", name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]([_-]*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(controller, 'Controller')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DevSpacesErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Controller', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Controller', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevSpaces/controllers/{name}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
name, # type: str
controller, # type: "_models.Controller"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Controller"]
"""Creates an Azure Dev Spaces Controller.
Creates an Azure Dev Spaces Controller with the specified create parameters.
:param resource_group_name: Resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the resource.
:type name: str
:param controller: Controller create parameters.
:type controller: ~dev_spaces_management_client.models.Controller
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Controller or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~dev_spaces_management_client.models.Controller]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Controller"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
name=name,
controller=controller,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Controller', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'name': self._serialize.url("name", name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]([_-]*[a-zA-Z0-9])*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevSpaces/controllers/{name}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'name': self._serialize.url("name", name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]([_-]*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DevSpacesErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevSpaces/controllers/{name}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes an Azure Dev Spaces Controller.
Deletes an existing Azure Dev Spaces Controller.
:param resource_group_name: Resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the resource.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
name=name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'name': self._serialize.url("name", name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]([_-]*[a-zA-Z0-9])*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevSpaces/controllers/{name}'} # type: ignore
def update(
self,
resource_group_name, # type: str
name, # type: str
controller_update_parameters, # type: "_models.ControllerUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.Controller"
"""Updates an Azure Dev Spaces Controller.
Updates the properties of an existing Azure Dev Spaces Controller with the specified update
parameters.
:param resource_group_name: Resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the resource.
:type name: str
:param controller_update_parameters: Parameters for updating the Azure Dev Spaces Controller.
:type controller_update_parameters: ~dev_spaces_management_client.models.ControllerUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Controller, or the result of cls(response)
:rtype: ~dev_spaces_management_client.models.Controller
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Controller"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'name': self._serialize.url("name", name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]([_-]*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(controller_update_parameters, 'ControllerUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DevSpacesErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Controller', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Controller', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevSpaces/controllers/{name}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ControllerList"]
"""Lists the Azure Dev Spaces Controllers in a resource group.
Lists all the Azure Dev Spaces Controllers with their properties in the specified resource
group and subscription.
:param resource_group_name: Resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ControllerList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~dev_spaces_management_client.models.ControllerList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ControllerList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ControllerList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DevSpacesErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevSpaces/controllers'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ControllerList"]
"""Lists the Azure Dev Spaces Controllers in a subscription.
Lists all the Azure Dev Spaces Controllers with their properties in the subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ControllerList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~dev_spaces_management_client.models.ControllerList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ControllerList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ControllerList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DevSpacesErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DevSpaces/controllers'} # type: ignore
def list_connection_details(
self,
resource_group_name, # type: str
name, # type: str
list_connection_details_parameters, # type: "_models.ListConnectionDetailsParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.ControllerConnectionDetailsList"
"""Lists connection details for an Azure Dev Spaces Controller.
Lists connection details for the underlying container resources of an Azure Dev Spaces
Controller.
:param resource_group_name: Resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the resource.
:type name: str
:param list_connection_details_parameters: Parameters for listing connection details of Azure
Dev Spaces Controller.
:type list_connection_details_parameters: ~dev_spaces_management_client.models.ListConnectionDetailsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ControllerConnectionDetailsList, or the result of cls(response)
:rtype: ~dev_spaces_management_client.models.ControllerConnectionDetailsList
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ControllerConnectionDetailsList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list_connection_details.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'name': self._serialize.url("name", name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]([_-]*[a-zA-Z0-9])*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(list_connection_details_parameters, 'ListConnectionDetailsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DevSpacesErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ControllerConnectionDetailsList', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connection_details.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DevSpaces/controllers/{name}/listConnectionDetails'} # type: ignore
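    # Illustrative usage sketch (not part of the generated code). It assumes an authenticated
    # DevSpacesManagementClient instance named `client` that exposes this operation group as
    # `client.controllers`:
    #
    #   poller = client.controllers.begin_create(resource_group_name, name, controller)
    #   controller = poller.result()  # blocks until the long-running operation completes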
|
Make sure to check out MissPittyPatsJewels on Etsy to see all of the items Megan has in stock!
Mermaid Tail Towel DIY from Stitch To My Lou!
Fluffy Peanut Butter Frosting from All Recipes! |
import factory
from django.contrib.contenttypes.models import ContentType
from paperclip.models import Attachment
from geotrek.authent.factories import UserFactory
from geotrek.common.utils.testdata import get_dummy_uploaded_file
from . import models
class OrganismFactory(factory.Factory):
FACTORY_FOR = models.Organism
organism = factory.Sequence(lambda n: u"Organism %s" % n)
class FileTypeFactory(factory.Factory):
FACTORY_FOR = models.FileType
type = factory.Sequence(lambda n: u"FileType %s" % n)
class AttachmentFactory(factory.Factory):
"""
    Create an attachment. You must provide an 'obj' keyword,
the object (saved in db) to which the attachment will be bound.
"""
FACTORY_FOR = Attachment
attachment_file = get_dummy_uploaded_file()
filetype = factory.SubFactory(FileTypeFactory)
creator = factory.SubFactory(UserFactory)
title = factory.Sequence(u"Title {0}".format)
legend = factory.Sequence(u"Legend {0}".format)
# date_insert, date_update
@classmethod
def _prepare(cls, create, obj=None, **kwargs):
kwargs['content_type'] = ContentType.objects.get_for_model(obj)
kwargs['object_id'] = obj.pk
return super(AttachmentFactory, cls)._prepare(create, **kwargs)
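# Illustrative usage (assumes `some_saved_instance` is any model instance already saved to the database):
#   attachment = AttachmentFactory(obj=some_saved_instance)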
|
DPC_WATCHDOG_VIOLATION – also known as a blue screen of death error – is a severe problem for Windows 10 users. When the notification appears on screen, your system restarts within about 5 seconds. Sometimes the problem keeps recurring, and seeing the error on your screen again and again becomes genuinely annoying. If you are looking for a solution, this article presents some simple yet effective ways to fix the DPC Watchdog Violation error in Windows 10.
Windows 10 users in particular may find the DPC Watchdog Violation error occurring very frequently on their computers. Before moving to a concrete solution, let's find out what exactly DPC_WATCHDOG_VIOLATION is and what the root of this frustrating error is.
What exactly is the DPC Watchdog Violation error?
The DPC Watchdog error is a common problem in the Windows operating system. There are a few typical causes: unsupported SSD firmware, an outdated SSD driver, hardware that is incompatible with or malfunctioning on your device, or damaged system files. If you are using an SSD in your computer, check that its firmware is on the current version.
Windows 10 users may run into this problem whenever they boot Windows. Let's discuss some methods that can fix the issue straight away.
Your computer's drivers might be outdated, so it is worth paying attention to them.
You may not have updated them since the day they were installed, and you might be surprised to learn that this error is often caused by exactly that.
In short, you have to fix any incompatible hardware. You can also try removing hardware you suspect is the problem, especially anything you attached to your computer recently and that started producing this error soon after you mounted it.
Some users have also reported that even plugging in headphones can trigger this blue screen error, because their sound card does not support their Windows version. Such devices can directly affect the functionality of the system and even damage software and system files. Try the methods below to fix DPC_WATCHDOG_VIOLATION in Windows.
The first method is to disable fast startup. Go to the Start button > Control Panel > Power Options.
On the left side of the panel, select "Choose what the power buttons do".
Select "Change settings that are currently unavailable".
Scroll down to the "Turn on fast startup" option and uncheck it.
Then click "Save changes" to apply the settings and exit.
The next method is to check your hard disk for errors. Open the Run window and type cmd.
Right-click Command Prompt and choose to run it as administrator.
Grant the administrator permission and type the following command in the window: chkdsk c: /f /r.
Press Enter, then type Y to allow the check the next time you start your computer.
Then restart your computer so that Windows performs a hard disk check on your system; if it finds any problems, it will help you fix them.
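For readers who prefer to script this step, the same disk check can be launched from Python. The snippet below is only an illustrative sketch, not part of the original guide: it assumes Python 3 on Windows, an elevated (administrator) shell, and drive C:.
import subprocess
# Run chkdsk with /f (fix errors) and /r (locate bad sectors and recover readable data).
# On a volume that is in use, chkdsk asks whether to schedule the check for the next
# restart; passing "Y" as input answers that prompt, mirroring the manual steps above.
result = subprocess.run(
    ["chkdsk", "c:", "/f", "/r"],
    input="Y\n",
    capture_output=True,
    text=True,
)
print(result.stdout)
After the command finishes, restart the computer as described above so Windows can perform the scheduled check.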
Another method is to switch to the standard SATA AHCI controller driver. Open Device Manager.
Right-click the controller with "SATA AHCI" in its name and choose Properties.
Make sure you have selected the correct controller, then go to the Driver tab and open Driver Details.
Check whether iaStorA.sys is listed as a driver, then click OK to exit the window.
Go back to the Properties window and choose Update Driver… under the Driver tab.
Choose the "Browse my computer for driver software" option, then select "Let me pick from a list of device drivers on my computer". This lets you pick a driver already present on your device.
Select "Standard SATA AHCI Controller" from the list, then press "Next" to continue and finish the process.
The process is almost finished. Just restart your computer, and the DPC Watchdog Violation error should hopefully be resolved.
If the error persists, boot into Safe Mode and reinstall the display driver. Open the Run window, type msconfig and hit Enter.
Click the Boot tab. Under Boot options, check "Safe boot" and choose "Network".
Click OK, then restart your system.
You are now in Safe Mode, where the error should not appear.
Go to Device Manager, find and expand "Display adapters", then right-click your display adapter and choose to uninstall it.
Confirm by checking "Delete the driver software for this device" and click OK.
Restart your computer again; this should fix the DPC Watchdog Violation blue screen of death error, and hopefully it will never appear again.
I hope the issue has been resolved by one of the methods above; if the problem still persists, you can contact a service center. That is all we have on solving DPC_WATCHDOG_VIOLATION permanently. Do let us know in the comments section below if you still face any issues, and we will try to help as soon as possible. |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import logging
import time
import redis
import redis.exceptions
import MySQLdb
import config
import data
import exceptions
import log
class Db(object):
db_mem = None
db_mem_posts = None
db_disk_posts = None
db_cursor = None
retries = 360
retry_wait = 10
cmd_retries = 10
cmd_retry_wait = 10
def __init__(self):
c = config.Config()
self.config = c.cfg
self.log = logging.getLogger('db')
self.dir_root = self.config.get('trends', 'root')
def setup(self):
"""
Setup the connection to Redis DB and to MySQL DB.
"""
self.setup_redis()
self.setup_mysql_loop()
# Get marker to know if a post id is in Redis or MySQL.
self.posts_tid = int(self.get('posts_tid'))
def setup_redis(self):
"""Connections to Redis."""
host = self.config.get('redis', 'host')
port = self.config.getint('redis', 'port')
self.db_mem = redis.Redis(host=host, port=port, db=0)
self.db_mem_posts = redis.Redis(host=host, port=port, db=1)
def setup_mysql_loop(self):
"""Setup connection to Redis until it succeeds"""
retry = 0
while retry < self.retries:
try:
self.setup_mysql()
return
except exceptions.DbError:
if retry < self.retries:
time.sleep(self.retry_wait)
retry += 1
self.log.error(
'%d retries to connect to MySQL failed', self.retries)
raise exceptions.DbError()
def setup_mysql(self):
"""Setup connections to MySQL"""
user = self.config.get('mysql', 'user')
password = self.config.get('mysql', 'password')
db = self.config.get('mysql', 'db')
host = self.config.get('mysql', 'host')
try:
self.db_disk_posts = MySQLdb.connect(host=host,
user=user, passwd=password, db=db,
use_unicode=True, charset='utf8')
self.db_cursor = self.db_disk_posts.cursor()
except MySQLdb.Error:
self.log.error('Problem to connect to MySQL host %s', host)
raise exceptions.DbError()
def redis_cmd(self, cmd, *args):
"""Redis command to DB index 0"""
return self.redis_command(0, cmd, *args)
def redis_cmd_db_1(self, cmd, *args):
"""Redis command to DB index 1"""
return self.redis_command(1, cmd, *args)
def redis_command(self, db, cmd, *args):
"""Command to Redis.
Try cmd_retries times.
"""
if db == 0:
dbr = self.db_mem
else:
dbr = self.db_mem_posts
retry = 0
while retry < self.cmd_retries:
try:
return getattr(dbr, cmd)(*args)
except redis.exceptions.RedisError:
self.log.error('Redis cmd %s error', cmd)
retry += 1
if retry <= self.cmd_retries:
time.sleep(self.cmd_retry_wait)
except AttributeError:
self.log.error('Redis cmd %s does not exist', cmd)
raise exceptions.DbError()
raise exceptions.DbError()
def get(self, key, db= 0):
if db == 0:
return self.redis_cmd('get', key)
else:
return self.redis_cmd_db_1('get', key)
def set(self, key, value, db=0):
if db == 0:
return self.redis_cmd('set', key, value)
else:
return self.redis_cmd_db_1('set', key, value)
def delete(self, key):
return self.redis_cmd('delete', key)
def exists(self, key):
return self.redis_cmd('exists', key)
def incr(self, key):
return self.redis_cmd('incr', key)
def rpush(self, key, value):
return self.redis_cmd('rpush', key, value)
def lrange(self, key, start, stop):
return self.redis_cmd('lrange', key, start, stop)
def lset(self, key, index, value):
return self.redis_cmd('lset', key, index, value)
def lindex(self, key, index):
return self.redis_cmd('lindex', key, index)
def mysql_command(self, cmd, sql, writer, commit, *args):
"""Command to MySQL.
Try cmd_retries times."""
retry = 0
while retry < self.cmd_retries:
try:
r = getattr(self.db_cursor, cmd)(sql, args)
if writer:
if commit:
self.db_disk_posts.commit()
return r
else:
return self.db_cursor.fetchall()
except (MySQLdb.OperationalError, MySQLdb.InternalError):
self.log.error('MySQL cmd %s DB error', cmd)
# reconnect
self.setup_mysql_loop()
retry = 0
except MySQLdb.Error:
self.log.error('MySQL cmd %s sql %s failed', cmd, sql)
retry += 1
if retry <= self.cmd_retries:
time.sleep(self.cmd_retry_wait)
except AttributeError:
self.log.error('MySQL cmd %s does not exist', cmd)
raise exceptions.DbError()
raise exceptions.DbError()
def sql_read(self, sql, *args):
"""Read command to MySQL."""
return self.mysql_command('execute', sql, False, False, *args)
def sql_write(self, sql, *args):
"""Write command to MySQL."""
return self.mysql_command('execute', sql, True, True, *args)
def sql_write_no_commit(self, sql, *args):
"""Write command to MySQL but no commit."""
return self.mysql_command('execute', sql, True, False, *args)
def sql_commit(self):
"""Commit changes to disk"""
self.db_disk_posts.commit()
def set_post(self, post_id, value):
"""Add/Update post value in Redis or MySQL based on posts id marker...
"""
if post_id >= self.posts_tid:
self.set('post:%d' % (post_id,), value, db=1)
else:
sql = 'insert into tp_post(post_id, post) values(%s, %s)'\
'on duplicate key update post=%s'
self.sql_write(sql, post_id, value, value)
def add_post(self, post_id, value):
"""Add post in MySQL
"""
sql = 'insert into tp_post(post_id, post) values(%s, %s)'
self.sql_write(sql, post_id, value)
def get_post(self, post_id):
"""Get post value from Redis or MySQL based on posts id marker...
"""
if post_id >= self.posts_tid:
r = self.get('post:%d' % (post_id,), db=1)
else:
try:
sql = 'select post from tp_post where post_id=%s'
r = self.sql_read(sql, post_id)
except exceptions.DbError:
r = None
return r
def set_person_score(self, post_id, person_id, score):
"""Set the person's sentiment score based on the tweet
"""
sql = 'insert into tp_person_score(post_id, person_id, score) values(%s, %s, %s)'
self.sql_write(sql, post_id, person_id, str(score))
def get_persons(self):
"""
Get list of persons from db
"""
names = self.redis_cmd('lrange', 'persons', 0, -1)
persons = []
for n in names:
s = n.split(':')
person = {}
person['id'] = int(s[0])
person['first_name'] = s[1]
person['name'] = s[2]
person['nickname'] = s[3]
person['group'] = int(s[4])
person['words'] = json.loads(s[5])
pc = self.lindex('person:%d:posts_count' % int(s[0]), -1)
posts_count = int((pc if pc else 0))
person['posts_count'] = (posts_count if posts_count > 0 else 0)
rels = self.lindex('person:%d:rel' % int(s[0]), -1)
person['rel'] = json.loads((rels if rels else '{}'))
sentiment = self.lindex('person:%d:sentiment' % int(s[0]), -1)
person['sentiment'] = float((sentiment if sentiment else 0))
sentiment_avg = self.get('person:%d:sentiment_avg' % int(s[0]))
person['sentiment_avg'] = float((sentiment_avg if sentiment_avg else 0.0))
sentiment_total = self.get('person:%d:sentiment_total_count' % int(s[0]))
person['sentiment_total_count'] = int((sentiment_total if sentiment_total else 0))
persons.append(person)
return persons
def set_persons(self):
"""
Set list of persons in db
"""
key = 'persons'
self.redis_cmd('delete', key)
with open('%s/names.txt' % (self.dir_root), 'r') as f:
for line in f:
self.redis_cmd('rpush', key, line.rstrip('\n'))
def iter_posts(self):
post_id_start = 108673
post_id_end = 8561087
last_id = post_id_start
while True:
sql = 'select post_id, post from tp_post'\
' where post_id > %s and post_id <= %s order by post_id'\
' limit 1000'
rows = self.sql_read(sql, last_id, post_id_end)
if not rows:
break
last_id = rows[-1][0]
r = []
for row in rows:
d = data.parse_post(row[1])
d['post_id'] = row[0]
r.append(d)
yield r
def get_person_ids_from_post_id(self, post_id):
sql = 'select person_id from tp_person_post where post_id = %s'
rows = self.sql_read(sql, post_id)
return [row[0] for row in rows]
|
Nowhere is the strength of our unique 3-D imaging system more in evidence than in our personal portraits. It is noteworthy that the inability to retouch a Laser Reflections portrait has turned from being perceived as a weakness into one of its great strengths. In an era dominated by inauthentic digital images, we follow in the footsteps of portrait pioneer Edward Weston in offering unretouched portraits -- truly a moment in time captured forever.
We have testimonials from a long list of high-profile people -- many of them professional models -- who will attest not only to their delight with their images but to the enjoyable nature of the sitting in our Poulsbo studio (Seattle area) as well.
"Working with Laser Reflections has been a great experience for me. Ron and Bernadette are two of the nicest and most professional people I have met in my endeavors as a Playmate. You have to see these images in person; and once you do, you'll love them as much as I do!"
Our portrait packages range in price from $1,995 as a function of size and number of subjects involved. Our portraits on glass plates come in two sizes: 16" x 12" and 20" x 16". The price includes the sitting with a single reflection copy. We offer a discount for multiple subjects during the same session.
"Wow! The image is so real. Everybody who comes to City Hall views it with awe. The City and I thank you."
"The holographers were clever enough to catch the mayor in an uncharacteristically reflective mood. No smile.
The sitting requires ~1-2 hours depending on the number of images created and the time available to the subject (we have done portraits in as little as a 30 minute sitting but two hours allows time for processing the masters and on-site viewing). The positives (holographic equivalent of a negative) are viewable within the hour and serve as the templates for the handmade copies which require 5-7 days for production, archiving and framing. Next day delivery is available at additional charge. |
# -*- coding: utf-8 -*-
from django import forms
class UserprofileForm(forms.Form):
oldPassword = forms.CharField(label='Old Password',widget=forms.PasswordInput())
newPassword = forms.CharField(label='New Password',widget=forms.PasswordInput())
confirmPassword = forms.CharField(label='Confirm Password',widget=forms.PasswordInput())
class AddUserForm(forms.Form):
username = forms.CharField(label='Username')
firstname = forms.CharField(label='Firstname')
lastname = forms.CharField(label='Lastname')
email = forms.CharField(label='Email')
password = forms.CharField(label='Password',widget=forms.PasswordInput())
class EditUserForm(forms.Form):
firstname = forms.CharField(label='Firstname')
lastname = forms.CharField(label='Lastname')
email = forms.CharField(label='Email')
password = forms.CharField(label='Password',widget=forms.PasswordInput())
class AddGroupForm(forms.Form):
groupname = forms.CharField(label='Groupname')
description = forms.CharField(label='Description')
|
Lago Vista windshield replacement needs can occur at any time, but they are much more frequent when the weather is bad. Driving in storms or powerful winds can cause severe damage to glass, creating cracks or even shattering it. A cracked windshield buys you some time, but damage to other auto glass offers little protection against the elements, leaving you exposed whether you are driving or parked outside. Even a damaged windshield reduces visibility, posing a mild risk when you are on the road.
Austin Mobile Glass offers quick and cost-effective solutions to these problems. You can drive down to any of our service centers or we can come to you in Lago Vista for windshield or auto glass repair. Or just give us a call and we’ll send our technicians over to your home or office.
When it comes to parts, we use only the best quality OEM & OEE glass and parts at Austin Mobile Glass. A network of suppliers, both local and international, makes it easy for us to source parts, even for luxury cars. Whether your car is a popular brand or a limited edition, we will always do our best to find the right parts. All auto glass replacements and services carry our full lifetime, nationwide warranty.
An impressive range of services, expert technical assistance and quick resolution – all these are guaranteed when you choose Mobile Glass. |
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today Serpent Consulting Services Pvt. Ltd. (<http://www.serpentcs.com>)
# Copyright (C) 2004 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import time
from openerp.report import report_sxw
class reservation_detail_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(reservation_detail_report, self).__init__(cr, uid, name, context)
self.localcontext.update( {
'time': time,
'get_data': self.get_data,
'get_checkin': self.get_checkin,
'get_checkout': self.get_checkout,
# 'get_room':self.get_room,
'get_room_type':self._get_room_type,
'get_room_nos':self._get_room_nos,
'get_room_used_detail':self._get_room_used_detail,
})
self.context=context
def _get_room_type(self, reservation_line):
room_types = ''
for line in reservation_line:
if line.categ_id:
room_types += line.categ_id.name
room_types += ' '
return room_types
def _get_room_nos(self, reservation_line):
room_nos = ''
for line in reservation_line:
for room in line.reserve:
room_nos += room.name
room_nos += ' '
return room_nos
def get_data(self, date_start, date_end):
reservation_obj = self.pool.get('hotel.reservation')
tids = reservation_obj.search(self.cr, self.uid, [('checkin', '>=', date_start), ('checkout', '<=', date_end)])
res = reservation_obj.browse(self.cr, self.uid, tids)
return res
def get_checkin(self, date_start, date_end):
reservation_obj = self.pool.get('hotel.reservation')
tids = reservation_obj.search(self.cr, self.uid, [('checkin', '>=', date_start), ('checkin', '<=', date_end)])
res = reservation_obj.browse(self.cr, self.uid, tids)
return res
    def get_checkout(self, date_start, date_end):
        reservation_obj = self.pool.get('hotel.reservation')
        tids = reservation_obj.search(self.cr, self.uid, [('checkout', '>=', date_start), ('checkout', '<=', date_end)])
        res = reservation_obj.browse(self.cr, self.uid, tids)
        return res
def _get_room_used_detail(self, date_start, date_end):
room_used_details = []
hotel_room_obj = self.pool.get('hotel.room')
room_ids = hotel_room_obj.search(self.cr, self.uid, [])
for room in hotel_room_obj.browse(self.cr, self.uid, room_ids):
counter = 0
details = {}
if room.room_reservation_line_ids:
for room_resv_line in room.room_reservation_line_ids:
if room_resv_line.check_in >= date_start and room_resv_line.check_in <= date_end:
counter += 1
if counter >= 1:
details.update({'name': room.name or '', 'no_of_times_used': counter})
room_used_details.append(details)
return room_used_details
# def get_room(self, date_start, date_end):
# self.cr.execute("select pt.name,count(pt.name) as No_of_times from hotel_reservation as hr " \
# "inner join hotel_reservation_line as hrl on hrl.line_id=hr.id " \
# "inner join hotel_reservation_line_room_rel as hrlrr on hrlrr.room_id=hrl.id " \
# "inner join product_product as pp on pp.product_tmpl_id=hrlrr.hotel_reservation_line_id " \
# "inner join product_template as pt on pt.id=pp.product_tmpl_id " \
# "where hr.state<>'draft' and hr.checkin >= %s and hr.checkout <= %s group by pt.name " \
# ,(date_start,date_end))
# res2=self.cr.dictfetchall()
# return res2
report_sxw.report_sxw('report.reservation.detail', 'hotel.reservation', 'addons/hotel_reservation/report/room_res.rml', parser=reservation_detail_report)
report_sxw.report_sxw('report.checkin.detail', 'hotel.reservation', 'addons/hotel_reservation/report/checkinlist.rml', parser=reservation_detail_report)
report_sxw.report_sxw('report.checkout.detail', 'hotel.reservation', 'addons/hotel_reservation/report/checkoutlist.rml', parser=reservation_detail_report)
report_sxw.report_sxw('report.maxroom.detail', 'hotel.reservation', 'addons/hotel_reservation/report/maxroom.rml', parser=reservation_detail_report)
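# The four report_sxw.report_sxw(...) calls above register RML-based reports on
# the hotel.reservation model under the service names 'report.reservation.detail',
# 'report.checkin.detail', 'report.checkout.detail' and 'report.maxroom.detail',
# all sharing reservation_detail_report as their parser.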
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
At Second Baptist Church, we believe in the Priesthood of All Believers. God calls each of us to some kind of ministry either at church or somewhere out in the world. Would you like to explore the spiritual gifts God has given you? If so, please download and print the file by clicking on the link below. Be sure to read the instructions carefully, complete the survey and score your "Giftabilities." When you are done, rank your gifts and note which gifts are your strongest and which are your weakest. Do not read the descriptions of the gifts towards the end of the questionnaire until you have completed it. Scriptural references about each spiritual gift-ability are provided in that section. If you would like to use or gently explore one of your gifts through our ministries at Second Baptist Church, please share your results with us. Our pastor would be happy to talk with you. It is an honor for us to explore ways to make serving God a rich and meaningful experience for you.
P.S. We owe many thanks to our sisters and brothers in Christ at Quaker Hill Baptist Church for sharing this helpful tool with us.
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input preprocessors."""
from lingvo import compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core import schedule
from lingvo.tasks.car import car_lib
from lingvo.tasks.car import detection_3d_lib
from lingvo.tasks.car import geometry
from lingvo.tasks.car import ops
import numpy as np
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.ops import inplace_ops
# pylint:enable=g-direct-tensorflow-import
def _ConsistentShuffle(tensors, seed):
"""Shuffle multiple tensors with the same shuffle order."""
shuffled_idx = tf.range(tf.shape(tensors[0])[0])
shuffled_idx = tf.random.shuffle(shuffled_idx, seed=seed)
return tuple([tf.gather(t, shuffled_idx) for t in tensors])
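# Usage sketch: shuffling points and their per-point features together, so that
# row i of each shuffled tensor still refers to the same point:
#   points_xyz, points_feature = _ConsistentShuffle(
#       (points_xyz, points_feature), seed=p.random_seed)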
def _GetApplyPointMaskFn(points_mask):
"""Returns a function that applies a mask to one of our points tensors."""
def _ApplyPointMaskFn(points_tensor):
"""Applies a mask to the points tensor."""
if points_tensor is None:
return points_tensor
return tf.boolean_mask(points_tensor, points_mask)
return _ApplyPointMaskFn
def _Dense(sparse):
return tf.sparse_to_dense(
sparse_indices=sparse.indices,
output_shape=sparse.dense_shape,
sparse_values=sparse.values,
default_value=0)
class Preprocessor(base_layer.BaseLayer):
"""Base class for input preprocessor.
  Input preprocessors expect the combined output of all extractors and perform
  a transformation on them. Input preprocessors can add/edit/remove fields
  from the NestedMap of features.
  Note: Features correspond to a single example (no batch dimension).
Sub-classes need to implement the following three functions:
1) TransformFeatures(features): Given a NestedMap of features representing the
output of all the extractors, apply a transformation on the features.
2) TransformShapes(shapes): Given a corresponding NestedMap of shapes,
produce a NestedMap of shapes that corresponds to the transformation of the
features after TransformFeatures.
3) TransformDTypes(dtypes): Given a corresponding NestedMap of dtypes,
produce a NestedMap of dtypes that corresponds to the transformation of the
features after TransformFeatures.
The preprocessor is expected to explicitly pass through untouched fields.
For example, a preprocessor that does data augmentation should modify the
features NestedMap on the fields it cares about augmenting, and then return
the features NestedMap.
"""
@classmethod
def Params(cls):
"""Default params."""
p = super().Params()
p.name = cls.__name__
return p
def FProp(self, theta, features):
"""Performs TransformFeatures."""
del theta # unused
return self.TransformFeatures(features)
def TransformFeatures(self, features):
"""Transforms the features for one example.
Args:
features: A `NestedMap` of tensors.
Returns:
      A `NestedMap` of tensors corresponding to the transformed features.
"""
raise NotImplementedError()
def TransformBatchedFeatures(self, features):
"""Transforms the features for a batch of examples.
Args:
features: A `NestedMap` of batched tensors.
Returns:
      A `NestedMap` of batched tensors corresponding to the transformed features.
"""
dtypes = features.Transform(lambda v: v.dtype)
dtypes = self.TransformDTypes(dtypes)
# Default impl uses map_fn.
result = tf.map_fn(
self.TransformFeatures, elems=features, dtype=dtypes, back_prop=False)
return result
def TransformShapes(self, shapes):
"""Sets correct shapes corresponding to TransformFeatures.
Args:
shapes: A `NestedMap` of TensorShapes, corresponding to the
pre-transformed features.
Returns:
A `NestedMap` of TensorShapes corresponding to the transformed features.
"""
raise NotImplementedError()
def TransformDTypes(self, dtypes):
"""Sets correct dtypes corresponding to TransformFeatures.
Args:
dtypes: A `NestedMap` of DTypes, corresponding to the pre-transformed
features.
Returns:
A `NestedMap` of DTypes corresponding to the transformed features.
"""
raise NotImplementedError()
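# A minimal sketch of a concrete subclass, showing the three methods the
# Preprocessor docstring asks implementations to provide (the class name and
# the scaling constant are illustrative only, not part of this module):
#
#   class ScalePointsFeature(Preprocessor):
#     """Scales lasers.points_feature by a constant factor."""
#
#     def TransformFeatures(self, features):
#       features.lasers.points_feature *= 2.0
#       return features
#
#     def TransformShapes(self, shapes):
#       return shapes  # Shapes are unchanged by scaling.
#
#     def TransformDTypes(self, dtypes):
#       return dtypes  # DTypes are unchanged by scaling.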
class EntryPreprocessor(Preprocessor):
"""A Preprocessor that transforms a NestedMap sub-structure.
Some preprocessors want to apply a function to any NestedMap whose key matches
a specific prefix. An EntryPreprocessor provides an interface for specifying
the function transformation for a NestedMap of inputs, adding, modifying, or
deleting the entries in that NestedMap.
For example, if an input contains a nested structure such as:
- lasers.front.xyz
.features
- lasers.side.xyz
.features
and one wants to apply a transform that modifies the .xyz features
on both structures, one can define an EntryPreprocessor that implements:
UpdateEntry(entry):
UpdateEntryShape(shapes):
UpdateEntryDType(dtypes):
and set self.params.prefixes = ['lasers.front', 'lasers.side']
where the prefixes refer to a fully-qualified NestedMap sub-structure.
The arguments to these functions will contain just the NestedMap structure
whose key prefix can be found in self.params.prefixes. One can then modify
these structures as desired.
Example:
def UpdateEntry(self, entry):
# entry is a NestedMap.
assert 'xyz' in entry
entry.xyz = self._ApplyFn(entry.xyz)
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('prefixes', ['pseudo_ri'], 'List of keys to apply to.')
return p
def _ApplyToMatchingStructure(self, nested_map, fn):
"""Apply fn to any NestedMap sub-structure whose prefix is in p.prefixes."""
p = self.params
# Don't mutate the original.
nested_map = nested_map.DeepCopy()
updated_entries = []
for prefix in p.prefixes:
entry = nested_map.GetItem(prefix)
if not isinstance(entry, py_utils.NestedMap):
raise TypeError('Prefix key {} selected a {}, not a NestedMap!'.format(
prefix, type(entry)))
fn(entry)
updated_entries.append(entry)
return nested_map, updated_entries
def UpdateEntry(self, entry):
"""Update the Tensors in a NestedMap entry.
Args:
entry: A NestedMap of Tensors.
"""
raise NotImplementedError()
def UpdateEntryShape(self, shapes):
"""Update the shapes in a NestedMap entry.
Args:
shapes: A NestedMap of TensorShapes.
"""
raise NotImplementedError()
def UpdateEntryDType(self, dtypes):
"""Transform the dtypes in a NestedMap entry.
Args:
dtypes: A NestedMap of dtypes.
"""
raise NotImplementedError()
def TransformFeatures(self, features):
features, _ = self._ApplyToMatchingStructure(features, self.UpdateEntry)
return features
def TransformShapes(self, shapes):
shapes, _ = self._ApplyToMatchingStructure(shapes, self.UpdateEntryShape)
return shapes
def TransformDTypes(self, dtypes):
dtypes, _ = self._ApplyToMatchingStructure(dtypes, self.UpdateEntryDType)
return dtypes
class CreateDecoderCopy(Preprocessor):
"""Creates references to current lasers, images, and labels.
This is useful if the data is further transformed.
If desired, the keys that are copied can be customized by overriding the
default keys param.
This preprocessor expects features to optionally contain the following keys:
- lasers - a NestedMap of tensors
- images - a NestedMap of tensors
- labels - a NestedMap of tensors
Adds the following features (if the features existed):
- decoder_copy.lasers - a copy of the lasers NestedMap
- decoder_copy.images - a copy of the images NestedMap
- decoder_copy.labels - a copy of the labels NestedMap
The processor also by default pads the laser features; this can be disabled
by setting the pad_lasers param to None.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('keys', ['lasers', 'labels', 'images'],
'Keys to look for and copy if exists.')
p.Define('parent_key', 'decoder_copy', 'The key to nest the copies under.')
p.Define('pad_lasers', PadLaserFeatures.Params(),
'Params for a layer that pads the laser features.')
p.name = 'create_decoder_copy'
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.pad_lasers is not None:
self.CreateChild('pad_lasers', p.pad_lasers)
def _DeepCopyIfExists(self, keys, nested_map, parent_key):
"""Deep copy a specific key to a parent key if it exists."""
for key in keys:
if key in nested_map:
if parent_key not in nested_map:
nested_map[parent_key] = py_utils.NestedMap()
nested_map[parent_key][key] = nested_map[key].DeepCopy()
return nested_map
def TransformFeatures(self, features):
p = self.params
features = self._DeepCopyIfExists(p.keys, features, p.parent_key)
if p.pad_lasers is not None:
features[p.parent_key] = self.pad_lasers.TransformFeatures(
features[p.parent_key])
return features
def TransformShapes(self, shapes):
p = self.params
shapes = self._DeepCopyIfExists(p.keys, shapes, p.parent_key)
if p.pad_lasers is not None:
shapes[p.parent_key] = self.pad_lasers.TransformShapes(
shapes[p.parent_key])
return shapes
def TransformDTypes(self, dtypes):
p = self.params
dtypes = self._DeepCopyIfExists(p.keys, dtypes, p.parent_key)
if p.pad_lasers is not None:
dtypes[p.parent_key] = self.pad_lasers.TransformDTypes(
dtypes[p.parent_key])
return dtypes
class FilterByKey(Preprocessor):
"""Filters features to keep only specified keys.
This keeps only feature entries that are specified. This allows us to reduce
the number of fields returned. For example, during training, one may not
need the actual laser points if training with a pillars based model that
has a preprocessor that already maps the points to grid.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'keep_key_prefixes', [''], 'Prefixes of keys to keep. If this '
'contains the empty string, then it will keep all the keys.')
return p
def _FilterFn(self, key, entry):
"""Filter a nested map."""
del entry # unused
p = self.params
for prefix in p.keep_key_prefixes:
if key.startswith(prefix):
return True
return False
def TransformFeatures(self, features):
return features.FilterKeyVal(self._FilterFn)
def TransformShapes(self, shapes):
return shapes.FilterKeyVal(self._FilterFn)
def TransformDTypes(self, dtypes):
return dtypes.FilterKeyVal(self._FilterFn)
class FilterGroundTruthByNumPoints(Preprocessor):
"""Removes ground truth boxes with less than params.min_num_points points.
This preprocessor expects features to contain the following keys::
labels.labels of shape [..., L]
labels.bboxes_3d of shape [..., L, 7]
labels.bboxes_3d_mask of shape [..., L]
labels.unfiltered_bboxes_3d_mask of shape [..., L]
labels.bboxes_3d_num_points of shape [..., L].
Modifies the bounding box data to turn off ground truth objects that don't
meet the params.min_num_points point filter:
labels.labels: Boxes with less than params.min_num_points have their label
set to params.background_id (defaults to 0).
labels.bboxes_3d_mask: Boxes with less than params.min_num_points are set
to 0.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'min_num_points', 1, 'The minimum number of points allowed before '
'the associated ground truth box is turned off. Defaults to 1.')
p.Define(
'background_id', 0, 'The ID of the background class we set '
'filtered boxes to. Defaults to 0.')
return p
def TransformFeatures(self, features):
p = self.params
bbox_is_valid = tf.greater_equal(features.labels.bboxes_3d_num_points,
p.min_num_points)
features.labels.labels = tf.where(
bbox_is_valid, features.labels.labels,
p.background_id * tf.ones_like(features.labels.labels))
features.labels.bboxes_3d_mask *= tf.cast(bbox_is_valid, tf.float32)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
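# Configuration sketch (the threshold value is illustrative): keep only
# groundtruth boxes containing at least 5 laser points, relying on
# labels.bboxes_3d_num_points being populated beforehand, e.g. by the
# CountNumberOfPointsInBoxes3D preprocessor defined below:
#   p = FilterGroundTruthByNumPoints.Params().Set(min_num_points=5)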
class FilterGroundTruthByDifficulty(Preprocessor):
"""Removes groundtruth boxes based on detection difficulty.
This preprocessor expects features to contain the following keys::
labels.single_frame_detection_difficulties of shape [..., L]
labels.labels of shape [..., L]
labels.bboxes_3d_mask of shape [..., L]
labels.unfiltered_bboxes_3d_mask of shape [..., L]
The preprocessor masks out the bboxes_3d_mask / labels based on whether
single_frame_detection_difficulties is greater than p.difficulty_threshold.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'background_id', 0, 'The ID of the background class we set '
'filtered boxes to. Defaults to 0.')
p.Define(
'difficulty_threshold', 1,
'Filter groundtruth bounding boxes whose detection difficulty is '
'greater than `difficulty_threshold`')
return p
def TransformFeatures(self, features):
p = self.params
bbox_is_valid = tf.less_equal(
features.labels.single_frame_detection_difficulties,
p.difficulty_threshold)
features.labels.labels = tf.where(
bbox_is_valid, features.labels.labels,
p.background_id * tf.ones_like(features.labels.labels))
features.labels.bboxes_3d_mask *= tf.cast(bbox_is_valid, tf.float32)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class CountNumberOfPointsInBoxes3D(Preprocessor):
"""Computes bboxes_3d_num_points.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- labels.bboxes_3d of shape [L, 7]
- labels.bboxes_3d_mask of shape [L]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
Adds the following features:
labels.bboxes_3d_num_points: [L] - integer tensor containing the number of
laser points for each corresponding bbox.
"""
def TransformFeatures(self, features):
points_xyz = features.lasers.points_xyz
if 'points_padding' in features.lasers:
points_mask = 1 - features.lasers.points_padding
points_xyz = tf.boolean_mask(points_xyz, points_mask)
points_in_bboxes_mask = geometry.IsWithinBBox3D(points_xyz,
features.labels.bboxes_3d)
bboxes_3d_num_points = tf.reduce_sum(
tf.cast(points_in_bboxes_mask, tf.int32), axis=0, keepdims=False)
bboxes_3d_num_points *= tf.cast(features.labels.bboxes_3d_mask, tf.int32)
features.labels.bboxes_3d_num_points = bboxes_3d_num_points
return features
def TransformShapes(self, shapes):
num_bboxes = shapes.labels.bboxes_3d[0]
shapes.labels.bboxes_3d_num_points = tf.TensorShape([num_bboxes])
return shapes
def TransformDTypes(self, dtypes):
dtypes.labels.bboxes_3d_num_points = tf.int32
return dtypes
class AddPerPointLabels(Preprocessor):
"""Computes the class and bbox id of each point.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- labels.bboxes_3d of shape [L, 7]
- labels.labels of shape [L]
This makes an assumption that each point is only in 1 box, which should
  almost always be true in 3D. In cases where this is not true, the largest
label integer and largest bbox_id will be assigned.
NOTE: Be very careful that this is performed after any modifications
to the semantic labels of each point in the pointcloud. Examples of this
would be operators like GroundTruthAugmentation, or DropBoxesOutOfRange.
Adds the following features:
lasers.points_label: [P] - integer tensor containing the class id of each
point.
lasers.points_bbox_id: [P] - integer tensor containing box id of each
point from 0 to num_bboxes, where an id of num_bboxes indicates a
background point.
lasers.points_bbox_3d: [P, 7] - float tensor containing bounding box of
each point.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'per_dimension_adjustment', None,
'A list of len 3 of floats with the amount (in meters) to add to '
'each dimension of the box before using it to select points. '
'If enabled, this is designed to protect against overly tight box '
'annotations that appear in KITTI.')
return p
def TransformFeatures(self, features):
p = self.params
points_xyz = features.lasers.points_xyz
bboxes_3d = features.labels.bboxes_3d
num_points, _ = py_utils.GetShape(points_xyz)
num_bboxes, _ = py_utils.GetShape(bboxes_3d)
if p.per_dimension_adjustment:
if len(p.per_dimension_adjustment) != 3:
raise ValueError(
'param `per_dimension_adjustment` expected to be len 3.')
dims_adjustment = tf.constant([0, 0, 0] + p.per_dimension_adjustment +
[0])
bboxes_3d = bboxes_3d + dims_adjustment
# Find which points are in each box and what class each box is.
points_in_bboxes_mask = geometry.IsWithinBBox3D(points_xyz, bboxes_3d)
points_in_bboxes_mask = tf.cast(points_in_bboxes_mask, tf.int32)
points_in_bboxes_mask = py_utils.HasShape(points_in_bboxes_mask,
[num_points, num_bboxes])
# points_in_bboxes_mask is a [num_points, num_bboxes] 0/1 tensor
# indicating whether that point is in a given box.
# Each point should only be in one box, so after broadcasting the label
# across the binary mask, we do a reduce_max to get the max label id
# for each point. Since each point only belongs to one box, it will be
# the only non-zero (background) label in that box.
# Note: We assume background to be class_id == 0
points_label = tf.reduce_max(
points_in_bboxes_mask * features.labels.labels, axis=1)
points_bbox_id = tf.argmax(
points_in_bboxes_mask, axis=1, output_type=tf.int32)
# If the class is background, make its id == num_bboxes
points_bbox_id = tf.where(points_label > 0, points_bbox_id,
tf.broadcast_to(num_bboxes, [num_points]))
# For each point, get the bbox_3d data.
dummy_bbox = tf.constant([[0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32)
bboxes_3d = tf.concat([bboxes_3d, dummy_bbox], axis=0)
points_bbox_3d = tf.gather(bboxes_3d, points_bbox_id)
points_label = tf.reshape(points_label, [num_points])
points_bbox_id = tf.reshape(points_bbox_id, [num_points])
features.lasers.points_label = points_label
features.lasers.points_bbox_id = points_bbox_id
features.lasers.points_bbox_3d = points_bbox_3d
return features
def TransformShapes(self, shapes):
num_points = shapes.lasers.points_xyz[0]
shapes.lasers.points_label = tf.TensorShape([num_points])
shapes.lasers.points_bbox_id = tf.TensorShape([num_points])
shapes.lasers.points_bbox_3d = tf.TensorShape([num_points, 7])
return shapes
def TransformDTypes(self, dtypes):
dtypes.lasers.points_label = tf.int32
dtypes.lasers.points_bbox_id = tf.int32
dtypes.lasers.points_bbox_3d = tf.float32
return dtypes
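# Worked sketch of the assignment logic above, with 2 points, 2 boxes and
# box labels [3, 5] (background id 0):
#   points_in_bboxes_mask = [[1, 0],   # point 0 lies in box 0
#                            [0, 0]]   # point 1 lies in no box
#   points_label   = reduce_max(mask * [3, 5], axis=1) = [3, 0]
#   points_bbox_id = argmax(mask, axis=1)              = [0, 0]
# Because points_label[1] == 0 (background), the tf.where above rewrites
# points_bbox_id[1] to num_bboxes (= 2), the dummy background box index.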
class PointsToGrid(Preprocessor):
"""Bins points to a 3D-grid using custom op: ops.point_to_grid.
Expects features to have keys:
- lasers.points_xyz of shape [P, 3]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
If normalizing the labels is enabled, then also expects:
- labels.weights
- labels.bboxes_td
- labels.bboxes_td_mask
- labels.bboxes_3d_mask
Let:
gx, gy, gz = p.grid_size
F = 3 + num_laser_features
Adds the following features:
grid_centers: [gx, gy, gz, 3]: For each grid cell, the (x,y,z)
floating point coordinate of its center.
grid_num_points: [gx, gy, gz]: The number of points in each grid
cell (integer).
laser_grid: [gx, gy, gz, num_points_per_cell, F] - A 5D floating
point Tensor containing the laser data placed into a fixed grid.
  By default, also normalizes labels.bboxes_td relative to the grid x/y range.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_points_per_cell', 100,
'The maximum number of points per cell.')
p.Define('grid_size', (40, 40, 1), 'Grid size along x,y,z axis.')
# The max range of x and y is [-80, 80].
p.Define('grid_range_x', (-80, 80), 'The X-axis Range covered by the grid')
p.Define('grid_range_y', (-80, 80), 'The Y-axis Range covered by the grid')
p.Define('grid_range_z', (-2, 4), 'The Z-axis Range covered by the grid')
p.Define('normalize_td_labels', True,
             'Whether to normalize the top-down labels to the grid limits.')
return p
def _NormalizeLabels(self, ymin, xmin, ymax, xmax, x_range, y_range):
"""Normalizes the bboxes within a given range."""
assert x_range, 'Must specify x_range if clipping.'
assert y_range, 'Must specify y_range if clipping.'
assert len(x_range) == 2, 'x_range %s must be 2 elements.' % x_range
assert len(y_range) == 2, 'y_range %s must be 2 elements.' % y_range
x_range_min = x_range[0]
x_range_len = x_range[1] - x_range[0]
y_range_min = y_range[0]
y_range_len = y_range[1] - y_range[0]
xmin = tf.cast(xmin - x_range_min, tf.float32) / tf.cast(
x_range_len, tf.float32)
xmax = tf.cast(xmax - x_range_min, tf.float32) / tf.cast(
x_range_len, tf.float32)
ymin = tf.cast(ymin - y_range_min, tf.float32) / tf.cast(
y_range_len, tf.float32)
ymax = tf.cast(ymax - y_range_min, tf.float32) / tf.cast(
y_range_len, tf.float32)
return ymin, xmin, ymax, xmax
def TransformFeatures(self, features):
p = self.params
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
if ('points_padding' in features.lasers and
features.lasers.points_padding is not None):
points_mask = 1 - features.lasers.points_padding
points_xyz = tf.boolean_mask(points_xyz, points_mask)
points_feature = tf.boolean_mask(points_feature, points_mask)
points_full = tf.concat([points_xyz, points_feature], axis=-1)
points_grid_full, grid_centers, num_points = ops.point_to_grid(
points_full, p.num_points_per_cell, p.grid_size[0], p.grid_size[1],
p.grid_size[2], p.grid_range_x, p.grid_range_y, p.grid_range_z)
features.laser_grid = points_grid_full
features.grid_centers = grid_centers
features.grid_num_points = num_points
if p.normalize_td_labels:
# Normalize bboxes_td w.r.t grid range.
obb = features.labels
x_range = p.grid_range_x
y_range = p.grid_range_y
ymin, xmin, ymax, xmax = tf.unstack(obb.bboxes_td[..., :4], axis=-1)
ymin, xmin, ymax, xmax = self._NormalizeLabels(
ymin, xmin, ymax, xmax, x_range=x_range, y_range=y_range)
obb.bboxes_td = tf.concat(
[tf.stack([ymin, xmin, ymax, xmax], axis=-1), obb.bboxes_td[..., 4:]],
axis=-1)
return features
def TransformShapes(self, shapes):
p = self.params
shapes.grid_centers = tf.TensorShape(list(p.grid_size) + [3])
shapes.grid_num_points = tf.TensorShape(list(p.grid_size))
shapes.laser_grid = tf.TensorShape(
list(p.grid_size) +
[p.num_points_per_cell, 3 + shapes.lasers.points_feature[-1]])
return shapes
def TransformDTypes(self, dtypes):
dtypes.grid_centers = tf.float32
dtypes.grid_num_points = tf.int32
dtypes.laser_grid = tf.float32
return dtypes
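# Sizing sketch for the defaults above: grid_size=(40, 40, 1) over
# grid_range_x = grid_range_y = (-80, 80) yields 160 / 40 = 4 m cells in x and
# y, each keeping at most num_points_per_cell=100 points, so laser_grid has
# shape [40, 40, 1, 100, 3 + num_laser_features].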
class _PointPillarGridSettings:
"""Settings for PointPillars model defined in paper.
https://arxiv.org/abs/1812.05784
"""
# Chooses grid sizes that are a multiple of 16 to support point pillars
# model requirements. These also happen to match the values
# in the PointPillars paper (voxel width of 0.16m in x, y)
GRID_X = 432
GRID_Y = 496
GRID_Z = 1
# These fields are set in the subclasses.
GRID_X_RANGE = None
GRID_Y_RANGE = None
GRID_Z_RANGE = None
@classmethod
def UpdateGridParams(cls, grid_params):
"""Apply PointPillars settings to grid_params."""
grid_params.grid_size = (cls.GRID_X, cls.GRID_Y, cls.GRID_Z)
grid_params.grid_range_x = cls.GRID_X_RANGE
grid_params.grid_range_y = cls.GRID_Y_RANGE
grid_params.grid_range_z = cls.GRID_Z_RANGE
@classmethod
def UpdateAnchorGridParams(cls, anchor_params, output_stride=2):
"""Apply PointPillars settings to anchor_params."""
# Set anchor settings to match grid settings.
# Grid size for anchors is half the resolution.
anchor_params.grid_size = (cls.GRID_X // output_stride,
cls.GRID_Y // output_stride, cls.GRID_Z)
anchor_params.grid_range_x = cls.GRID_X_RANGE
anchor_params.grid_range_y = cls.GRID_Y_RANGE
# Grid along z axis should be pinned to 0.
anchor_params.grid_range_z = (0, 0)
def MakeGridSettings(grid_x_range, grid_y_range, grid_z_range, grid_x, grid_y,
grid_z):
"""Returns configured class for PointPillar grid settings."""
class GridSettings(_PointPillarGridSettings):
GRID_X_RANGE = grid_x_range
GRID_Y_RANGE = grid_y_range
GRID_Z_RANGE = grid_z_range
GRID_X = grid_x
GRID_Y = grid_y
GRID_Z = grid_z
return GridSettings
PointPillarGridCarSettings = MakeGridSettings(
grid_x_range=(0, 69.12),
grid_y_range=(-39.68, 39.68),
grid_z_range=(-3, 1),
grid_x=432,
grid_y=496,
grid_z=1)
PointPillarGridPedCycSettings = MakeGridSettings(
grid_x_range=(0, 47.36),
grid_y_range=(-19.84, 19.84),
grid_z_range=(-2.5, 0.5),
grid_x=432,
grid_y=496,
grid_z=1)
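# Usage sketch: these settings classes are applied to existing Params objects
# rather than instantiated, e.g. (assuming grid_p is a PointsToGrid.Params()
# and anchor_p is a GridAnchorCenters.Params() being configured for cars):
#   PointPillarGridCarSettings.UpdateGridParams(grid_p)
#   PointPillarGridCarSettings.UpdateAnchorGridParams(anchor_p, output_stride=2)
# With the car settings, the 69.12 m x-range over 432 cells matches the 0.16 m
# voxel width quoted in the PointPillars paper.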
class GridToPillars(Preprocessor):
"""Create pillars from a grid of points.
Expects features to have keys:
grid_centers: [gx, gy, gz, 3]
grid_num_points: [gx, gy, gz]
laser_grid: [gx, gy, gz, num_points_per_cell, F]
Adds the following features:
    pillar_count: []. Scalar count of non-empty pillars actually selected.
    pillar_locations: [num_pillars, 3]. The grid location of each pillar.
    pillar_points: [num_pillars, num_points_per_cell, F]. Points of each
      pillar.
    pillar_centers: [num_pillars, 1, 3]. The grid cell center of each pillar.
Drops the following features by default:
laser_grid
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_points_per_cell', 100,
'The maximum number of points per cell.')
p.Define('num_pillars', 12000, 'The maximum number of pillars to produce.')
p.Define('drop_laser_grid', True, 'Whether to drop the laser_grid feature.')
# The density based sampler is more expensive.
p.Define('use_density_sampler', False,
'Use a density based sampler during pillar selection.')
return p
def _GumbelTransform(self, probs):
"""Adds gumbel noise to log probabilities for multinomial sampling.
This enables fast sampling from a multinomial distribution without
replacement. See https://arxiv.org/abs/1611.01144 for details.
A colab that demonstrates this in practice is here:
http://colab/drive/1iuMt2n_r7dKPQG9T0UVMuK3fkbBayKjd
Args:
probs: A 1-D float tensor containing probabilities, summing to 1.
Returns:
A 1-D float tensor of the same size of probs, with gumbel noise added to
log probabilities. Taking the top k elements from this provides a
multinomial sample without replacement.
"""
p = self.params
log_prob = tf.math.log(probs)
probs_shape = tf.shape(probs)
uniform_samples = tf.random.uniform(
shape=probs_shape,
dtype=probs.dtype,
seed=p.random_seed,
name='uniform_samples')
gumbel_noise = -tf.math.log(-tf.math.log(uniform_samples))
return gumbel_noise + log_prob
def _DensitySample(self, num_points):
p = self.params
# Flatten to [nx * ny * nz] for convenience during sampling.
num_grid_points = np.prod(p.grid_size)
flattened_num_points = tf.reshape(num_points, [num_grid_points])
# Normalize flattened_num_points to sum to 1.
flattened_num_points = tf.cast(flattened_num_points, tf.float32)
flattened_num_points /= tf.reduce_sum(flattened_num_points)
# TODO(jngiam): Consider generalizing this to enable other methods of
# sampling: e.g., use largest deviation in z-axis. The gumbel transform
# can still be applied regardless.
# Add gumbel noise for multinomial sampling.
sampling_logits = self._GumbelTransform(flattened_num_points)
_, locations = tf.nn.top_k(
sampling_logits, k=min(p.num_pillars, num_grid_points))
# Unravel coordinates back to grid locations.
locations = tf.unravel_index(locations, p.grid_size)
# Unravel index will return a 3 x num_locations tensor, this needs to be
# transposed so that we have it as num_locations x 3.
locations = py_utils.HasShape(locations, [3, -1])
locations = tf.transpose(locations)
return locations
def TransformFeatures(self, features):
p = self.params
num_points = features.grid_num_points
if p.use_density_sampler:
locations = self._DensitySample(num_points)
else:
# Select non-empty cells uniformly at random.
locations = tf.random.shuffle(tf.cast(tf.where(num_points > 0), tf.int32))
num_features = py_utils.GetShape(features.laser_grid)[-1]
# [nx, ny, nz, np, 4] (x, y, z, f)
points = features.laser_grid
# [K, np, 4] (x, y, z, f)
points = tf.gather_nd(points, locations)
# [nx, ny, nz, 1, 3] (cx, cy, cz)
centers = features.grid_centers[..., tf.newaxis, :]
# [K, 1, 3] (cx, cy, cz)
centers = tf.gather_nd(centers, locations)
# NOTE: If there are fewer pillars than p.num_pillars, the following
# padding creates many 'fake' pillars at grid cell (0, 0, 0) with
# an all-zero pillar. Hopefully, the model can learn to ignore these.
#
# pillar_points[i, :, :] is the pillar located at pillar_locations[i, :3],
# and pillar_points[i, :, :] == points_grid_full[pillar_locations[i, :3]].
# for 0 <= i < pillar_count;
# pillar_locations[i, :3] are zero-ed, for i >= pillar_count.
features.pillar_count = tf.shape(locations)[0]
features.pillar_locations = py_utils.PadOrTrimTo(locations,
[p.num_pillars, 3])
features.pillar_points = py_utils.PadOrTrimTo(
points, [p.num_pillars, p.num_points_per_cell, num_features])
features.pillar_centers = py_utils.PadOrTrimTo(centers,
[p.num_pillars, 1, 3])
if p.drop_laser_grid:
del features['laser_grid']
return features
def TransformShapes(self, shapes):
p = self.params
num_features = shapes.laser_grid[-1]
shapes.pillar_count = tf.TensorShape([])
shapes.pillar_locations = tf.TensorShape([p.num_pillars, 3])
shapes.pillar_points = tf.TensorShape(
[p.num_pillars, p.num_points_per_cell, num_features])
shapes.pillar_centers = tf.TensorShape([p.num_pillars, 1, 3])
if p.drop_laser_grid:
del shapes['laser_grid']
return shapes
def TransformDTypes(self, dtypes):
p = self.params
dtypes.pillar_count = tf.int32
dtypes.pillar_locations = tf.int32
dtypes.pillar_points = tf.float32
dtypes.pillar_centers = tf.float32
if p.drop_laser_grid:
del dtypes['laser_grid']
return dtypes
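# Note on the density sampler above: adding Gumbel noise to log-probabilities
# and taking the top k is equivalent to drawing k samples from the multinomial
# distribution without replacement, so cells containing more points are more
# likely to be kept as pillars. The same trick in isolation:
#   gumbel = -tf.math.log(-tf.math.log(tf.random.uniform(tf.shape(probs))))
#   _, sampled_indices = tf.nn.top_k(tf.math.log(probs) + gumbel, k)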
class GridAnchorCenters(Preprocessor):
"""Create anchor centers on a grid.
Anchors are placed in the middle of each grid cell. For example, on a 2D grid
range (0 -> 10, 0 -> 10) with a 10 x 5 grid size, the anchors will be placed
at [(0.5, 1), (0.5, 3), ... , (9.5, 7), (9.5, 9)].
Adds the following features:
anchor_centers: [num_locations, 3] - Floating point output containing the
center (x, y, z) locations for tiling anchor boxes.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'grid_size', (20, 20, 1), 'Grid size along x,y,z axis. This will '
'be used to generate the anchor center locations. Note that this '
'would likely be different from the grid_* parameters in '
'LaserGridExtractor: the grid extractor may choose to extract '
'points more densely. Instead, this should correspond to the '
'model\'s prediction layer: the predicted anchor box residuals '
'should match this grid.')
p.Define('grid_range_x', (-25, 25), 'The x-axis range covered by the grid.')
p.Define('grid_range_y', (-25, 25), 'The y-axis range covered by the grid.')
p.Define('grid_range_z', (0, 0), 'The z-axis range covered by the grid.')
return p
def TransformFeatures(self, features):
p = self.params
utils_3d = detection_3d_lib.Utils3D()
# Compute the grid cell size and adjust the range sent to dense coordinates
# by half a cell size so as to ensure that the anchors are placed in the
# center of each grid cell.
grid_size_x, grid_size_y, grid_size_z = p.grid_size
grid_cell_sizes = [
float(p.grid_range_x[1] - p.grid_range_x[0]) / grid_size_x,
float(p.grid_range_y[1] - p.grid_range_y[0]) / grid_size_y,
float(p.grid_range_z[1] - p.grid_range_z[0]) / grid_size_z,
]
half_size_x, half_size_y, half_size_z = np.asarray(grid_cell_sizes) / 2.0
grid_shape = list(p.grid_size) + [3]
anchor_centers = utils_3d.CreateDenseCoordinates([
[
p.grid_range_x[0] + half_size_x,
p.grid_range_x[1] - half_size_x,
grid_size_x
],
[
p.grid_range_y[0] + half_size_y,
p.grid_range_y[1] - half_size_y,
grid_size_y
],
[
p.grid_range_z[0] + half_size_z,
p.grid_range_z[1] - half_size_z,
grid_size_z
],
]) # pyformat: disable
features.anchor_centers = tf.reshape(anchor_centers, grid_shape)
return features
def TransformShapes(self, shapes):
p = self.params
shapes.anchor_centers = tf.TensorShape(list(p.grid_size) + [3])
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_centers = tf.float32
return dtypes
class SparseCenterSelector(Preprocessor):
"""Select centers for anchors and cells.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
If lasers.num_seeded_points of shape [] is provided, it indicates that the
first num_seeded_points of lasers.points_xyz should be used as seeds for
farthest point sampling (e.g., always chosen). Currently the concept
of seeding is not implemented for anything but farthest point sampling.
Adds the following features:
anchor_centers: [num_cell_centers, 3] - Floating point output containing the
center (x, y, z) locations for tiling anchor boxes.
cell_center_xyz: [num_cell_centers, 3] - Floating point output containing
the center (x, y, z) locations for each cell to featurize.
"""
_SAMPLING_METHODS = ['farthest_point', 'random_uniform']
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_cell_centers', 256, 'Number of centers.')
p.Define(
'features_preparation_layers', [],
'A list of Params for layers to run on the features before '
'performing farthest point sampling. For example, one may wish to '
'drop points out of frustum for KITTI before selecting centers. '
'Note that these layers will not mutate the original features, '
'instead, a copy will be made.')
p.Define(
'sampling_method', 'farthest_point',
'Which sampling method to use. One of {}'.format(cls._SAMPLING_METHODS))
p.Define(
'fix_z_to_zero', True, 'Whether to fix z to 0 when retrieving the '
'center xyz coordinates.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.sampling_method not in self._SAMPLING_METHODS:
raise ValueError('Param `sampling_method` must be one of {}.'.format(
self._SAMPLING_METHODS))
if p.features_preparation_layers is not None:
self.CreateChildren('features_preparation_layers',
p.features_preparation_layers)
def _FarthestPointSampleCenters(self, points_xyz, num_seeded_points):
"""Samples centers with Farthest Point Sampling.
Args:
points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point
(x, y, z) locations. We expect any padded points to be removed before
this function is called.
num_seeded_points: integer indicating how many of the first
num_seeded_points points in points_xyz should be considered
as seeds for FPS (always chosen).
Returns:
A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers
to use as anchors.
"""
p = self.params
num_points = tf.shape(points_xyz)[0]
points_padding = tf.zeros((num_points,), dtype=tf.float32)
padded_num_points = tf.maximum(num_points, p.num_cell_centers)
# Pad both the points and padding if for some reason the input pointcloud
# has less points than p.num_cell_centers.
points_xy = py_utils.PadOrTrimTo(points_xyz[:, :2], [padded_num_points, 2])
points_padding = py_utils.PadOrTrimTo(
points_padding, [padded_num_points], pad_val=1.0)
sampled_idx, _ = car_lib.FarthestPointSampler(
points_xy[tf.newaxis, ...],
points_padding[tf.newaxis, ...],
p.num_cell_centers,
num_seeded_points=num_seeded_points,
random_seed=p.random_seed)
sampled_idx = sampled_idx[0, :]
# Gather centers.
if p.fix_z_to_zero:
centers = tf.concat([
tf.gather(points_xy, sampled_idx),
tf.zeros((p.num_cell_centers, 1)),
], axis=-1) # pyformat: disable
else:
centers = tf.gather(points_xyz, sampled_idx)
return centers
def _RandomUniformSampleCenters(self, points_xyz):
"""Samples centers with Random Uniform Sampling.
Args:
points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point
(x, y, z) locations. We expect any padded points to be removed before
this function is called.
Returns:
A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers
to use as anchors.
"""
p = self.params
# We want the center Z value to be 0 so just exclude it
centers_xy = tf.random.shuffle(points_xyz[:, :2], seed=p.random_seed)
selected_centers_xy = py_utils.PadOrTrimTo(centers_xy,
[p.num_cell_centers, 2])
return tf.concat([selected_centers_xy,
tf.zeros((p.num_cell_centers, 1))],
axis=-1)
def _SampleCenters(self, points_xyz, num_seeded_points):
p = self.params
if p.sampling_method == 'farthest_point':
return self._FarthestPointSampleCenters(points_xyz, num_seeded_points)
elif p.sampling_method == 'random_uniform':
if num_seeded_points > 0:
raise NotImplementedError(
'Random sampling with seeded points not yet implemented.')
return self._RandomUniformSampleCenters(points_xyz)
else:
raise ValueError('Param `sampling_method` must be one of {}.'.format(
self._SAMPLING_METHODS))
def TransformFeatures(self, features):
p = self.params
prepared_features = features.DeepCopy()
for prep_layer in self.features_preparation_layers:
prepared_features = prep_layer.FPropDefaultTheta(prepared_features)
num_seeded_points = prepared_features.lasers.get('num_seeded_points', 0)
points_data = prepared_features.lasers
points_xyz = points_data.points_xyz
if 'points_padding' in points_data:
points_padding = points_data.points_padding
points_mask = 1 - points_padding
points_xyz = tf.boolean_mask(points_xyz, points_mask)
centers = self._SampleCenters(points_xyz, num_seeded_points)
centers = py_utils.HasShape(centers, [p.num_cell_centers, 3])
features.anchor_centers = centers
features.cell_center_xyz = centers
return features
def TransformShapes(self, shapes):
p = self.params
shapes.anchor_centers = tf.TensorShape([p.num_cell_centers, 3])
shapes.cell_center_xyz = tf.TensorShape([p.num_cell_centers, 3])
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_centers = tf.float32
dtypes.cell_center_xyz = tf.float32
return dtypes
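# Pipeline sketch: SparseCenterSelector, SparseCellGatherFeatures and
# SparseCellCentersTopK (defined below) are typically chained: centers are
# sampled from the point cloud (farthest-point sampling by default), a
# fixed-size neighborhood of points is gathered around each center, and the
# centers can then be filtered to the top-K closest to the sensor. To switch
# to uniform sampling (illustrative values):
#   p = SparseCenterSelector.Params().Set(
#       num_cell_centers=256, sampling_method='random_uniform')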
class SparseCellGatherFeatures(Preprocessor):
"""Select local features for each cell.
This preprocessor expects features to contain:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
- cell_center_xyz of shape [C, 3]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
Adds the following features:
cell_points_xyz: [num_centers, num_points_per_cell, 3] - Floating point
output containing the (x, y, z) locations for each point for a given
center.
cell_feature: [num_centers, num_points_per_cell, F] - Floating point output
containing the features for each point for a given center.
cell_points_padding: [num_centers, num_points_per_cell] - 0/1 padding
for the points in each cell.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_points_per_cell', 128, 'The number of points per cell.')
p.Define('max_distance', 3.0, 'Max distance of point to cell center.')
p.Define(
'sample_neighbors_uniformly', False,
'Whether to sample the neighbor points for every cell center '
'uniformly at random. If False, this will default to selecting by '
'distance.')
return p
def TransformFeatures(self, features):
p = self.params
num_centers = py_utils.GetShape(features.cell_center_xyz, 1)[0]
num_features = py_utils.GetShape(features.lasers.points_feature)[-1]
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
if 'points_padding' in features.lasers:
points_mask = 1 - features.lasers.points_padding
points_xyz = tf.boolean_mask(points_xyz, points_mask)
points_feature = tf.boolean_mask(points_feature, points_mask)
# Note: points_xyz and points_feature must be unpadded as we pass
# padding=None to neighborhood indices. Ensuring that it is unpadded
# helps improve performance.
# Get nearby points using kNN.
sample_indices, sample_indices_padding = car_lib.NeighborhoodIndices(
tf.expand_dims(points_xyz, 0),
tf.expand_dims(features.cell_center_xyz, 0),
p.num_points_per_cell,
points_padding=None,
max_distance=p.max_distance,
sample_neighbors_uniformly=p.sample_neighbors_uniformly)
    # Take first example since NeighborhoodIndices expects batch dimension.
sample_indices = sample_indices[0, :, :]
sample_indices_padding = sample_indices_padding[0, :, :]
sample_indices = py_utils.HasShape(sample_indices,
[num_centers, p.num_points_per_cell])
cell_points_xyz = tf.gather(points_xyz, sample_indices)
cell_points_xyz = py_utils.HasShape(cell_points_xyz,
[num_centers, p.num_points_per_cell, 3])
cell_feature = tf.gather(points_feature, sample_indices)
cell_feature = py_utils.HasShape(
cell_feature, [num_centers, p.num_points_per_cell, num_features])
cell_points_padding = py_utils.HasShape(
sample_indices_padding, [num_centers, p.num_points_per_cell])
features.update({
'cell_points_xyz': cell_points_xyz,
'cell_feature': cell_feature,
'cell_points_padding': cell_points_padding,
})
return features
def TransformShapes(self, shapes):
p = self.params
num_centers = shapes.cell_center_xyz[0]
base_shape = [num_centers, p.num_points_per_cell]
num_features = shapes.lasers.points_feature[-1]
shapes.cell_points_xyz = tf.TensorShape(base_shape + [3])
shapes.cell_feature = tf.TensorShape(base_shape + [num_features])
shapes.cell_points_padding = tf.TensorShape(base_shape)
return shapes
def TransformDTypes(self, dtypes):
dtypes.cell_points_xyz = tf.float32
dtypes.cell_feature = tf.float32
dtypes.cell_points_padding = tf.float32
return dtypes
class SparseCellCentersTopK(Preprocessor):
"""Given selected centers and gathered points/features, apply a filter.
This preprocessor expects features to contain `cell_center_xyz` and all
entries in params.features_to_modify, and that the leading dimension should
all be the same (num_cell_centers from SparseCenterSelector).
We then modify all values in features that are specified in
params.features_to_modify by sorting them with the specified sort function
(specified by params.sort_by) operating on features.cell_center_xyz, and then
taking the top K (specified by params.num_cell_centers) along the first
dimension.
"""
_REGISTERED_SORT_FUNCTIONS = ['distance']
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_cell_centers', 512, 'The number of centers after filtering.')
p.Define(
'sort_by', 'distance', 'A string specifying which sort function '
'to use. Currently we just support `distance`.')
p.Define('features_to_modify', [
'cell_center_xyz', 'anchor_centers', 'cell_points_xyz', 'cell_feature',
'cell_points_padding'
], 'A list of keys from the features dict to modify.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.sort_by not in self._REGISTERED_SORT_FUNCTIONS:
raise ValueError('{} not supported. We only support {}.'.format(
p.sort_by, self._REGISTERED_SORT_FUNCTIONS))
if len(p.features_to_modify) < 1:
raise ValueError('Need to modify at least one feature.')
def _SortByDistance(self, features):
dist = tf.linalg.norm(features.cell_center_xyz, axis=-1)
return tf.argsort(dist, axis=-1, direction='ASCENDING')
def _Sort(self, features):
p = self.params
if p.sort_by == 'distance':
return self._SortByDistance(features)
else:
raise ValueError('Unsupported sort function: {}.'.format(p.sort_by))
def TransformFeatures(self, features):
p = self.params
sort_indices = self._Sort(features)
sort_indices_top_k = sort_indices[:p.num_cell_centers, ...]
# Gather each of the relevant items
for key in p.features_to_modify:
shape = py_utils.GetShape(features[key])
output_shape = [p.num_cell_centers] + shape[1:]
features[key] = py_utils.PadOrTrimTo(
tf.gather(features[key], sort_indices_top_k), output_shape)
return features
def TransformShapes(self, shapes):
p = self.params
for key in p.features_to_modify:
shapes[key] = tf.TensorShape([p.num_cell_centers] + shapes[key][1:])
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class TileAnchorBBoxes(Preprocessor):
"""Creates anchor_bboxes given anchor_centers.
This preprocessor expects features to contain the following keys:
- anchor_centers of shape [...base shape..., 3]
Adds the following features:
anchor_bboxes: base_shape + [7] - Floating point anchor box
output containing the anchor boxes and the 7 floating point
values for each box that define the box (x, y, z, dx, dy, dz, phi).
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('anchor_box_dimensions', [],
'List of anchor box sizes per center.')
p.Define('anchor_box_offsets', [], 'List of anchor box offsets per center.')
p.Define('anchor_box_rotations', [],
'List of anchor box rotations per center.')
return p
def TransformFeatures(self, features):
p = self.params
utils_3d = detection_3d_lib.Utils3D()
assert p.anchor_box_dimensions
assert p.anchor_box_offsets
assert p.anchor_box_rotations
base_shape = py_utils.GetShape(features.anchor_centers)[:-1]
num_box_per_center = len(p.anchor_box_dimensions)
anchor_centers = tf.reshape(features.anchor_centers, [-1, 3])
anchor_bboxes = utils_3d.MakeAnchorBoxes(
anchor_centers, tf.identity(p.anchor_box_dimensions),
tf.identity(p.anchor_box_offsets), tf.identity(p.anchor_box_rotations))
features.anchor_bboxes = tf.reshape(anchor_bboxes,
base_shape + [num_box_per_center, 7])
return features
def TransformShapes(self, shapes):
p = self.params
base_shape = shapes.anchor_centers[:-1]
num_box_per_center = len(p.anchor_box_dimensions)
shapes.anchor_bboxes = base_shape.concatenate([num_box_per_center, 7])
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_bboxes = tf.float32
return dtypes
class _AnchorBoxSettings:
"""Helper class to parameterize and update anchor box settings."""
# Implementations should fill out the following class members.
DIMENSION_PRIORS = []
ROTATIONS = []
CENTER_X_OFFSETS = []
CENTER_Y_OFFSETS = []
CENTER_Z_OFFSETS = []
@classmethod
def NumAnchors(cls):
return np.prod([
len(cls.DIMENSION_PRIORS),
len(cls.ROTATIONS),
len(cls.CENTER_X_OFFSETS),
len(cls.CENTER_Y_OFFSETS),
len(cls.CENTER_Z_OFFSETS)
])
@classmethod
def GenerateAnchorSettings(cls):
"""Generate anchor settings.
Returns:
A `NestedMap` containing three lists of the same length:
- anchor_box_dimensions
- anchor_box_rotations
- anchor_box_offsets
These can be used with the TileAnchorBBoxes preprocessor.
"""
anchor_box_dimensions = []
anchor_box_rotations = []
anchor_box_offsets = []
# The following is equivalent to a formulation of itertools.product, but
# is explicitly listed for readability.
# *Please note*: The ordering is important for ModelV2, which makes
# assumptions that the offset dimensions come first.
for cx in cls.CENTER_X_OFFSETS:
for cy in cls.CENTER_Y_OFFSETS:
for cz in cls.CENTER_Z_OFFSETS:
for rot in cls.ROTATIONS:
for dims in cls.DIMENSION_PRIORS:
anchor_box_dimensions += [dims]
anchor_box_rotations += [rot]
anchor_box_offsets += [(cx, cy, cz)]
# Check one of the lists has entries.
assert anchor_box_dimensions
return py_utils.NestedMap(
anchor_box_dimensions=anchor_box_dimensions,
anchor_box_rotations=anchor_box_rotations,
anchor_box_offsets=anchor_box_offsets)
@classmethod
def Update(cls, params):
"""Updates anchor box settings from input configuration lists.
Given dimensions priors, rotations, and offsets, computes the cartesian
product of the settings.
Args:
params: The KITTIAnchorExtractorBase.Params() object to update.
Returns:
Params updated with the anchor settings.
In total there are N combinations, where each (anchor_box_dimensions[i],
anchor_box_rotations[i], anchor_box_offsets[i]) for i in range(N) is an
option.
"""
p = params
settings = cls.GenerateAnchorSettings()
p.anchor_box_dimensions = settings.anchor_box_dimensions
p.anchor_box_rotations = settings.anchor_box_rotations
p.anchor_box_offsets = settings.anchor_box_offsets
return p
def MakeAnchorBoxSettings(dimension_priors, rotations, center_x_offsets,
center_y_offsets, center_z_offsets):
"""Returns a configured class for setting anchor box settings."""
class CustomAnchorBoxSettings(_AnchorBoxSettings):
DIMENSION_PRIORS = dimension_priors
ROTATIONS = rotations
CENTER_X_OFFSETS = center_x_offsets
CENTER_Y_OFFSETS = center_y_offsets
CENTER_Z_OFFSETS = center_z_offsets
return CustomAnchorBoxSettings
class SparseCarV1AnchorBoxSettings(_AnchorBoxSettings):
"""Anchor box settings for training on Cars for Sparse models."""
# Borrowed from PointPillar dimension prior for cars.
DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]
# 4 Rotations with axis aligned and both diagonals.
ROTATIONS = [0, np.pi / 2, np.pi / 4, 3 * np.pi / 4]
# 25 offsets per anchor box with fixed z offset at -1.
CENTER_X_OFFSETS = np.linspace(-1.5, 1.5, 5)
CENTER_Y_OFFSETS = np.linspace(-1.5, 1.5, 5)
CENTER_Z_OFFSETS = [-1.]
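# Sanity-check sketch: for SparseCarV1AnchorBoxSettings, NumAnchors() is
# 1 dimension prior * 4 rotations * 5 x-offsets * 5 y-offsets * 1 z-offset
# = 100 anchors per center; Update(p) fills p.anchor_box_dimensions,
# p.anchor_box_rotations and p.anchor_box_offsets with that 100-way cartesian
# product, which TileAnchorBBoxes then consumes.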
class PointPillarAnchorBoxSettingsCar(_AnchorBoxSettings):
DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]
ROTATIONS = [0, np.pi / 2]
# Fixed offset for every anchor box, based on a reading of the paper / code
# 0 offsets for x and y, and -1 for z.
CENTER_X_OFFSETS = [0.]
CENTER_Y_OFFSETS = [0.]
CENTER_Z_OFFSETS = [-1.]
class PointPillarAnchorBoxSettingsPed(PointPillarAnchorBoxSettingsCar):
DIMENSION_PRIORS = [(0.6, 0.8, 1.73)]
CENTER_Z_OFFSETS = [-0.6]
class PointPillarAnchorBoxSettingsCyc(PointPillarAnchorBoxSettingsCar):
DIMENSION_PRIORS = [(0.6, 1.76, 1.73)]
CENTER_Z_OFFSETS = [-0.6]
class PointPillarAnchorBoxSettingsPedCyc(PointPillarAnchorBoxSettingsCar):
DIMENSION_PRIORS = [(0.6, 0.8, 1.7), (0.6, 1.76, 1.73)]
CENTER_Z_OFFSETS = [-0.6]
class AnchorAssignment(Preprocessor):
"""Perform anchor assignment on the features.
This preprocessor expects features to contain the following keys:
- anchor_bboxes of shape [...base shape..., 7]
- labels.bboxes_3d
- labels.labels
- labels.bboxes_3d_mask
Adds the following features:
anchor_localization_residuals: base_shape + [7] floating point tensor of
residuals. The model is expected to regress against these residuals as
targets. The residuals can be converted back into bboxes using
detection_3d_lib.Utils3D.ResidualsToBBoxes.
assigned_gt_idx: base_shape - The corresponding index of the ground
truth bounding box for each anchor box in anchor_bboxes, anchors not
assigned will have idx be set to -1.
assigned_gt_bbox: base_shape + [7] - The corresponding ground
truth bounding box for each anchor box in anchor_bboxes.
assigned_gt_labels: base_shape - The assigned groundtruth label
for each anchor box.
assigned_gt_similarity_score: base_shape - The similarity score
for each assigned anchor box.
assigned_cls_mask: base_shape mask for classification loss per anchor.
This should be 1.0 if the anchor has a foreground or background
assignment; otherwise, it will be assigned to 0.0.
assigned_reg_mask: base_shape mask for regression loss per anchor.
This should be 1.0 if the anchor has a foreground assignment;
otherwise, it will be assigned to 0.0.
Note: background anchors do not have regression targets.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'foreground_assignment_threshold', 0.5,
'Score (usually IOU) threshold for assigning a box as foreground.')
p.Define(
'background_assignment_threshold', 0.35,
'Score (usually IOU) threshold for assigning a box as background.')
return p
def TransformFeatures(self, features):
p = self.params
utils_3d = detection_3d_lib.Utils3D()
# anchor_bboxes will be returned with shape [#centers, #boxes_per_center, 7]
# flatten boxes here for matching.
base_shape = py_utils.GetShape(features.anchor_bboxes)[:-1]
anchor_bboxes = tf.reshape(features.anchor_bboxes, [-1, 7])
assigned_anchors = utils_3d.AssignAnchors(
anchor_bboxes,
features.labels.bboxes_3d,
features.labels.labels,
features.labels.bboxes_3d_mask,
foreground_assignment_threshold=p.foreground_assignment_threshold,
background_assignment_threshold=p.background_assignment_threshold)
# Add new features.
features.assigned_gt_idx = tf.reshape(assigned_anchors.assigned_gt_idx,
base_shape)
features.assigned_gt_bbox = tf.reshape(assigned_anchors.assigned_gt_bbox,
base_shape + [7])
features.assigned_gt_labels = tf.reshape(
assigned_anchors.assigned_gt_labels, base_shape)
features.assigned_gt_similarity_score = tf.reshape(
assigned_anchors.assigned_gt_similarity_score, base_shape)
features.assigned_cls_mask = tf.reshape(assigned_anchors.assigned_cls_mask,
base_shape)
features.assigned_reg_mask = tf.reshape(assigned_anchors.assigned_reg_mask,
base_shape)
# Compute residuals.
features.anchor_localization_residuals = utils_3d.LocalizationResiduals(
features.anchor_bboxes, features.assigned_gt_bbox)
return features
def TransformShapes(self, shapes):
base_shape = shapes.anchor_bboxes[:-1]
box_shape = base_shape.concatenate([7])
shapes.anchor_localization_residuals = box_shape
shapes.assigned_gt_idx = base_shape
shapes.assigned_gt_bbox = box_shape
shapes.assigned_gt_labels = base_shape
shapes.assigned_gt_similarity_score = base_shape
shapes.assigned_cls_mask = base_shape
shapes.assigned_reg_mask = base_shape
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_localization_residuals = tf.float32
dtypes.assigned_gt_idx = tf.int32
dtypes.assigned_gt_bbox = tf.float32
dtypes.assigned_gt_labels = tf.int32
dtypes.assigned_gt_similarity_score = tf.float32
dtypes.assigned_cls_mask = tf.float32
dtypes.assigned_reg_mask = tf.float32
return dtypes
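# Configuration sketch (thresholds are illustrative): with IoU-style similarity
# scores, anchors scoring >= 0.6 against a groundtruth box become foreground,
# anchors scoring < 0.45 become background, and anchors in between contribute
# to neither the classification nor the regression masks:
#   p = AnchorAssignment.Params().Set(
#       foreground_assignment_threshold=0.6,
#       background_assignment_threshold=0.45)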
class DropLaserPointsOutOfRange(Preprocessor):
"""Drops laser points that are out of pre-defined x/y/z ranges.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
Modifies the following features:
Removes or sets padding to 1 for all points outside a given range. Modifies
all items in the lasers subdictionary like lasers.points_xyz,
lasers.points_feature, lasers.points_padding, and optionally
lasers.points_label, lasers.points_bbox_id.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('keep_x_range', (-np.inf, np.inf),
'Only points that have x coordinates within this range are kept.')
p.Define('keep_y_range', (-np.inf, np.inf),
'Only points that have y coordinates within this range are kept.')
p.Define(
'keep_z_range', (-np.inf, np.inf),
'Only points that have z coordinates within this range are kept. '
'Approximate ground-removal can be performed by specifying a '
'lower-bound on the z-range.')
return p
def TransformFeatures(self, features):
p = self.params
points_xyz = features.lasers.points_xyz
if 'points_padding' in features.lasers:
points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)
else:
# All points are real, we keep points unpadded by applying boolean_mask
# on points_mask later.
points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)
min_x, max_x = p.keep_x_range
min_y, max_y = p.keep_y_range
min_z, max_z = p.keep_z_range
# Short-circuit if all ranges are set to -inf, inf.
if (np.all(np.isneginf([min_x, min_y, min_z])) and
np.all(np.isposinf([max_x, max_y, max_z]))):
return features
if min_x != -np.inf:
points_mask &= points_xyz[:, 0] >= min_x
if min_y != -np.inf:
points_mask &= points_xyz[:, 1] >= min_y
if min_z != -np.inf:
points_mask &= points_xyz[:, 2] >= min_z
if max_x != np.inf:
points_mask &= points_xyz[:, 0] <= max_x
if max_y != np.inf:
points_mask &= points_xyz[:, 1] <= max_y
if max_z != np.inf:
points_mask &= points_xyz[:, 2] <= max_z
if 'points_padding' in features.lasers:
# Suffices to just update the padding.
features.lasers.points_padding = 1. - tf.cast(points_mask, tf.float32)
else:
features.lasers = features.lasers.Transform(
_GetApplyPointMaskFn(points_mask))
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
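# Usage sketch (illustrative values, not taken from this file; assumes the
# standard lingvo Params().Set(...) idiom): keep points within a 150m x 150m
# square around the sensor and drop points far below the ground plane.
#
#   drop_range_p = DropLaserPointsOutOfRange.Params().Set(
#       keep_x_range=(-75., 75.),
#       keep_y_range=(-75., 75.),
#       keep_z_range=(-3., np.inf))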
class KITTIDropPointsOutOfFrustum(Preprocessor):
"""Drops laser points that are outside of the camera frustum.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
- images.velo_to_image_plane of shape [3, 4]
- images.width of shape [1]
- images.height of shape [1]
and optionally points_padding of shape [P] corresponding to the padding.
  If points_padding is None, then all points are considered valid.
Modifies the following features:
lasers.points_xyz, lasers.points_feature, lasers.points_padding, and
optionally lasers.points_label, lasers.points_bbox_id so that
points outside the frustum have padding set to 1 or are removed.
"""
def TransformFeatures(self, features):
# Drop points behind the car (behind x-axis = 0).
images = features.images
front_indices = features.lasers.points_xyz[:, 0] >= 0
if 'points_padding' not in features.lasers:
# Keep tensors unpadded and small using boolean_mask.
features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,
front_indices)
features.lasers.points_feature = tf.boolean_mask(
features.lasers.points_feature, front_indices)
# Drop those points outside the image plane.
points_image = geometry.PointsToImagePlane(features.lasers.points_xyz,
images.velo_to_image_plane)
in_image_plane = (
(points_image[:, 0] >= 0) &
(points_image[:, 0] <= tf.cast(images.width, tf.float32)) &
(points_image[:, 1] >= 0) &
(points_image[:, 1] <= tf.cast(images.height, tf.float32)))
if 'points_padding' in features.lasers:
# Update padding to only include front indices and in image plane.
points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)
points_mask &= front_indices
points_mask &= in_image_plane
features.lasers.points_padding = 1. - tf.cast(points_mask, tf.float32)
else:
features.lasers = features.lasers.Transform(
_GetApplyPointMaskFn(in_image_plane))
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class RandomWorldRotationAboutZAxis(Preprocessor):
"""Rotates the world randomly as a form of data augmentation.
Rotations are performed around the *z-axis*. This assumes that the car is
  always level. In general, we'd like to instead rotate the car on the spot;
  this would then make sense for cases where the car is on a slope.
When there are leading dimensions, this will rotate the boxes with the same
transformation across all the frames. This is useful when the input is a
sequence of frames from the same run segment.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [..., 3]
- labels.bboxes_3d of shape [..., 7]
Modifies the following features:
lasers.points_xyz, labels.bboxes_3d with the same rotation applied to both.
Adds the following features:
world_rot_z which contains the rotation applied to the example.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'max_rotation', None,
'The rotation amount will be randomly picked from '
'[-max_rotation, max_rotation).')
p.Define(
'include_world_rot_z', True,
'Whether to include the applied rotation as an additional tensor. '
'It can be helpful to disable this when using the preprocessor in a '
'way that expects the structure of the features to be the same '
'(e.g., as a branch in tf.cond).')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.max_rotation is None:
raise ValueError('max_rotation needs to be specified, instead of None.')
def TransformFeatures(self, features):
p = self.params
rot = tf.random.uniform((),
minval=-p.max_rotation,
maxval=p.max_rotation,
seed=p.random_seed)
    # Rotating about the z-axis is equivalent to changing the yaw.
pose = [0., 0., 0., rot, 0., 0.]
# Rotate points.
features.lasers.points_xyz = geometry.CoordinateTransform(
features.lasers.points_xyz, pose)
# Rotate bboxes, note that heading has a special case.
bboxes_xyz = features.labels.bboxes_3d[..., :3]
bboxes_dims = features.labels.bboxes_3d[..., 3:6]
bboxes_rot = features.labels.bboxes_3d[..., 6:]
bboxes_xyz = geometry.CoordinateTransform(bboxes_xyz, pose)
# The heading correction should subtract rot from the bboxes rotations.
bboxes_rot = geometry.WrapAngleRad(bboxes_rot - rot)
features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],
axis=-1)
if p.include_world_rot_z:
features.world_rot_z = rot
return features
def TransformShapes(self, shapes):
if self.params.include_world_rot_z:
shapes.world_rot_z = tf.TensorShape([])
return shapes
def TransformDTypes(self, dtypes):
if self.params.include_world_rot_z:
dtypes.world_rot_z = tf.float32
return dtypes
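# Usage sketch (illustrative value, not taken from this file): rotate the whole
# scene by up to +/-45 degrees around the z-axis.
#
#   rot_p = RandomWorldRotationAboutZAxis.Params().Set(max_rotation=np.pi / 4)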
class DropPointsOutOfFrustum(Preprocessor):
"""Drops points outside of pre-defined theta / phi ranges.
  Note that the ranges for keep_phi_range can be negative; this is because the
phi values wrap around 2*pi. Thus, a valid range that filters the 90 deg
frontal field of view of the car can be specified as [-pi/4, pi/4].
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
Modifies the following features:
- lasers.points_xyz removing any points out of frustum.
- lasers.points_feature removing any points out of frustum.
Note: We expect a downstream processor that filters out boxes with few points
to drop the corresponding bboxes.
"""
@classmethod
def Params(cls):
p = super().Params()
    p.Define('keep_theta_range', (0., np.pi),
             'Only points that have theta coordinates within this range are '
             'kept.')
    p.Define('keep_phi_range', (0., 2. * np.pi),
             'Only points that have phi coordinates within this range are '
             'kept.')
return p
def TransformFeatures(self, features):
p = self.params
if 'points_padding' in features.lasers:
raise ValueError('DropPointsOutOfFrustum preprocessor does not support '
'padded lasers.')
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
min_theta, max_theta = p.keep_theta_range
if (min_theta < 0. or min_theta > np.pi or max_theta < 0. or
max_theta > np.pi):
raise ValueError('Valid values for theta are between 0 and pi, '
'keep_theta_range={}'.format(p.keep_theta_range))
if min_theta > max_theta:
raise ValueError('min_theta must be <= max_theta, '
'keep_theta_range={}'.format(p.keep_theta_range))
min_phi, max_phi = p.keep_phi_range
if (min_phi < -2. * np.pi or min_phi > 2. * np.pi or
max_phi < -2. * np.pi or max_phi > 2. * np.pi):
      raise ValueError('Valid values for phi are between -2*pi and 2*pi, '
'keep_phi_range={}'.format(p.keep_phi_range))
if min_phi > max_phi:
raise ValueError('min_phi must be <= max_phi, '
'keep_phi_range={}'.format(p.keep_phi_range))
_, theta, phi = tf.unstack(
geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)
    # phi is returned in the range [-pi, pi]; we shift the values between
    # [-pi, 0] to [pi, 2pi] instead to make the logic below easier to follow.
# Hence, all phi values after this will be [0, 2pi].
phi = tf.where(phi >= 0., phi, 2. * np.pi + phi)
# Theta does not have circular boundary conditions, a simple check suffices.
points_mask = (theta >= min_theta) & (theta <= max_theta)
if min_phi < 0. and max_phi < 0.:
      # Both are less than zero, so we just add 2pi and will use the regular
# check.
min_phi += 2. * np.pi
max_phi += 2. * np.pi
if min_phi < 0.:
# The minimum threshold is below 0, so we split into checking between
# (0 to min_phi) and (0 to max_phi). Note that min_phi is negative, but
# phi is always positive, so we take 2*pi + min_phi to get the range of
# appropriate values.
points_mask &= (phi >= (2. * np.pi + min_phi)) | (phi <= max_phi)
else:
# Both must be greater than 0 if we get to this condition.
assert min_phi >= 0.
assert max_phi >= 0.
points_mask &= (phi >= min_phi) & (phi <= max_phi)
features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)
features.lasers.points_feature = tf.boolean_mask(points_feature,
points_mask)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
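# Usage sketch (illustrative values, not taken from this file): keep only the
# 90 degree frontal field of view described in the class docstring.
#
#   frustum_p = DropPointsOutOfFrustum.Params().Set(
#       keep_theta_range=(0., np.pi),
#       keep_phi_range=(-np.pi / 4, np.pi / 4))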
class DropBoxesOutOfRange(Preprocessor):
"""Drops boxes outside of pre-defined x/y/z ranges (boundaries inclusive).
This preprocessor expects features to contain the following keys:
- labels.bboxes_3d of shape [N, 7]
- labels.bboxes_3d_mask of shape [N]
Modifies the following features:
- labels.bboxes_3d_mask to mask out any additional boxes.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('keep_x_range', (-np.inf, np.inf),
'Only boxes that have x coordinates within this range are kept.')
p.Define('keep_y_range', (-np.inf, np.inf),
'Only boxes that have y coordinates within this range are kept.')
p.Define('keep_z_range', (-np.inf, np.inf),
'Only boxes that have z coordinates within this range are kept.')
return p
def TransformFeatures(self, features):
p = self.params
min_x, max_x = p.keep_x_range
min_y, max_y = p.keep_y_range
min_z, max_z = p.keep_z_range
# Short-circuit if all ranges are set to -inf, inf.
if (np.all(np.isneginf([min_x, min_y, min_z])) and
np.all(np.isposinf([max_x, max_y, max_z]))):
return features
# For each bounding box, compute whether any of its extrema
# fall outside of the range.
bboxes_3d_corners = geometry.BBoxCorners(
features.labels.bboxes_3d[tf.newaxis, ...])[0]
bboxes_3d_corners = py_utils.HasShape(bboxes_3d_corners, [-1, 8, 3])
min_bbox_x = tf.reduce_min(bboxes_3d_corners[:, :, 0], axis=-1)
max_bbox_x = tf.reduce_max(bboxes_3d_corners[:, :, 0], axis=-1)
min_bbox_y = tf.reduce_min(bboxes_3d_corners[:, :, 1], axis=-1)
max_bbox_y = tf.reduce_max(bboxes_3d_corners[:, :, 1], axis=-1)
min_bbox_z = tf.reduce_min(bboxes_3d_corners[:, :, 2], axis=-1)
max_bbox_z = tf.reduce_max(bboxes_3d_corners[:, :, 2], axis=-1)
mask = (
tf.math.logical_and(min_bbox_x >= min_x, max_bbox_x <= max_x)
& tf.math.logical_and(min_bbox_y >= min_y, max_bbox_y <= max_y)
& tf.math.logical_and(min_bbox_z >= min_z, max_bbox_z <= max_z))
max_num_boxes = py_utils.GetShape(features.labels.bboxes_3d_mask)
mask = py_utils.HasShape(mask, max_num_boxes)
features.labels.bboxes_3d_mask *= tf.cast(mask, tf.float32)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
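# Usage sketch (illustrative values, not taken from this file): mask out boxes
# whose extrema fall outside the same lateral region used for point filtering.
#
#   drop_boxes_p = DropBoxesOutOfRange.Params().Set(
#       keep_x_range=(-75., 75.),
#       keep_y_range=(-75., 75.),
#       keep_z_range=(-np.inf, np.inf))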
class PadLaserFeatures(Preprocessor):
"""Pads laser features so that the dimensions are fixed.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
and optionally points_padding of shape [P] corresponding to the padding.
  If points_padding is None, then all points are considered valid.
Modifies the following features:
lasers.points_xyz and lasers.points_feature to add padding.
Optionally also modifies lasers.points_label and lasers.points_bbox_id
if they exist to add padding.
Modifies/adds the following features:
    lasers.points_padding of shape [P] representing the padding.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('max_num_points', 128500,
'Max number of points to pad the points to.')
return p
def TransformFeatures(self, features):
p = self.params
if 'points_padding' in features.lasers:
points_mask = 1 - features.lasers.points_padding
points_mask = tf.cast(points_mask, tf.bool)
features.lasers = features.lasers.Transform(
_GetApplyPointMaskFn(points_mask))
npoints = tf.shape(features.lasers.points_xyz)[0]
features.lasers.points_padding = tf.ones([npoints])
shuffled_idx = tf.range(npoints)
shuffled_idx = tf.random.shuffle(shuffled_idx, seed=p.random_seed)
def _PadOrTrimFn(points_tensor):
# Shuffle before trimming so we have a random sampling
points_tensor = tf.gather(points_tensor, shuffled_idx)
return py_utils.PadOrTrimTo(points_tensor, [p.max_num_points] +
points_tensor.shape[1:].as_list())
features.lasers = features.lasers.Transform(_PadOrTrimFn)
features.lasers.points_padding = 1.0 - features.lasers.points_padding
return features
def TransformShapes(self, shapes):
p = self.params
def _TransformShape(points_shape):
return tf.TensorShape([p.max_num_points] + points_shape[1:].as_list())
shapes.lasers = shapes.lasers.Transform(_TransformShape)
shapes.lasers.points_padding = tf.TensorShape([p.max_num_points])
return shapes
def TransformDTypes(self, dtypes):
dtypes.lasers.points_padding = tf.float32
return dtypes
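# Usage sketch (illustrative value, not taken from this file): pad or trim the
# point cloud to a fixed size so that downstream batching sees static shapes.
#
#   pad_p = PadLaserFeatures.Params().Set(max_num_points=131072)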
class WorldScaling(Preprocessor):
"""Scale the world randomly as a form of data augmentation.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- labels.bboxes_3d of shape [L, 7]
Modifies the following features:
lasers.points_xyz, labels.bboxes_3d with the same scaling applied to both.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('scaling', None, 'The scaling range.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.scaling is None:
raise ValueError('scaling needs to be specified, instead of None.')
if len(p.scaling) != 2:
raise ValueError('scaling needs to be a list of two elements.')
def TransformFeatures(self, features):
p = self.params
scaling = tf.random.uniform((),
minval=p.scaling[0],
maxval=p.scaling[1],
seed=p.random_seed,
dtype=features.lasers.points_xyz.dtype)
# Scale points [num_points, 3].
features.lasers.points_xyz *= scaling
# Scaling bboxes (location and dimensions).
bboxes_xyz = features.labels.bboxes_3d[..., :3] * scaling
bboxes_dims = features.labels.bboxes_3d[..., 3:6] * scaling
bboxes_rot = features.labels.bboxes_3d[..., 6:]
features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],
axis=-1)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
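# Usage sketch (illustrative values, not taken from this file): scale the whole
# scene by a factor drawn uniformly from [0.95, 1.05].
#
#   scale_p = WorldScaling.Params().Set(scaling=[0.95, 1.05])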
class RandomDropLaserPoints(Preprocessor):
"""Randomly dropout laser points and the corresponding features.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
Modifies the following features:
lasers.points_xyz, lasers.points_feature.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('keep_prob', 0.95, 'Probability for keeping points.')
return p
def TransformFeatures(self, features):
p = self.params
num_points, _ = py_utils.GetShape(features.lasers.points_xyz)
pts_keep_sample_prob = tf.random.uniform([num_points],
minval=0,
maxval=1,
seed=p.random_seed)
pts_keep_mask = pts_keep_sample_prob < p.keep_prob
if 'points_padding' in features.lasers:
# Update points_padding so that where pts_keep_mask is True,
# points_padding remains 0.
points_mask = 1 - features.lasers.points_padding
points_mask *= tf.cast(pts_keep_mask, tf.float32)
features.lasers.points_padding = 1 - points_mask
else:
features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,
pts_keep_mask)
features.lasers.points_feature = tf.boolean_mask(
features.lasers.points_feature, pts_keep_mask)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
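# Usage sketch (illustrative value, not taken from this file): randomly keep
# 90% of the points in each scene.
#
#   drop_points_p = RandomDropLaserPoints.Params().Set(keep_prob=0.9)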
class RandomFlipY(Preprocessor):
"""Flip the world along axis Y as a form of data augmentation.
When there are leading dimensions, this will flip the boxes with the same
transformation across all the frames. This is useful when the input is a
sequence of frames from the same run segment.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [..., 3]
- labels.bboxes_3d of shape [..., 7]
Modifies the following features:
lasers.points_xyz, labels.bboxes_3d with the same flipping applied to both.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('flip_probability', 0.5, 'Probability of flipping.')
return p
def TransformFeatures(self, features):
p = self.params
threshold = 1. - p.flip_probability
choice = tf.random.uniform(
(), minval=0.0, maxval=1.0, seed=p.random_seed) >= threshold
# Flip points
points_xyz = features.lasers.points_xyz
points_y = tf.where(choice, -points_xyz[..., 1:2], points_xyz[..., 1:2])
features.lasers.points_xyz = tf.concat(
[points_xyz[..., 0:1], points_y, points_xyz[..., 2:3]], axis=-1)
# Flip boxes
bboxes_xyz = features.labels.bboxes_3d[..., :3]
bboxes_y = tf.where(choice, -bboxes_xyz[..., 1:2], bboxes_xyz[..., 1:2])
bboxes_xyz = tf.concat(
[bboxes_xyz[..., 0:1], bboxes_y, bboxes_xyz[..., 2:3]], axis=-1)
# Compensate rotation.
bboxes_dims = features.labels.bboxes_3d[..., 3:6]
bboxes_rot = features.labels.bboxes_3d[..., 6:]
bboxes_rot = tf.where(choice, geometry.WrapAngleRad(-bboxes_rot),
bboxes_rot)
features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],
axis=-1)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class GlobalTranslateNoise(Preprocessor):
"""Add global translation noise of xyz coordinates to points and boxes.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- labels.bboxes_3d of shape [L, 7]
Modifies the following features:
lasers.points_xyz, labels.bboxes_3d with the same
random translation noise applied to both.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('noise_std', [0.2, 0.2, 0.2],
'Standard deviation of translation noise per axis.')
return p
def TransformFeatures(self, features):
p = self.params
# Use three different seeds but the same base seed so
# that the values are different.
base_seed = p.random_seed
x_seed = base_seed
y_seed = None if base_seed is None else base_seed + 1
z_seed = None if base_seed is None else base_seed + 2
random_translate_x = tf.random.normal((),
mean=0.0,
stddev=p.noise_std[0],
seed=x_seed)
random_translate_y = tf.random.normal((),
mean=0.0,
stddev=p.noise_std[1],
seed=y_seed)
random_translate_z = tf.random.normal((),
mean=0.0,
stddev=p.noise_std[2],
seed=z_seed)
pose = tf.stack([
random_translate_x, random_translate_y, random_translate_z, 0.0, 0.0,
0.0
],
axis=0)
# Translate points.
points_xyz = features.lasers.points_xyz
features.lasers.points_xyz = geometry.CoordinateTransform(points_xyz, pose)
# Translate boxes
bboxes_xyz = features.labels.bboxes_3d[..., :3]
bboxes_xyz = geometry.CoordinateTransform(bboxes_xyz, pose)
features.labels.bboxes_3d = tf.concat(
[bboxes_xyz, features.labels.bboxes_3d[..., 3:]], axis=-1)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
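# Usage sketch (illustrative values, not taken from this file): jitter the
# whole scene by zero-mean Gaussian noise with a 0.2m standard deviation per
# axis.
#
#   translate_p = GlobalTranslateNoise.Params().Set(noise_std=[0.2, 0.2, 0.2])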
class RandomBBoxTransform(Preprocessor):
"""Randomly transform bounding boxes and the points inside them.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
- lasers.points_padding of shape [P]
- labels.bboxes_3d of shape [L, 7]
- labels.bboxes_3d_mask of shape [L]
Modifies the following features:
lasers.points_{xyz,feature,padding}, labels.bboxes_3d with the
transformed bounding boxes and points.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'max_rotation', None,
'The rotation amount will be randomly picked from '
'[-max_rotation, max_rotation).')
# At the moment we don't use this because it can cause boxes to collide with
# each other. We need to compute box intersections when deciding whether to
# apply the translation jitter. Theoretically we should also do this for
# rotation.
p.Define('noise_std', [0.0, 0.0, 0.0],
'Standard deviation of translation noise per axis.')
p.Define(
'max_scaling', None,
'An optional float list of length 3. When max_scaling is not none, '
'delta parameters s_x, s_y, s_z are drawn from '
'[-max_scaling[i], max_scaling[i]] where i is in [0, 2].')
p.Define(
'max_shearing', None,
'An optional float list of length 6. When max_shearing is not none, '
        'shearing parameters sh_x^y, sh_x^z, sh_y^x, sh_y^z, sh_z^x, sh_z^y are '
'drawn from [-max_shearing[i], max_shearing[i]], where i is in [0, 5].')
p.Define(
'max_num_points_per_bbox', 16384,
'The maximum number of points that fall within a bounding box. '
'Bounding boxes with more points than this value will '
        'have some points dropped.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.max_rotation is None:
raise ValueError('max_rotation needs to be specified, instead of None.')
if p.max_scaling is not None:
if len(p.max_scaling) != 3:
raise ValueError('max_scaling needs to be specified as either None or '
'list of 3 floating point numbers, instead of {}.'
''.format(p.max_scaling))
if p.max_shearing is not None:
if len(p.max_shearing) != 6:
raise ValueError('max_shearing needs to be specified as either None or '
'list of 6 floating point numbers, instead of {}.'
''.format(p.max_shearing))
def _Foreground(self, features, points_xyz, points_feature, real_bboxes_3d,
points_in_bbox_mask, rotation, translate_pose, transform_fn):
"""Extract and transform foreground points and features."""
out_bbox_xyz, out_bbox_feature, out_bbox_mask = self._ForLoopBuffers(
features)
# Only iterate over the actual number of boxes in the scene.
actual_num_bboxes = tf.reduce_sum(
tf.cast(features.labels.bboxes_3d_mask, tf.int32))
ret = py_utils.ForLoop(
body=transform_fn,
start=0,
limit=actual_num_bboxes,
delta=1,
loop_state=py_utils.NestedMap(
points_xyz=points_xyz,
points_feature=points_feature,
bboxes_3d=real_bboxes_3d,
points_in_bbox_mask=points_in_bbox_mask,
rotation=rotation,
translate_pose=translate_pose,
out_bbox_points=out_bbox_xyz,
out_bbox_feature=out_bbox_feature,
out_bbox_mask=out_bbox_mask))
# Gather all of the transformed points and features
out_bbox_xyz = tf.reshape(ret.out_bbox_points, [-1, 3])
num_features = features.lasers.points_feature.shape[-1]
out_bbox_feature = tf.reshape(ret.out_bbox_feature, [-1, num_features])
out_bbox_mask = tf.cast(tf.reshape(ret.out_bbox_mask, [-1]), tf.bool)
fg_xyz = tf.boolean_mask(out_bbox_xyz, out_bbox_mask)
fg_feature = tf.boolean_mask(out_bbox_feature, out_bbox_mask)
return fg_xyz, fg_feature
def _Background(self, points_xyz, points_feature, points_in_bbox_mask):
# If a point is in any bounding box, it is a foreground point.
foreground_points_mask = tf.reduce_any(points_in_bbox_mask, axis=-1)
# All others are background. We rotate all of the foreground points to
# final_points_* and keep the background points unchanged
background_points_mask = tf.math.logical_not(foreground_points_mask)
background_points_xyz = tf.boolean_mask(points_xyz, background_points_mask)
background_points_feature = tf.boolean_mask(points_feature,
background_points_mask)
return background_points_xyz, background_points_feature
def _ForLoopBuffers(self, features):
"""Create and return the buffers for the for loop."""
p = self.params
bboxes_3d = features.labels.bboxes_3d
# Compute the shapes and create the buffers for the For loop.
max_num_bboxes = tf.shape(bboxes_3d)[0]
per_box_shape = [max_num_bboxes, p.max_num_points_per_bbox, 3]
out_bbox_points = inplace_ops.empty(
per_box_shape, dtype=tf.float32, init=True)
num_features = features.lasers.points_feature.shape[-1]
bbox_feature_shape = [
max_num_bboxes, p.max_num_points_per_bbox, num_features
]
out_bbox_feature = inplace_ops.empty(
bbox_feature_shape, dtype=tf.float32, init=True)
per_box_mask_shape = [max_num_bboxes, p.max_num_points_per_bbox]
out_bbox_mask = inplace_ops.empty(
per_box_mask_shape, dtype=tf.float32, init=True)
return out_bbox_points, out_bbox_feature, out_bbox_mask
def TransformFeatures(self, features):
p = self.params
num_features = features.lasers.points_feature.shape[-1]
def Transform(i, state):
"""Transform the points in bounding box `i`."""
state.points_xyz = tf.reshape(state.points_xyz, [-1, 3])
bbox_mask = tf.reshape(state.points_in_bbox_mask[:, i], [-1])
# Fetch only the points in the bounding box.
points_xyz_masked = tf.boolean_mask(state.points_xyz, bbox_mask)
points_feature_masked = tf.boolean_mask(state.points_feature, bbox_mask)
num_points = tf.shape(points_xyz_masked)[0]
# TODO(vrv): Fold the following into a single transformation
# matrix.
#
# Translate the box to the origin, then rotate the desired
# rotation angle.
translation_vec = state.bboxes_3d[i, 0:3]
rotation_vec = [state.rotation[i], 0., 0.]
pose = tf.concat([-translation_vec, rotation_vec], axis=0)
points_xyz_adj = geometry.CoordinateTransform(points_xyz_masked, pose)
if p.max_scaling is not None or p.max_shearing is not None:
# Translate the points in the bounding box by moving dz/2 so that the
# bottom of the bounding box is at Z = 0 when any of the two
# (max_scaling or max_shearing) is not None
translation_scale_or_shear = tf.stack(
[0., 0., state.bboxes_3d[i, 5] / 2], axis=0)
pose1 = tf.concat([translation_scale_or_shear, [0., 0., 0.]], axis=0)
points_xyz_adj = geometry.CoordinateTransform(points_xyz_adj, pose1)
else:
translation_scale_or_shear = tf.stack([0., 0., 0.], axis=0)
if p.max_scaling is not None:
# Perform scaling to the point cloud
# Scaling matrix
# [[s_x+1 0 0]
# [ 0 s_y+1 0]
# [ 0 0 s_z+1]]
sx = tf.random.uniform([],
minval=-p.max_scaling[0],
maxval=p.max_scaling[0],
seed=p.random_seed)
sy = tf.random.uniform([],
minval=-p.max_scaling[1],
maxval=p.max_scaling[1],
seed=p.random_seed)
sz = tf.random.uniform([],
minval=-p.max_scaling[2],
maxval=p.max_scaling[2],
seed=p.random_seed)
scaling_matrix = tf.stack(
[[sx + 1., 0., 0.], [0., sy + 1., 0.], [0., 0., sz + 1.]], axis=0)
points_xyz_adj = tf.einsum('ij,kj->ki', scaling_matrix, points_xyz_adj)
if p.max_shearing is not None:
# Perform shearing to the point cloud
# Shearing matrix
# [[1 sh_x^y sh_x^z]
# [sh_y^x 1 sh_y^z]
# [sh_z^x sh_z^y 1 ]]
sxy = tf.random.uniform([],
minval=-p.max_shearing[0],
maxval=p.max_shearing[0],
seed=p.random_seed)
sxz = tf.random.uniform([],
minval=-p.max_shearing[1],
maxval=p.max_shearing[1],
seed=p.random_seed)
syx = tf.random.uniform([],
minval=-p.max_shearing[2],
maxval=p.max_shearing[2],
seed=p.random_seed)
syz = tf.random.uniform([],
minval=-p.max_shearing[3],
maxval=p.max_shearing[3],
seed=p.random_seed)
szx = tf.random.uniform([],
minval=-p.max_shearing[4],
maxval=p.max_shearing[4],
seed=p.random_seed)
szy = tf.random.uniform([],
minval=-p.max_shearing[5],
maxval=p.max_shearing[5],
seed=p.random_seed)
shearing_matrix = tf.stack(
[[1., sxy, sxz], [syx, 1., syz], [szx, szy, 1.]], axis=0)
points_xyz_adj = tf.einsum('ij,kj->ki', shearing_matrix, points_xyz_adj)
# Translate the points back, adding noise if needed.
translation_with_noise = (
translation_vec - translation_scale_or_shear +
state.translate_pose[i])
pose2 = tf.concat([translation_with_noise, [0., 0., 0.]], axis=0)
final_points_xyz = geometry.CoordinateTransform(points_xyz_adj, pose2)
# final_points_xyz is an [M, 3] Tensor where M is the number of points in
# the box.
points_mask = tf.ones([num_points], dtype=tf.float32)
final_points_xyz = py_utils.PadOrTrimTo(final_points_xyz,
[p.max_num_points_per_bbox, 3])
final_points_feature = py_utils.PadOrTrimTo(
points_feature_masked, [p.max_num_points_per_bbox, num_features])
points_mask = py_utils.PadOrTrimTo(points_mask,
[p.max_num_points_per_bbox])
state.out_bbox_points = inplace_ops.alias_inplace_update(
state.out_bbox_points, [i], tf.expand_dims(final_points_xyz, 0))
state.out_bbox_feature = inplace_ops.alias_inplace_update(
state.out_bbox_feature, [i], tf.expand_dims(final_points_feature, 0))
state.out_bbox_mask = inplace_ops.alias_inplace_update(
state.out_bbox_mask, [i], tf.expand_dims(points_mask, 0))
return state
# Get the points and features that reside in boxes.
if 'points_padding' in features.lasers:
      points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)
points_xyz = tf.boolean_mask(features.lasers.points_xyz, points_mask)
points_feature = tf.boolean_mask(features.lasers.points_feature,
points_mask)
else:
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
# Fetch real bounding boxes and compute point mask.
real_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d,
features.labels.bboxes_3d_mask)
points_in_bbox_mask = geometry.IsWithinBBox3D(points_xyz, real_bboxes_3d)
# Choose a random rotation for every real box.
num_boxes = tf.shape(real_bboxes_3d)[0]
rotation = tf.random.uniform([num_boxes],
minval=-p.max_rotation,
maxval=p.max_rotation,
seed=p.random_seed)
base_seed = p.random_seed
x_seed = base_seed
y_seed = None if base_seed is None else base_seed + 1
z_seed = None if base_seed is None else base_seed + 2
random_translate_x = tf.random.normal([num_boxes],
mean=0.0,
stddev=p.noise_std[0],
seed=x_seed)
random_translate_y = tf.random.normal([num_boxes],
mean=0.0,
stddev=p.noise_std[1],
seed=y_seed)
random_translate_z = tf.random.normal([num_boxes],
mean=0.0,
stddev=p.noise_std[2],
seed=z_seed)
translate_pose = tf.stack(
[random_translate_x, random_translate_y, random_translate_z], axis=1)
fg_xyz, fg_feature = self._Foreground(features, points_xyz, points_feature,
real_bboxes_3d, points_in_bbox_mask,
rotation, translate_pose, Transform)
# Concatenate them with the background points and features.
bg_xyz, bg_feature = self._Background(points_xyz, points_feature,
points_in_bbox_mask)
all_points = tf.concat([bg_xyz, fg_xyz], axis=0)
all_features = tf.concat([bg_feature, fg_feature], axis=0)
# Shuffle the points/features randomly.
all_points, all_features = _ConsistentShuffle((all_points, all_features),
p.random_seed)
# Padding should technically be unnecessary: the number of points before and
# after should be the same, but in practice we sometimes seem to drop a few
# points, and so we pad to make the shape fixed.
#
# TODO(vrv): Identify the source of this problem and then assert a shape
# matching check.
if 'points_padding' in features.lasers:
features.lasers.points_xyz = py_utils.PadOrTrimTo(
all_points, tf.shape(features.lasers.points_xyz))
features.lasers.points_feature = py_utils.PadOrTrimTo(
all_features, tf.shape(features.lasers.points_feature))
total_points = tf.shape(all_points)[0]
features.lasers.points_padding = 1.0 - py_utils.PadOrTrimTo(
tf.ones([total_points]), tf.shape(features.lasers.points_padding))
else:
features.lasers.points_xyz = all_points
features.lasers.points_feature = all_features
# Translate noise.
bboxes_xyz = real_bboxes_3d[..., :3]
bboxes_xyz += translate_pose[..., :3]
bboxes_dim = real_bboxes_3d[..., 3:6]
# Rotate bboxes by their corresponding rotation.
bboxes_rot = real_bboxes_3d[..., 6:]
bboxes_rot -= rotation[:, tf.newaxis]
features.labels.bboxes_3d = py_utils.PadOrTrimTo(
tf.concat([bboxes_xyz, bboxes_dim, bboxes_rot], axis=-1),
tf.shape(features.labels.bboxes_3d))
features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(
tf.ones(tf.shape(real_bboxes_3d)[0]),
tf.shape(features.labels.bboxes_3d_mask))
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
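# Usage sketch (illustrative values, not taken from this file): rotate each
# ground truth box (and the points inside it) by up to +/-pi/20 radians,
# without translation jitter, scaling, or shearing.
#
#   per_box_p = RandomBBoxTransform.Params().Set(
#       max_rotation=np.pi / 20,
#       noise_std=[0.0, 0.0, 0.0],
#       max_scaling=None,
#       max_shearing=None)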
class GroundTruthAugmentor(Preprocessor):
"""Augment bounding box labels and points from a database.
This preprocessor expects features to contain the following keys:
lasers.points_xyz of shape [P, 3]
lasers.points_feature of shape [P, F]
lasers.points_padding of shape [P]
labels.bboxes_3d of shape [L, 7]
labels.bboxes_3d_mask of shape [L]
labels.labels of shape [L]
Modifies the above features so that additional objects from
a groundtruth database are added.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'groundtruth_database', None,
'If not None, loads groundtruths from this database and adds '
'them to the current scene. Groundtruth database is expected '
'to be a TFRecord of KITTI or Waymo crops.')
p.Define(
'num_db_objects', None,
'Number of objects in the database. Because we use TFRecord '
        'we cannot easily query the number of objects efficiently.')
p.Define('max_num_points_per_bbox', 2048,
'Maximum number of points in each bbox to augment with.')
p.Define(
'filter_min_points', 0,
'Minimum number of points each database object must have '
'to be included in an example.')
p.Define(
'filter_max_points', None,
'Maximum number of points each database object must have '
'to be included in an example.')
p.Define(
'difficulty_sampling_probability', None,
        'Probability for sampling a ground truth example whose difficulty '
        'equals {0, 1, 2, 3, ...}. Example: [1.0, 1.0, 1.0, 1.0] for '
        'uniformly sampling 4 different difficulties. Default value is '
        'None = uniform sampling for all difficulties.')
p.Define(
'class_sampling_probability', None,
        'Probability for sampling a ground truth example based on its class '
        'index. Example: For KITTI the classes are [Background, Car, Van, '
        'Truck, Pedestrian, Person_sitting, Cyclist, Tram, Misc, DontCare]; '
        'using probability vector [0., 1.0, 1.0, 0., 0., 0., 0., 0., 0., 0.], '
        'we uniformly sample Car and Van. Default value is None: uses the '
        'label_filter flag and does not sample based on class.')
p.Define('filter_min_difficulty', 0,
'Filter ground truth boxes whose difficulty is < this value.')
p.Define('max_augmented_bboxes', 15,
'Maximum number of augmented bounding boxes per scene.')
p.Define(
        'label_filter', [],
        'A list of label integers; if specified, only database objects with '
        'these labels will be included in an example.')
p.Define(
        'batch_mode', False, 'Bool value to control whether the whole '
        'groundtruth database is loaded or partially loaded to save memory '
        'usage. Setting to False loads the whole ground truth database into '
        'memory. Otherwise, only a fraction of the data will be loaded into '
        'memory.')
return p
def _ReadDB(self, file_patterns):
"""Read the groundtruth database and return as a NestedMap of Tensors."""
p = self.params
def Process(record):
"""Process a groundtruth record."""
feature_map = {
'num_points': tf.io.FixedLenFeature((), tf.int64, 0),
'points': tf.io.VarLenFeature(dtype=tf.float32),
'points_feature': tf.io.VarLenFeature(dtype=tf.float32),
'bbox_3d': tf.io.VarLenFeature(dtype=tf.float32),
'label': tf.io.FixedLenFeature((), tf.int64, 0),
'difficulty': tf.io.FixedLenFeature((), tf.int64, 0),
'text': tf.io.VarLenFeature(dtype=tf.string),
}
example_data = tf.io.parse_single_example(record, feature_map)
num_points = example_data['num_points']
points = tf.reshape(_Dense(example_data['points']), [num_points, 3])
features = tf.reshape(
_Dense(example_data['points_feature']), [num_points, 1])
points_mask = tf.ones(num_points, dtype=tf.bool)
# TODO(vrv): Use random selection instead of first N points.
points = py_utils.PadOrTrimTo(points, [p.max_num_points_per_bbox, 3])
features = py_utils.PadOrTrimTo(features, [p.max_num_points_per_bbox, 1])
points_mask = py_utils.PadOrTrimTo(points_mask,
[p.max_num_points_per_bbox])
bboxes_3d = tf.reshape(_Dense(example_data['bbox_3d']), [7])
label = tf.cast(example_data['label'], tf.int32)
difficulty = tf.cast(example_data['difficulty'], tf.int32)
return (points, features, points_mask, bboxes_3d, label, difficulty)
if p.batch_mode:
# Prepare dataset for ground truth bounding boxes. Randomly shuffle the
# file patterns.
file_count = len(tf.io.gfile.glob(file_patterns))
dataset = tf.stateless_list_files(file_patterns)
dataset = dataset.apply(tf.stateless_cache_dataset())
dataset = dataset.apply(
tf.stateless_shuffle_dataset(
buffer_size=file_count, reshuffle_each_iteration=True))
dataset = dataset.interleave(
tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)
dataset = dataset.repeat()
# Only prefetch a few objects from the database to reduce memory
# consumption.
dataset = dataset.map(Process, num_parallel_calls=10)
# We need more bboxes than max_augmented_bboxes in a batch, because some
# of the boxes are filtered out.
dataset = dataset.batch(p.max_augmented_bboxes * 10)
dataset = dataset.apply(tf.stateless_cache_dataset()).prefetch(
p.max_augmented_bboxes * 30)
else:
# Prepare dataset for ground truth bounding boxes.
dataset = tf.stateless_list_files(file_patterns)
dataset = dataset.interleave(
tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)
# Read the entire dataset into memory.
dataset = dataset.take(p.num_db_objects)
dataset = dataset.map(Process, num_parallel_calls=10)
# We batch the output of the dataset into a very large Tensor, then cache
# it in memory.
dataset = dataset.batch(p.num_db_objects)
dataset = dataset.apply(tf.stateless_cache_dataset()).repeat()
iterator = dataset.make_one_shot_iterator()
input_batch = iterator.get_next()
(db_points_xyz, db_points_feature, db_points_mask, db_bboxes, db_labels,
db_difficulties) = input_batch
return py_utils.NestedMap(
points_xyz=db_points_xyz,
points_feature=db_points_feature,
points_mask=db_points_mask,
bboxes_3d=db_bboxes,
labels=db_labels,
difficulties=db_difficulties)
def _CreateExampleFilter(self, db):
"""Construct db example filter.
Args:
db: NestedMap of the following Tensors: points_mask - [N, P] - The points
mask for every object in the database, where N is the number of objects
and P is the maximum number of points per object. labels - [N] - int32
Label for each object in the database. difficulties - [N] - int32
Difficulty for each label in the database.
Returns:
A [N] boolean Tensor for each object in the database, True if
that corresponding object passes the filter.
"""
p = self.params
db_points_mask = db.points_mask
db_label = db.labels
db_difficulty = db.difficulties
num_objects_in_database = tf.shape(db_points_mask)[0]
# Filter number of objects.
points_per_object = tf.reduce_sum(tf.cast(db_points_mask, tf.int32), axis=1)
example_filter = points_per_object >= p.filter_min_points
if p.filter_max_points:
example_filter = tf.math.logical_and(
example_filter, points_per_object <= p.filter_max_points)
if p.difficulty_sampling_probability is not None:
      # Sample db based on difficulty of each example.
sampling_prob = p.difficulty_sampling_probability
db_difficulty_probability = tf.zeros_like(db_difficulty, dtype=tf.float32)
for difficulty_idx, difficulty_prob in enumerate(sampling_prob):
db_difficulty_probability += (
tf.cast(tf.equal(db_difficulty, difficulty_idx), tf.float32) *
difficulty_prob)
sampled_filter = tf.random.uniform(
tf.shape(example_filter),
minval=0,
maxval=1,
dtype=tf.float32,
seed=p.random_seed)
sampled_filter = sampled_filter < db_difficulty_probability
example_filter &= sampled_filter
else:
# Filter out db examples below min difficulty
example_filter = tf.math.logical_and(
example_filter, db_difficulty >= p.filter_min_difficulty)
example_filter = tf.reshape(example_filter, [num_objects_in_database])
db_label = tf.reshape(db_label, [num_objects_in_database])
if p.class_sampling_probability is not None:
# Sample example based on its class probability.
sampling_prob = p.class_sampling_probability
db_class_probability = tf.zeros_like(db_label, dtype=tf.float32)
for class_idx, class_prob in enumerate(sampling_prob):
db_class_probability += (
tf.cast(tf.equal(db_label, class_idx), tf.float32) * class_prob)
sampled_filter = tf.random.uniform(
tf.shape(example_filter),
minval=0,
maxval=1,
dtype=tf.float32,
seed=p.random_seed)
sampled_filter = sampled_filter < db_class_probability
example_filter &= sampled_filter
elif p.label_filter:
# Filter based on labels.
# Create a label filter where all is false
valid_labels = tf.constant(p.label_filter)
label_mask = tf.reduce_any(
tf.equal(db_label[..., tf.newaxis], valid_labels), axis=1)
example_filter = tf.math.logical_and(example_filter, label_mask)
return example_filter
# TODO(vrv): Create an overlap filter that also ensures that boxes don't
# overlap with groundtruth points, so that the scenes are more plausible.
def _FilterIndices(self, gt_bboxes_3d, db_bboxes, db_idx):
"""Identify database boxes that don't overlap with other boxes."""
    # We accomplish overlap filtering by computing the pairwise 3D IoU of all
    # boxes (groundtruth and database boxes concatenated).
num_gt_bboxes = tf.shape(gt_bboxes_3d)[0]
filtered_bboxes = tf.gather(db_bboxes, db_idx)
all_bboxes = tf.concat([gt_bboxes_3d, filtered_bboxes], axis=0)
pairwise_overlap = ops.pairwise_iou3d(all_bboxes, all_bboxes)
# We now have an M x M matrix with 1s on the diagonal and non-zero entries
# whenever a box collides with another.
#
# To increase the number of boxes selected, we filter the upper triangular
# entries so that the boxes are chosen greedily: boxes with smaller indices
# will be selected before later boxes, because earlier boxes will not appear
# to collide with later boxes, but later boxes may collide with earlier
# ones.
pairwise_overlap = tf.linalg.band_part(pairwise_overlap, -1, 0)
# We compute the sum of the IoU overlaps for all database boxes.
db_overlap_sums = tf.reduce_sum(pairwise_overlap[num_gt_bboxes:], axis=1)
# Those boxes that don't overlap with any other boxes will only have
    # a 1.0 IoU with themselves.
non_overlapping_boxes = tf.reshape(db_overlap_sums <= 1., [-1])
# Filter to select only those object ids that pass this filter.
db_idx = tf.boolean_mask(db_idx, non_overlapping_boxes)
return db_idx
def TransformFeatures(self, features):
p = self.params
tf.logging.info('Loading groundtruth database at %s' %
(p.groundtruth_database))
db = self._ReadDB(p.groundtruth_database)
original_features_shape = tf.shape(features.lasers.points_feature)
# Compute the number of bboxes to augment.
num_bboxes_in_scene = tf.reduce_sum(
tf.cast(features.labels.bboxes_3d_mask, tf.int32))
max_bboxes = tf.shape(features.labels.bboxes_3d_mask)[0]
num_augmented_bboxes = tf.minimum(max_bboxes - num_bboxes_in_scene,
p.max_augmented_bboxes)
# Compute an object index over all objects in the database.
num_objects_in_database = tf.shape(db.points_xyz)[0]
db_idx = tf.range(num_objects_in_database)
# Find those indices whose examples pass the filters, and select only those
# indices.
example_filter = self._CreateExampleFilter(db)
db_idx = tf.boolean_mask(db_idx, example_filter)
# At this point, we might still have a large number of object candidates,
# from which we only need a sample.
# To reduce the amount of computation, we randomly subsample to slightly
# more than we want to augment.
db_idx = tf.random.shuffle(
db_idx, seed=p.random_seed)[0:num_augmented_bboxes * 5]
# After filtering, further filter out the db boxes that would occlude with
# other boxes (including other database boxes).
#
# Gather the filtered ground truth bounding boxes according to the mask, so
# we can compute overlaps below.
gt_bboxes_3d_mask = tf.cast(features.labels.bboxes_3d_mask, tf.bool)
gt_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d, gt_bboxes_3d_mask)
gt_bboxes_3d = py_utils.HasShape(gt_bboxes_3d, [num_bboxes_in_scene, 7])
db_idx = self._FilterIndices(gt_bboxes_3d, db.bboxes_3d, db_idx)
# From the filtered object ids, select only as many boxes as we need.
shuffled_idx = db_idx[0:num_augmented_bboxes]
num_augmented_bboxes = tf.shape(shuffled_idx)[0]
# Gather based off the indices.
sampled_points_xyz = tf.gather(db.points_xyz, shuffled_idx)
sampled_points_feature = tf.gather(db.points_feature, shuffled_idx)
sampled_mask = tf.reshape(
tf.gather(db.points_mask, shuffled_idx),
[num_augmented_bboxes, p.max_num_points_per_bbox])
sampled_bboxes = tf.gather(db.bboxes_3d, shuffled_idx)
sampled_labels = tf.gather(db.labels, shuffled_idx)
# Mask points/features.
sampled_points_xyz = tf.boolean_mask(sampled_points_xyz, sampled_mask)
sampled_points_feature = tf.boolean_mask(sampled_points_feature,
sampled_mask)
# Flatten before concatenation with ground truths.
sampled_points_xyz = tf.reshape(sampled_points_xyz, [-1, 3])
sampled_points_feature = tf.reshape(sampled_points_feature,
[-1, original_features_shape[-1]])
sampled_bboxes = tf.reshape(sampled_bboxes, [-1, 7])
# Concatenate the samples with the ground truths.
if 'points_padding' in features.lasers:
points_mask = tf.cast(1. - features.lasers.points_padding, tf.bool)
# Densify the original points.
dense_points_xyz = tf.boolean_mask(features.lasers.points_xyz,
points_mask)
dense_points_feature = tf.boolean_mask(features.lasers.points_feature,
points_mask)
      # Concatenate the dense original points with our new sampled points.
points_xyz = tf.concat([dense_points_xyz, sampled_points_xyz], axis=0)
points_feature = tf.concat([dense_points_feature, sampled_points_feature],
axis=0)
original_points_shape = tf.shape(features.lasers.points_xyz)
features.lasers.points_xyz = py_utils.PadOrTrimTo(points_xyz,
original_points_shape)
features.lasers.points_feature = py_utils.PadOrTrimTo(
points_feature, original_features_shape)
# Compute the modified mask / padding.
final_points_mask = py_utils.PadOrTrimTo(
tf.ones(tf.shape(points_xyz)[0]),
tf.shape(features.lasers.points_padding))
features.lasers.points_padding = 1. - final_points_mask
else:
points_xyz = tf.concat([features.lasers.points_xyz, sampled_points_xyz],
axis=0)
points_feature = tf.concat(
[features.lasers.points_feature, sampled_points_feature], axis=0)
features.lasers.points_xyz = points_xyz
features.lasers.points_feature = points_feature
# Reconstruct a new, dense, bboxes_3d vector that includes the filtered
# groundtruth bounding boxes followed by the database augmented boxes.
bboxes_3d = tf.concat([gt_bboxes_3d, sampled_bboxes], axis=0)
bboxes_3d = py_utils.PadOrTrimTo(bboxes_3d, [max_bboxes, 7])
features.labels.bboxes_3d = bboxes_3d
bboxes_3d_mask = tf.ones(
num_bboxes_in_scene + num_augmented_bboxes, dtype=tf.float32)
features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(
bboxes_3d_mask, [max_bboxes])
gt_labels = tf.boolean_mask(features.labels.labels, gt_bboxes_3d_mask)
gt_labels = py_utils.HasShape(gt_labels, [num_bboxes_in_scene])
labels = tf.concat([gt_labels, sampled_labels], axis=0)
features.labels.labels = py_utils.PadOrTrimTo(labels, [max_bboxes])
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
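# Usage sketch (hypothetical paths and illustrative values, not taken from this
# file): paste up to 10 extra Car/Van crops from a groundtruth database into
# each scene.
#
#   gt_aug_p = GroundTruthAugmentor.Params().Set(
#       groundtruth_database='/path/to/kitti_gt_db.tfrecord-*',
#       num_db_objects=20000,
#       filter_min_points=5,
#       max_augmented_bboxes=10,
#       label_filter=[1, 2])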
class FrustumDropout(Preprocessor):
"""Randomly drops out points in a frustum.
All points are first converted to spherical coordinates, and then a point
  is randomly selected. All points in the frustum around that point, within
  a given phi/theta angle width and with distance to the origin greater than
  a given value, are dropped with probability = 1 - keep_prob.
Here, we can specify whether the dropped frustum is the union or intersection
of the phi and theta angle filters.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
Optionally points_padding of shape [P] corresponding to the padding.
  If points_padding is None, then all points are considered valid.
Modifies the following features:
lasers.points_xyz, lasers.points_feature, lasers.points_padding with points
randomly dropped out.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('theta_width', 0.03, 'Theta angle width for dropping points.')
p.Define('phi_width', 0.0, 'Phi angle width for dropping points.')
p.Define(
        'distance', 0.0, 'Drop points whose distance to the origin is '
        'larger than the value given here.')
p.Define(
        'keep_prob', 0.0, 'keep_prob: 1. = drop no points in the frustum, '
        '0. = drop all points, between 0 and 1 = downsample the points.')
p.Define(
'drop_type', 'union', 'Drop either the union or intersection of '
'phi width and theta width.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.phi_width < 0:
raise ValueError('phi_width must be >= 0, phi_width={}'.format(
p.phi_width))
if p.theta_width < 0:
raise ValueError('theta_width must be >= 0, theta_width={}'.format(
p.theta_width))
if p.distance < 0:
raise ValueError('distance must be >= 0, distance={}'.format(p.distance))
if p.keep_prob < 0 or p.keep_prob > 1:
raise ValueError('keep_prob must be >= 0 and <=1, keep_prob={}'.format(
p.keep_prob))
if p.drop_type not in ['union', 'intersection']:
      raise ValueError('drop_type must be union or intersection, '
                       'drop_type={}'.format(p.drop_type))
def TransformFeatures(self, features):
p = self.params
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
if 'points_padding' in features.lasers:
points_padding = features.lasers.points_padding
else:
points_padding = None
if points_padding is not None:
points_mask = tf.cast(1 - points_padding, tf.bool)
num_total_points = py_utils.GetShape(points_mask)[0]
real_points_idx = tf.boolean_mask(
tf.range(0, num_total_points, dtype=tf.int32), points_mask)
num_points = py_utils.GetShape(real_points_idx)[0]
else:
points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)
num_total_points = py_utils.GetShape(points_mask)[0]
num_points = py_utils.GetShape(points_xyz)[0]
r, theta, phi = tf.unstack(
geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)
def _PickRandomPoint():
point_idx = tf.random.uniform((),
minval=0,
maxval=num_points,
dtype=tf.int32)
if points_padding is not None:
point_idx = real_points_idx[point_idx]
return point_idx
# Pick a point at random and drop all points that are near that point in the
# frustum for distance larger than r; repeat this for both theta and phi.
if p.theta_width > 0:
theta_half_width = p.theta_width / 2.
point_idx = _PickRandomPoint()
# Points within theta width and further than distance will be dropped.
theta_drop_filter = ((theta < (theta[point_idx] + theta_half_width)) &
(theta > (theta[point_idx] - theta_half_width)) &
(r > p.distance))
else:
theta_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)
if p.phi_width > 0:
phi_half_width = p.phi_width / 2.
point_idx = _PickRandomPoint()
# Points within phi width and further than distance will be dropped.
phi_drop_filter = ((phi < (phi[point_idx] + phi_half_width)) &
(phi >
(phi[point_idx] - phi_half_width)) & (r > p.distance))
else:
phi_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)
# Create drop_filter by combining filters. This contains a filter for the
# points to be removed. One can use the intersection method to limit the
# dropped points be within both phi and theta ranges.
if p.drop_type == 'union':
drop_filter = theta_drop_filter | phi_drop_filter
elif p.drop_type == 'intersection':
drop_filter = theta_drop_filter & phi_drop_filter
if p.keep_prob == 0:
# Drop all points in drop_filter.
down_sampling_filter = drop_filter
else:
# Randomly drop points in drop_filter based on keep_prob.
sampling_drop_filter = tf.random.uniform([num_total_points],
minval=0,
maxval=1,
dtype=tf.float32)
# Points greater than the threshold (keep_prob) will be dropped.
sampling_drop_filter = sampling_drop_filter > p.keep_prob
# Instead of dropping all points in the frustum, we drop out points
# that are in the selected frustum (drop_filter).
down_sampling_filter = drop_filter & sampling_drop_filter
points_mask &= ~down_sampling_filter
if points_padding is not None:
features.lasers.points_padding = 1 - tf.cast(points_mask, tf.float32)
else:
features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)
features.lasers.points_feature = tf.boolean_mask(points_feature,
points_mask)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
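# Usage sketch (illustrative values, not taken from this file): drop all points
# that fall in a narrow theta band around a randomly chosen point and are more
# than 10m from the origin.
#
#   frustum_dropout_p = FrustumDropout.Params().Set(
#       theta_width=0.03,
#       phi_width=0.0,
#       distance=10.0,
#       keep_prob=0.0,
#       drop_type='union')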
class RepeatPreprocessor(Preprocessor):
"""Repeat a preprocessor multiple times.
  This preprocessor takes a preprocessor as a subprocessor and applies the
  subprocessor to features multiple times (repeat_count).
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('repeat_count', 1, 'Number of times the subprocessor is applied to'
' features.')
p.Define('subprocessor', None, 'One of the input preprocessors.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.subprocessor is None:
raise ValueError('No subprocessor was specified for RepeatPreprocessor.')
if p.repeat_count < 0 or not isinstance(p.repeat_count, int):
raise ValueError(
'repeat_count must be >= 0 and int, repeat_count={}'.format(
p.repeat_count))
self.CreateChild('subprocessor', p.subprocessor)
def TransformFeatures(self, features):
p = self.params
for _ in range(p.repeat_count):
features = self.subprocessor.FPropDefaultTheta(features)
return features
def TransformShapes(self, shapes):
p = self.params
for _ in range(p.repeat_count):
shapes = self.subprocessor.TransformShapes(shapes)
return shapes
def TransformDTypes(self, dtypes):
p = self.params
for _ in range(p.repeat_count):
dtypes = self.subprocessor.TransformDTypes(dtypes)
return dtypes
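# Usage sketch (illustrative composition, not taken from this file): apply a
# frustum dropout twice per example.
#
#   repeat_p = RepeatPreprocessor.Params().Set(
#       repeat_count=2,
#       subprocessor=FrustumDropout.Params().Set(theta_width=0.03))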
class RandomApplyPreprocessor(Preprocessor):
"""Randomly apply a preprocessor with certain probability.
This preprocessor takes a preprocessor as a subprocessor and apply the
subprocessor to features with certain probability.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('prob', 1.0, 'The probability the subprocessor being executed.')
p.Define('subprocessor', None, 'Params for an input preprocessor.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.subprocessor is None:
      raise ValueError(
          'No subprocessor was specified for RandomApplyPreprocessor.')
if p.prob < 0 or p.prob > 1 or not isinstance(p.prob, float):
raise ValueError(
'prob must be >= 0 and <=1 and float type, prob={}'.format(p.prob))
self.CreateChild('subprocessor', p.subprocessor)
def TransformFeatures(self, features):
p = self.params
choice = tf.random.uniform(
(), minval=0.0, maxval=1.0, seed=p.random_seed) <= p.prob
# Features is passed downstream and may be modified, we make deep copies
# here to use with tf.cond to avoid having tf.cond access updated
# versions. Note that we need one copy for each branch in case the branches
# further modify features.
features_0, features_1 = features.DeepCopy(), features.DeepCopy()
features = tf.cond(choice,
lambda: self.subprocessor.TransformFeatures(features_0),
lambda: features_1)
return features
def TransformShapes(self, shapes):
shapes_transformed = self.subprocessor.TransformShapes(shapes)
if not shapes.IsCompatible(shapes_transformed):
raise ValueError(
          'NestedMap structures are different between shapes and transformed '
'shapes. Original shapes: {}. Transformed shapes: {}'.format(
shapes, shapes_transformed))
def IsCompatibleWith(a, b):
return a.is_compatible_with(b)
if not all(
py_utils.Flatten(
py_utils.Transform(IsCompatibleWith, shapes, shapes_transformed))):
raise ValueError(
'Shapes after transformation - {} are different from original '
'shapes - {}.'.format(shapes_transformed, shapes))
return shapes
def TransformDTypes(self, dtypes):
transformed_dtypes = self.subprocessor.TransformDTypes(dtypes)
if transformed_dtypes != dtypes:
raise ValueError(
'DTypes after transformation of preprocessor - {} should be '
          'the same as {}, but got {}.'.format(self.params.subprocessor, dtypes,
transformed_dtypes))
return dtypes
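# Usage sketch (illustrative composition, not taken from this file): apply
# world scaling to only half of the examples.
#
#   random_apply_p = RandomApplyPreprocessor.Params().Set(
#       prob=0.5,
#       subprocessor=WorldScaling.Params().Set(scaling=[0.95, 1.05]))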
class ConstantPreprocessor(Preprocessor):
"""Preprocessor that produces specified constant values in a nested output."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'constants', py_utils.NestedMap(),
'Map of key names to numpy arrays of constant values to use. '
'Must be a NestedMap or dict convertible to NestedMap.')
return p
def TransformFeatures(self, features):
constants = py_utils.NestedMap(self.params.constants)
features.update(constants.Transform(tf.constant))
return features
def TransformShapes(self, shapes):
constants = py_utils.NestedMap(self.params.constants)
shapes.update(
constants.Transform(lambda x: tf.TensorShape(np.array(x).shape)))
return shapes
def TransformDTypes(self, dtypes):
constants = py_utils.NestedMap(self.params.constants)
dtypes.update(constants.Transform(lambda x: tf.as_dtype(np.array(x).dtype)))
return dtypes
class IdentityPreprocessor(Preprocessor):
"""Preprocessor that passes all inputs through.
This may be useful for situations where one wants a 'no-op' preprocessor, such
as being able to randomly choose to do nothing among a set of preprocessor
choices.
"""
def TransformFeatures(self, features):
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class RandomChoicePreprocessor(Preprocessor):
"""Randomly applies a preprocessor with specified weights.
  Each subprocessor is paired with a schedule that yields its relative weight
  at the current step. The weights are normalized into a categorical
  distribution from which a single subprocessor is sampled and applied.
  For example, if p.subprocessors = [preprocessor1, preprocessor2] and the
  schedules yield weights [1., 2.], then preprocessor1 will be applied with
  probability 1/3, and preprocessor2 will be applied with probability 2/3.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'subprocessors', [],
'Params for preprocessors. Each value should be a tuple of '
'(Preprocessor.Params(), BaseSchedule.Params()), where the schedule '
'defines the weights to use over time.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if not p.subprocessors:
raise ValueError('No subprocessors were specified.')
subprocessors, schedules = zip(*p.subprocessors)
def _FilterNonSchedules(v):
return not issubclass(getattr(v, 'cls', False), schedule.BaseSchedule)
invalid_values = [_FilterNonSchedules(s) for s in schedules]
if any(invalid_values):
raise TypeError('Not all schedule values were schedules: '
f'{invalid_values}')
self.CreateChildren('subprocessors', list(subprocessors))
self.CreateChildren('schedules', list(schedules))
def TransformFeatures(self, features):
p = self.params
choice_list = []
weight_list = []
# Pass a unique copy of the input to each branch, in case the
# subprocessor destructively modifies the features in unexpected ways.
for subp, sched in zip(self.subprocessors, self.schedules):
choice_list.append(
lambda subp=subp: subp.TransformFeatures(features.DeepCopy()))
weight_list.append(sched.Value())
weight_tensor = tf.stack(weight_list)
chosen_bin = tf.random.categorical(
tf.math.log(weight_tensor[tf.newaxis]),
1,
seed=p.random_seed,
dtype=tf.int32)[0, 0]
features = tf.switch_case(chosen_bin, branch_fns=choice_list)
return features
def TransformShapes(self, shapes):
transformed_shapes = [
subp.TransformShapes(shapes.DeepCopy()) for subp in self.subprocessors
]
if not all(transformed_shapes[0] == curr for curr in transformed_shapes):
raise ValueError('Shapes after transformations were not identical: '
f'{transformed_shapes}')
return transformed_shapes[0]
def TransformDTypes(self, dtypes):
transformed_dtypes = [
subp.TransformDTypes(dtypes.DeepCopy()) for subp in self.subprocessors
]
if not all(transformed_dtypes[0] == curr for curr in transformed_dtypes):
raise ValueError('DTypes after transformations were not identical: '
f'{transformed_dtypes}')
return transformed_dtypes[0]
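# Editor-added configuration sketch (not part of the original library): pairing
# each subprocessor with a constant-weight schedule reproduces the 1/3 vs. 2/3
# example described in the docstring above. The use of schedule.Constant is an
# assumption about the available schedule classes.
#
#   p = RandomChoicePreprocessor.Params()
#   p.subprocessors = [
#       (IdentityPreprocessor.Params(), schedule.Constant.Params().Set(value=1.)),
#       (IdentityPreprocessor.Params(), schedule.Constant.Params().Set(value=2.)),
#   ]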
class Sequence(Preprocessor):
"""Packages a sequence of preprocessors as one preprocessor."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'preprocessors', [], 'A list of preprocessors. '
'Each should be of type Preprocessor.Params().')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
self.CreateChildren('preprocessors', p.preprocessors)
def TransformFeatures(self, features):
for preprocessor in self.preprocessors:
features = preprocessor.TransformFeatures(features)
return features
def TransformShapes(self, shapes):
for preprocessor in self.preprocessors:
shapes = preprocessor.TransformShapes(shapes)
return shapes
def TransformDTypes(self, dtypes):
for preprocessor in self.preprocessors:
dtypes = preprocessor.TransformDTypes(dtypes)
return dtypes
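# Editor-added sketch (not part of the original file) showing how the
# preprocessors defined above could be chained; the `dummy_label` constant is a
# hypothetical key, not something the original pipeline defines.
def _ExampleSequenceParams():
  """Returns example Params chaining ConstantPreprocessor and IdentityPreprocessor."""
  const_p = ConstantPreprocessor.Params()
  const_p.constants = py_utils.NestedMap(dummy_label=np.array([1], dtype=np.int32))
  seq_p = Sequence.Params()
  seq_p.preprocessors = [const_p, IdentityPreprocessor.Params()]
  return seq_p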
class SparseSampler(Preprocessor):
"""Fused SparseCenterSelector and SparseCellGatherFeatures.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
Adds the following features:
anchor_centers - [num_centers, 3] - Floating point output containing the
center (x, y, z) locations for tiling anchor boxes.
cell_center_xyz - [num_centers, 3] - Floating point output containing
the center (x, y, z) locations for each cell to featurize.
cell_center_padding - [num_centers] - 0/1 padding for each center.
cell_points_xyz - [num_centers, num_neighbors, 3] - Floating point
output containing the (x, y, z) locations for each point for a given
center.
cell_feature - [num_centers, num_neighbors, F] - Floating point output
containing the features for each point for a given center.
cell_points_padding - [num_centers, num_neighbors] - 0/1 padding
for the points in each cell.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('center_selector', 'farthest', 'Method to sample centers. '
'Valid options - uniform, farthest.')
p.Define('neighbor_sampler', 'uniform', 'Method to select neighbors. '
'Valid options - uniform, closest.')
p.Define('num_centers', 16, 'The number of centers to sample.')
p.Define(
'features_preparation_layers', [],
'A list of Params for layers to run on the features before '
'performing farthest point sampling. For example, one may wish to '
'drop points out of frustum for KITTI before selecting centers. '
'Note that these layers will not mutate the original features, '
'instead, a copy will be made.')
p.Define(
'keep_z_range', (-np.inf, np.inf),
'Only points that have z coordinates within this range are kept. '
'Approximate ground-removal can be performed by specifying a '
'lower-bound on the z-range.')
p.Define('num_neighbors', 64, 'Sample these many points within the '
             'neighborhood.')
p.Define(
'max_distance', 1.0, 'Points with L2 distances from a center '
'larger than this threshold are not considered to be in the '
'neighborhood.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.features_preparation_layers:
self.CreateChildren('features_preparation_layers',
p.features_preparation_layers)
def TransformFeatures(self, features):
p = self.params
n, m = p.num_centers, p.num_neighbors
prepared_features = features.DeepCopy()
if p.features_preparation_layers:
for prep_layer in self.features_preparation_layers:
prepared_features = prep_layer.FPropDefaultTheta(prepared_features)
points_data = prepared_features.lasers
points = py_utils.HasShape(points_data.points_xyz, [-1, 3])
if 'points_padding' in points_data:
points_mask = 1 - points_data.points_padding
points = tf.boolean_mask(points, points_mask)
# If num_points < num_centers, pad points to have at least num_centers
# points.
num_points = tf.shape(points)[0]
required_num_points = tf.maximum(num_points, p.num_centers)
zeros = tf.zeros([required_num_points - num_points, 3])
points = tf.concat([points, zeros], axis=0)
num_seeded_points = points_data.get('num_seeded_points', 0)
neighbor_algorithm = 'auto'
# Based on benchmarks, the hash solution works better when the number of
# centers is >= 16 and there are at least 10k points per point cloud.
if p.num_centers >= 16:
neighbor_algorithm = 'hash'
centers, center_paddings, indices, indices_paddings = ops.sample_points(
points=tf.expand_dims(points, 0),
points_padding=tf.zeros([1, required_num_points], tf.float32),
num_seeded_points=num_seeded_points,
center_selector=p.center_selector,
neighbor_sampler=p.neighbor_sampler,
neighbor_algorithm=neighbor_algorithm,
num_centers=p.num_centers,
center_z_min=p.keep_z_range[0],
center_z_max=p.keep_z_range[1],
num_neighbors=p.num_neighbors,
max_distance=p.max_distance,
random_seed=p.random_seed if p.random_seed else -1)
centers = py_utils.HasShape(centers, [1, n])[0, :]
center_paddings = py_utils.HasShape(center_paddings, [1, n])[0, :]
indices = py_utils.HasShape(indices, [1, n, m])[0, :]
indices_paddings = py_utils.HasShape(indices_paddings, [1, n, m])[0, :]
features.cell_center_padding = center_paddings
features.cell_center_xyz = py_utils.HasShape(
tf.gather(points, centers), [n, 3])
features.anchor_centers = features.cell_center_xyz
features.cell_points_xyz = py_utils.HasShape(
tf.gather(points, indices), [n, m, 3])
features.cell_feature = tf.gather(points_data.points_feature, indices)
features.cell_points_padding = indices_paddings
return features
def TransformShapes(self, shapes):
p = self.params
n, m, f = p.num_centers, p.num_neighbors, shapes.lasers.points_feature[-1]
shapes.anchor_centers = tf.TensorShape([n, 3])
shapes.cell_center_padding = tf.TensorShape([n])
shapes.cell_center_xyz = tf.TensorShape([n, 3])
shapes.cell_points_xyz = tf.TensorShape([n, m, 3])
shapes.cell_feature = tf.TensorShape([n, m, f])
shapes.cell_points_padding = tf.TensorShape([n, m])
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_centers = tf.float32
dtypes.cell_center_padding = tf.float32
dtypes.cell_center_xyz = tf.float32
dtypes.cell_points_xyz = tf.float32
dtypes.cell_feature = tf.float32
dtypes.cell_points_padding = tf.float32
return dtypes
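# Editor-added configuration sketch for the SparseSampler above (illustrative
# values only, not defaults recommended by the original authors):
#
#   p = SparseSampler.Params()
#   p.num_centers = 256
#   p.num_neighbors = 128
#   p.max_distance = 2.75
#   p.keep_z_range = (-1.0, np.inf)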
|
The Shadowboxers are ringing in the new year with more dates on their headlining “EastWest” Tour! The tour is stopping in NYC at Brooklyn Bowl next Friday, January 13th! If you haven’t had the chance to see The Shadowboxers live, they put on an incredible show. Their set will have you dancing from start to finish. The guys have been hard at work on new music, and we’re looking forward to hearing some of the new tracks at the show!
Special VIP tickets are available for the show as well. You'll get a meet and greet and private song with the band, plus exclusive content and merch. Learn more about the package here!
Plus, the show is at Brooklyn Bowl, which combines a few of our favorite things: great music, good food (they have BBQ, fried chicken, and milk shakes) and of course bowling!
The Shadowboxers are also bringing their friends Tiny Victories along with them for the EastWest Tour.
Be sure to pick up a copy of “Build The Beat” from The Shadowboxers on iTunes so you can sing along!
Follow The Shadowboxers, Tiny Victories, and Secret Fangirls on Twitter. |
# -*- coding: utf-8 -*-
# Copyright 2013 Dr. Jan Müller
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import plistlib
import unicodedata
import sys
from xml.etree.ElementTree import Element, SubElement, tostring
"""
You should run your script via /bin/bash with all escape options ticked.
The command line should be
python yourscript.py "{query}" arg2 arg3 ...
"""
UNESCAPE_CHARACTERS = u""" ;()"""
_MAX_RESULTS_DEFAULT = 9
preferences = plistlib.readPlist('info.plist')
bundleid = preferences['bundleid']
class Item(object):
@classmethod
def unicode(cls, value):
try:
items = iter(value.items())
except AttributeError:
return unicode(value)
else:
return dict(map(unicode, item) for item in items)
def __init__(self, attributes, title, subtitle, icon=None):
self.attributes = attributes
self.title = title
self.subtitle = subtitle
self.icon = icon
def __str__(self):
return tostring(self.xml()).decode('utf-8')
def xml(self):
item = Element(u'item', self.unicode(self.attributes))
for attribute in (u'title', u'subtitle', u'icon'):
value = getattr(self, attribute)
if value is None:
continue
if len(value) == 2 and isinstance(value[1], dict):
(value, attributes) = value
else:
attributes = {}
SubElement(item, attribute, self.unicode(attributes)).text = self.unicode(value)
return item
def args(characters=None):
return tuple(unescape(decode(arg), characters) for arg in sys.argv[1:])
def config():
return _create('config')
def decode(s):
return unicodedata.normalize('NFD', s.decode('utf-8'))
def uid(uid):
return u'-'.join(map(str, (bundleid, uid)))
def unescape(query, characters=None):
for character in (UNESCAPE_CHARACTERS if (characters is None) else characters):
query = query.replace('\\%s' % character, character)
return query
def work(volatile):
path = {
True: '~/Library/Caches/com.runningwithcrayons.Alfred-2/Workflow Data',
False: '~/Library/Application Support/Alfred 2/Workflow Data'
}[bool(volatile)]
return _create(os.path.join(os.path.expanduser(path), bundleid))
def write(text):
sys.stdout.write(text)
def xml(items, maxresults=_MAX_RESULTS_DEFAULT):
root = Element('items')
for item in itertools.islice(items, maxresults):
root.append(item.xml())
return tostring(root, encoding='utf-8')
def _create(path):
if not os.path.isdir(path):
os.mkdir(path)
if not os.access(path, os.W_OK):
raise IOError('No write access: %s' % path)
return path |
Here's a cool selection of the best free flying games to play on your pc, hand picked for some great flying game fun!
There are 298 Flying games on GaHe.Com. We have chosen the best Flying games which you can play online for free and add new games daily, enjoy!
Flying games free – download now! All Flying games are 100% free, no trials, no ads, no payments. Only free full version Flying games for you. Trusted and Safe!
Description Racing Penguin (or Flying Penguin) is a physics based slide and fly game! Slide down the mountains of Antarctica and flap your wings to fly.
About the only way to make flight simulators better is to make them free flight simulators. We found two great ones for you to try.
Flying Games Free Download Big collection of free full version games for computer and PC. All listed games are absolutely free games for free download.
Forget dumb repetitive games. In Flight Pilot Simulator 3D Free, … Download now to Fly a commercial airliner in the most realistic pilot simulator!
Fly games. Free download! Best Fly mobile games.
Download free Fly games at mob.org. Always new free games for Fly. Java games and other mobile content can be easily downloaded! |
#!/usr/bin/env python
'''
Exercise 5 - Class 4
Gleydson Mazioli <gleydsonmazioli@gmail.com>
'''
import netmiko
def remote_connect(host):
'''
Connect to a remote host
'''
return netmiko.ConnectHandler(**host)
def send_command(conn, cmd):
'''
Send a command
'''
return conn.send_command(cmd)
def run_config_command(conn, cmd):
'''
Run a command in config mode
'''
return conn.send_config_set(cmd)
def main():
'''
Main function
'''
pynet2 = {
'device_type': 'cisco_ios',
'ip': '50.76.53.27',
'username': 'pyclass',
'password': '88newclass',
'port': '8022'
}
for rtr in ['pynet2']:
my_rtr = remote_connect(eval(rtr))
config_cmds = ['logging buffered 65535']
print 'Running commands on {}'.format(rtr)
print run_config_command(my_rtr, config_cmds)
print my_rtr.send_command("show run | inc logging buffered")
print '\n'
my_rtr.disconnect()
if __name__ == "__main__":
main()
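# Editor's note (sketch, not part of the original exercise): the eval(rtr) call
# above could be avoided by keeping the device definitions in a dictionary and
# looking them up by name, e.g.:
#
#   devices = {'pynet2': pynet2}
#   for name, params in devices.items():
#       my_rtr = remote_connect(params)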
|
Sebastian Soto clocked a lot of tube time during his 2018 globe trot. Hit play on his new edit, “Crudo,” above to watch the Puerto Rican soul arch through some heavies at home, Indonesia and especially Mexico. Soto’s approach to pumping XXL Puerto Escondido is drenched in both confidence and style well beyond his years.
For more angles of Soto’s Puerto Escondido bombs, click here. |
#!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test basic signet functionality"""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
# Dogecoin: Had to replace the version on this as it was invalid under AuxPoW rules.
bad_signet_blksig_block = '03006200a585d01fddeed2b0ed42703e0a048407c05509e3e55d241b3f8bb5a3002c1af2f575c83235984e7dc4afc1f30944c170462e84437ab6f2d52e16878a79e4678bd1914d5f7af7001f5f71000001010000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff025151feffffff0200f2052a010000001600149243f727dd5343293eb83174324019ec16c2630f0000000000000000776a24aa21a9ede2f61c3f71d1defd3fa999dfa36953755c690689799962b48bebd836974e8cf94c4fecc7daa2490047304402205e423a8754336ca99dbe16509b877ef1bf98d008836c725005b3c787c41ebe46022047246e4467ad7cc7f1ad98662afcaf14c115e0095a227c7b05c5182591c23e7e01000120000000000000000000000000000000000000000000000000000000000000000000000000'
class SignetBasicTest(BitcoinTestFramework):
def set_test_params(self):
self.chain = "signet"
self.num_nodes = 6
self.setup_clean_chain = True
shared_args1 = ["-signetchallenge=51"] # OP_TRUE
shared_args2 = ["-signetchallenge=50"] # Dogecoin: OP_FALSE, but we don't actually use these nodes
# we use the Bitcoin default challenge except we do it as a 2-of-2, which means it should fail
shared_args3 = ["-signetchallenge=522103ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d1e086be430210359ef5021964fe22d6f8e05b2463c9540ce96883fe3b278760f048f5189f2e6c452ae"]
self.extra_args = [
shared_args1, shared_args1,
shared_args2, shared_args2,
shared_args3, shared_args3,
]
def run_test(self):
self.log.info("basic tests using OP_TRUE challenge")
self.log.info('getmininginfo')
mining_info = self.nodes[0].getmininginfo()
assert_equal(mining_info['blocks'], 0)
assert_equal(mining_info['chain'], 'signet')
assert 'currentblocktx' not in mining_info
assert 'currentblockweight' not in mining_info
assert_equal(mining_info['networkhashps'], Decimal('0'))
assert_equal(mining_info['pooledtx'], 0)
self.nodes[0].generate(1)
# Dogecoin: No default Signet network, so pregenerated blocks are not relevant.
# self.log.info("pregenerated signet blocks check")
# height = 0
# for block in signet_blocks:
        # assert_equal(self.nodes[2].submitblock(block), None)
# height += 1
# assert_equal(self.nodes[2].getblockcount(), height)
self.log.info("pregenerated signet blocks check (incompatible solution)")
assert_equal(self.nodes[4].submitblock(bad_signet_blksig_block), 'bad-signet-blksig')
self.log.info("test that signet logs the network magic on node start")
with self.nodes[0].assert_debug_log(["Signet derived magic (message start)"]):
self.restart_node(0)
if __name__ == '__main__':
SignetBasicTest().main()
|
Pioneers in the industry, we offer steel dining table and dining table furniture from India.
We are leading manufacturers and suppliers in the industry, offering a wide array of Steel Dining Tables. These products are valued by clients for their beautiful appearance and are well suited to enhancing the interior of a living room. Our products are made using supreme quality steel, which is known for its abrasion and corrosion resistance.
To meet the requirements of esteemed clients, we manufacture and supply a wide array of Dining Table Furniture. These products are widely demanded by clients for their beautiful appearance and designs, and are well suited to enhancing the look of a living room. Our skilled professionals make these products using supreme quality material.
Looking for a Dining Table?
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import re
import time as py_time
from django.conf import settings
from common import api
from common import clock
from common import exception
from common.protocol import sms
from common.protocol import xmpp
utcnow = lambda: clock.utcnow()
_re_match_url = re.compile(r'(http://[^/]+(/[^\s]+))', re.M)
def get_url(s):
m = _re_match_url.search(s)
if not m:
return None
return m.group(1)
def get_relative_url(s):
m = _re_match_url.search(s)
if not m:
return None
return m.group(2)
def exhaust_queue(nick):
for i in xrange(1000):
try:
api.task_process_actor(api.ROOT, nick)
except exception.ApiNoTasks:
break
def exhaust_queue_any():
for i in xrange(1000):
try:
api.task_process_any(api.ROOT)
except exception.ApiNoTasks:
break
class TestXmppConnection(xmpp.XmppConnection):
def send_message(self, to_jid_list, message):
logging.debug('XMPP SEND -> %s: %s', to_jid_list, message)
for jid in to_jid_list:
xmpp.outbox.append((jid, message))
class TestSmsConnection(sms.SmsConnection):
def send_message(self, to_list, message):
to_list = self.filter_targets(to_list, message)
logging.debug('SMS SEND -> %s: %s', to_list, message)
for recp in to_list:
sms.outbox.append((recp, message))
class FakeRequest(object):
def __init__(self, **kw):
self.user = kw.get('user', None)
self.POST = kw.get('post', {})
self.GET = kw.get('get', {})
@property
def REQUEST(self):
return dict(list(self.POST.items()) + list(self.GET.items()))
class FakeMemcache(object):
""" a disappointingly full-featured fake memcache :( """
def __init__(self, *args, **kw):
self._data = {}
pass
def _get_valid(self, key):
if key not in self._data:
return None
data = self._data[key]
if data[1]:
now = py_time.mktime(utcnow().timetuple())
if now > data[1]:
#logging.info('invalid key, %s, %s > %s', key, now, data[1])
return None
#logging.info('valid key, %s returning: %s', key, data[0])
return data[0]
def set(self, key, value, time=0):
if time:
if time < 2592000: # approx 1 month
time = py_time.mktime(utcnow().timetuple()) + time
#logging.info('setting key %s to %s', key, (value, time))
self._data[key] = (value, time)
return True
def set_multi(self, mapping, time=0, key_prefix=''):
for k, v in mapping.iteritems():
self.set(key_prefix + k, v, time=time)
return []
def add(self, key, value, time=0):
if self._get_valid(key) is not None:
return False
self.set(key, value, time)
return True
def incr(self, key, delta=1):
data = self._get_valid(key)
if data is None:
return None
data_tup = self._data[key]
try:
count = int(data)
except ValueError:
return None
count += delta
self.set(key, count, time=data_tup[1])
return count
  def decr(self, key, delta=1):
    return self.incr(key, delta=-delta)
def delete(self, key, seconds=0):
# NOTE: doesn't support seconds
try:
del self._data[key]
return 2
except KeyError:
return 1
def get(self, key):
return self._get_valid(key)
def get_multi(self, keys, key_prefix=''):
out = {}
for k in keys:
v = self._get_valid(key_prefix + k)
out[k] = v
return out
class ClockOverride(object):
old = None
kw = None
def __init__(self, module, **kw):
self.kw = kw
self.old = {}
self.module = module
def override(self):
self.old = getattr(self.module, 'utcnow')
new_utcnow = lambda: (datetime.datetime.utcnow() +
datetime.timedelta(**self.kw))
setattr(self.module, 'utcnow', new_utcnow)
def reset(self):
setattr(self.module, 'utcnow', self.old)
def override_clock(module, **kw):
o = ClockOverride(module, **kw)
o.override()
return o
class SettingsOverride(object):
old = None
kw = None
def __init__(self, **kw):
self.kw = kw
self.old = {}
def override(self):
for k, v in self.kw.iteritems():
self.old[k] = getattr(settings, k, None)
setattr(settings, k, v)
def reset(self):
for k, v in self.old.iteritems():
setattr(settings, k, v)
def override(**kw):
o = SettingsOverride(**kw)
o.override()
return o
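# Editor-added usage sketch (not part of the original module): tests can shift
# a module-level clock or patch Django settings and restore them afterwards.
# The `api` module below is just an illustrative target.
#
#   o = override_clock(api, hours=1)
#   # ... run time-sensitive assertions ...
#   o.reset()
#
#   s = override(DEBUG=True)
#   # ... assertions that read settings.DEBUG ...
#   s.reset()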
|
The ChargeCard began as a Kickstarter project that was funded last summer. At the time, the product only offered support for Apple's 30-pin connector and Micro USB as Apple's new Lightning connector had yet to be introduced, but a new version for the iPhone 5 that supports Lightning is now available for purchase.
Designed to fit inside of a wallet, the ChargeCard is twice as thick as a standard credit card. It comes equipped with both a USB plug and a foldable Lightning cable, allowing the iPhone 5 to be charged via any available USB port.
Lightning ChargeCard is your iPhone's new best friend. Your iPhone 5 is your most important tool, but it's just glass and aluminum once your battery dies. With ChargeCard, you'll have a Lightning cable with you at all times, so you can juice up from any USB port and turn that glass back into an iPhone.
Currently, the ChargeCard is not MFi Certified, and it should be noted that the current beta version of iOS 7 warns users when an unauthorized accessory is used.
The ChargeCard retails for $24.99 and can be purchased from BiteMyApple.co or directly from the manufacturer's website.
Not for me. I don't like carrying a **** ton of crap on my keyring. In fact, the home alarm and car alarm fob are too much already.
The keyring only connects your phone to a computer, which means you have to have your computer with you at all times to get a charge. The ChargeCard, which I first saw on Kickstarter, requires nothing but a card you carry in your wallet/pocket. Big difference.
Umm, news flash! The ChargeCard is NOT a battery, you still need to plug it in to a USB port on any computer to charge your iOS device, just like the other keyring device mentioned.
Now that iOS7 not working with 3rd party lightning, will this thing work?
iOS7 works with 3rd party. it just may display a warning when you first plug it in.
I would think its MFi certified.
I'm going to update the post, but this accessory is not yet MFi certified. The company is working on it, apparently.
Is ChargeCard Made for iPhone? As in, is it official Lightning gear?
DISCLAIMER: Available Active USB Port required.
ummm "news flash" (love it when ppl say that, like humor, i guess) but I did confuse this product with another which contains battery and is tiny, which works well on a film shoot since it's in my pocket any/every where, though small charge but enough for emergency.
If it's for use on the go, then I would have to ask someone in that place to lend me a computer or a power brick. I guess it's cool if you already carry a cable and brick w/ you everywhere; then you just really have to carry the brick only.
True, but available USB ports, in some form or other, are becoming more popular -- many cars have them, airplane seats have them, power bars are starting to include them, etc. Airport lounges offer them. Even some restaurants do too now.
You're right, this sort of device is not strictly needed in the sense that if you know you're going somewhere where you may want to charge your phone, you will remember to bring your own cable and adapter. But having something like this is a good "better than nothing" option for when you're suddenly stuck or forgot your main charger. |
# randomizer class
# This program is part of the TUPack software package and makes use of NuPack 3.0. NuPack can be found on <www.nupack.org>
# This class creates an object performing random sequence mutations to outline the spread of the free energy predictions.
# For more info about the program, please read the user manual.
#
# written by Sander Rodenburg
# Eindhoven University of Technology
# 2012
# importing the NUPACK object, for running complexes and for reading the NuPack output files
import nupack
# importing the Strand library, containing the objects Strand and StrandRelation for implementing the DNA network
import Strand
# importing other libraries
import random
import math
import time
import os
import sys
import Tkinter
import tkFileDialog
import copy
class randomizer:
def __init__(self, nRuns, maxCxSize=1, seqFile=None, bindingFile=None, saveRuns=[]):
# defining seed for random
random.seed(time.time())
#random.seed(1)
# lists holding the Strands, duplexes and Inhibitors
self.Strands = []
self.Duplexes = []
self.Inhibitors = []
# list for keeping the order in which the sequences should be adjusted
self.adjustOrder = []
# NuPack maximum complex size
self.maxCxSize = maxCxSize
# runs in which the program should make a distinct save
self.saveRuns = saveRuns
# if the files are not defined in the class parameters, a filechooser pops up
try:
self.seqFile = open(seqFile, "r")
except (TypeError, IOError):
self.seqFile = self._chooseFile("Select sequence file")
try:
self.bindingFile = open(bindingFile, "r")
except (TypeError, IOError):
self.bindingFile = self._chooseFile("Select binding file")
# if there are still no files selected, raise an error
if self.seqFile == None or self.bindingFile == None:
sys.exit("One or more input files are missing.")
#defining file path
wd = os.path.dirname(self.seqFile.name)
# changing directory to input file path
ioDir = wd+"/NuPackIO"
if not os.path.exists(ioDir):
os.mkdir(ioDir)
os.chdir(wd+"/NuPackIO")
# initializing the NUPACK object in silent mode
self.nupack = nupack.NUPACK(prefix=os.getcwd()+"/cxFile",
paramFile=os.path.abspath(os.path.join(os.getcwd(), os.path.pardir))+"/parameters.txt",
silent=True)
# reading input files for initial sequence generations
self._readSeqFile(self.seqFile)
self._readBindingFile(self.bindingFile)
# close the files
self.seqFile.close()
self.bindingFile.close()
# adjust initial sequences
self._adjustSequenceSet()
self._adjustInhibitors()
# run the mainloop for simulated annealing and sequence mutation
self._mainloop(nRuns, False)
print "Done."
print "Files stored in " +os.getcwd()
def _mainloop(self, nRuns, screenOut=False):
gcFile = open("GC_Content.txt", "w")
feFile = open("Free Energies.txt", "w")
if screenOut:
self.printSequences()
run = 0
# for each run
for Run in range(nRuns):
run += 1
self._processMutation()
NpOut = self._runNuPack()
fe = NpOut
gc = []
for strand in self.Strands:
gc += [strand.getGC()]
if run == 1:
self._writeFeFile(fe, feFile, True)
self._writeGcFile(gc, gcFile, True)
else:
self._writeFeFile(fe, feFile, False)
self._writeGcFile(gc, gcFile, False)
# if the run is in the save list, run an extra time on another file name to prevent overwriting the files
if run in self.saveRuns:
self._saveRun("run"+str(run))
if run % 1000 == 0:
print "Done "+str(run)+" of "+str(nRuns)
if screenOut:
self.printSequences(True)
#self.printSequences(True)
gcFile.close()
feFile.close()
# copy the output files, to prevent overwriting
def _saveRun(self, title):
os.system("cp cxFile.in " +title+".in")
os.system("cp cxFile.cx " +title+ ".cx")
os.system("cp cxFile.ocx-mfe " +title+".ocx-mfe")
# print the sequences, reversed or normal
def printSequences(self, comp=False):
for seq in self.Strands:
if seq.Type == "binding" or seq.Type == "inhibitor":
if comp:
seq.Sequence.reverse()
print seq.Sequence + " (REVERSED)"
seq.Sequence.reverse()
else:
print seq.Sequence
else:
print seq.Sequence
print "\n=================\n"
# write the GC file
def _writeGcFile(self, gcList, gcFile, printHeader=True):
# if the header should be written
if printHeader:
header = ""
for strand in self.Strands:
header += strand.Name + "\t"
gcFile.write(header.rstrip("\t") + "\n")
gcData = ""
for strandGC in gcList:
gcData += str(strandGC) + "\t"
gcFile.write(gcData.rstrip("\t") + "\n")
# function for writing the output file containing the errors
def _writeErrFile(self, errList, errFile, printHeader=True):
if printHeader:
errFile.write("Error:\tdError:\n")
errFile.write(str(errList[0])+"\t"+str(errList[1])+"\n")
# function for reading and writing NuPack complexes
def _runNuPack(self):
# write the nupack input files '.in' and '.list'
self._writeNpInput("cxFile", self.maxCxSize)
# run the nupack binary complexes
self.nupack.runComplexes()
# add the output to the free energy list
NpOut = self.nupack.readLastOutput()
return NpOut
# makes a random mutation in a sequence, and adjusts this in dependant sequences
def _processMutation(self):
# make a random mutation
mutated = self._makeRandomMutation()
seqList = []
for strand in self.Strands:
seqList.append(strand.Name)
# make the order in which the sequences should be made complementary
self.adjustOrder = []
self._makeAdjustOrder(mutated, seqList)
# make the sequences and the inhibitors complementary
self._adjustSequenceSet()
self._adjustInhibitors()
# makes the strands and inhibitors complementary in the right order
def _adjustInhibitors(self):
for duplex in self.Inhibitors:
if duplex.RelationType == "inhibiting":
duplex.adjustSequences(duplex.Strand1.Name)
for duplex in self.Inhibitors:
if duplex.RelationType == "binding":
duplex.adjustSequences(duplex.Strand2.Name)
# makes all strands complementary
def _adjustSequenceSet(self):
# if there is a specified order
if self.adjustOrder != []:
for order in self.adjustOrder:
dupIndex = 0
for dup in self.Duplexes:
if (dup.Strand1.Name == order[0] and dup.Strand2.Name == order[1]):
dup.adjustSequences(dup.Strand1.Name)
if (dup.Strand1.Name == order[1] and dup.Strand2.Name == order[0]):
dup.adjustSequences(dup.Strand2.Name)
# if the order is not important
else:
for duplex in self.Duplexes:
if duplex.Strand1.Mutated and duplex.Strand2.Mutated:
pass
if duplex.Strand1.Mutated and not duplex.Strand2.Mutated:
duplex.adjustSequences(duplex.Strand1.Name)
if not duplex.Strand1.Mutated and duplex.Strand2.Mutated:
duplex.adjustSequences(duplex.Strand2.Name)
if not duplex.Strand1.Mutated and not duplex.Strand2.Mutated:
duplex.adjustSequences(duplex.Strand1.Name)
for strand in self.Strands:
strand.Mutated = False
# make a random mutation
def _makeRandomMutation(self):
templates = []
index = []
for strand in self.Strands:
# only templates can be mutated
if strand.Type == "template":
templates += [strand]
# pick a random template strand
template = random.choice(templates)
# randomize the whole sequence
template.randomize()
return template
# makes the order in which the sequences should be adjusted
def _makeAdjustOrder(self, baseStrand, seqList):
if baseStrand.Name in seqList:
seqList.remove(baseStrand.Name)
for dup in self.Duplexes:
Continue = False
if dup.Strand1.Name == baseStrand.Name:
compStrand = dup.Strand2
Continue = True
if dup.Strand2.Name == baseStrand.Name:
compStrand = dup.Strand1
Continue = True
if Continue:
if compStrand.Name in seqList:
self.adjustOrder += [[baseStrand.Name, compStrand.Name]]
self._makeAdjustOrder(compStrand, seqList)
# function for reading the sequence input
def _readSeqFile(self, seqFile):
try:
# reading the sequences file
ID = 1
for line in seqFile:
line = line.strip("\n").strip("\r").split("\t")
newStrand = Strand.Strand(line[1].strip("\r"), line[0], ID)
self.Strands += [newStrand]
newStrand.defineN()
if line[2] == "T":
newStrand.Type = "template"
elif line[2] == "B":
newStrand.Type = "binding"
elif line[2] == "I":
newStrand.Type = "inhibitor"
ID += 1
#print newStrand.Name, newStrand.Type
except IndexError:
sys.exit("The sequence file has a wrong format. Check the user manual for the right file formats.")
# reads the binding file
def _readBindingFile(self, bindingFile):
try:
# for each line in the binding file
for line in bindingFile:
line = line.strip("\n").strip("\r").split("\t")
strandFound = False
for strand in self.Strands:
if strand.Name == line[0]:
Strand_1 = strand
strandFound = True
if strand.Name == line[1]:
Strand_2 = strand
strandFound = True
if strandFound == False:
sys.exit(line[0]+" or "+line[1]+" is not defined in sequence file.")
# if type is inhibitor, define in the duplex object whether it is a normal binding or an inhibiting duplex
if Strand_2.Type == "inhibitor":
if line[4] == "B":
duplex = Strand.StrandRelation(Strand_1, Strand_2, "binding")
duplex.setImmutable()
elif line[4] == "I":
duplex = Strand.StrandRelation(Strand_1, Strand_2, "inhibiting")
else:
sys.exit("Inhibitor binding types must be specified.")
duplex.TargetEnergy = float(line[3])
duplex.defineBindingStructure(line[2], ["(", ")", "#"])
self.Inhibitors += [duplex]
else:
duplex = Strand.StrandRelation(Strand_1, Strand_2)
duplex.TargetEnergy = float(line[3])
duplex.defineBindingStructure(line[2], ["(", ")"])
self.Duplexes += [duplex]
except IndexError:
sys.exit("The binding file has a wrong format. Check the user manual for the right file formats.")
# function for writing the free energies to a file.
def _writeFeFile(self, feList, feFile, printHeader=True):
header = ""
freeE = ""
for j in range(len(feList)):
# if the permutation is done with one strand
if len(feList[j]) == 3:
for strand in self.Strands:
if strand.ID == feList[j][0]:
if printHeader:
header += strand.Name+"\t"
# add free energies to list
freeE += str(feList[j][1]) +"\t"
# if the permutation is done with two strands
if len(feList[j]) == 4:
for duplex in self.Duplexes + self.Inhibitors:
if (duplex.Strand1.ID == feList[j][0] and duplex.Strand2.ID == feList[j][1]) or (duplex.Strand1.ID == feList[j][1] and duplex.Strand2.ID == feList[j][0]):
if printHeader:
header += duplex.Name+"\t"
freeE += str(feList[j][2]) +"\t"
if printHeader:
feFile.write(header.rstrip("\t") + "\n")
feFile.write(freeE.rstrip("\t") + "\n")
# function for writing the NuPack input files
def _writeNpInput(self, fileName, maxCxSize):
# open the input files for NuPack input
cxFile = open(fileName+".in", "w")
if maxCxSize == 1:
listFile = open(fileName+".list", "w")
# define the number of sequences and the maximum complex size
nSeqs = len(self.Strands)
# write the '.in' file
cxFile.write(str(nSeqs)+"\n")
for strand in self.Strands:
cxFile.write(str(strand.Sequence)+"\n")
cxFile.write(str(maxCxSize))
# close the '.in' file
cxFile.close()
if maxCxSize == 1:
output = ""
# write the '.list' file
# write normal duplexes
for duplex in self.Duplexes:
id1 = duplex.Strand1.ID
id2 = duplex.Strand2.ID
output += str(id1)+" "+str(id2)+"\n"
if self.Inhibitors != []:
for duplex in self.Inhibitors:
id1 = duplex.Strand1.ID
id2 = duplex.Strand2.ID
output += str(id1)+" "+str(id2)+"\n"
output = output.rstrip("\n")
listFile.write(output)
# close the '.list' file
listFile.close()
# function for popping up a file chooser
def _chooseFile(self, Title):
# initialize the Tk object
root = Tkinter.Tk()
# withdraw the main window
root.withdraw()
# open file chooser on the current directory
File = tkFileDialog.askopenfile(parent=root, mode='r', title=Title, initialdir=os.getcwd())
# exit the windows
root.quit()
# return files
return File
|
A solid disaster recovery and business continuity strategy is crucial to protecting your organization's future in the event of a natural disaster or other emergency that causes downtime. In this guide, we've compiled our best resources to ensure that your disaster recovery (DR) plan is complete, up to date and appropriate for your company's needs.
First, we cover the basics of disaster recovery planning and testing, with expert advice on planning tools, meeting your recovery time objectives, incorporating change management and DR, and more. Next, we delve into the specifics of disaster recovery planning, including site selection, colocation facilities, DR consultants and the latest backup technologies. Finally, we cover the growing trend of incorporating virtualization into disaster recovery and business continuity planning.
Who should have a hand in your data recovery planning? How should you formulate your budget? This guide answers these planning questions, and features an IT DR budget template.
These ten items will pay dividends for your disaster recovery plan.
This is an essential overview of disaster recovery planning, and maintaining plans down the line.
Download a free sample DR plan, courtesy of SearchDisasterRecovery.com.
Having many sites for IT operations is important to any disaster recovery plan. This tip outlines hot, warm, cold and mobile sites.
Improving IT process maturity by implementing the IT Infrastructure Library (ITIL) and change management strategies can help prevent data center downtime. But taking shortcuts and skipping steps are common causes of data center disasters.
A business impact analysis in your DR plan is essential. Follow these five tips for a better analysis.
Cloud computing will help with disaster recovery – find out what to look for before using it in your plan.
There are many factors to take into consideration before your disaster recovery plan can be successfully implemented. DR expert Bill Peldzus offers a checklist to ensure your organization's bases are covered.
Streamline your disaster recovery planning strategy with this DR tutorial, which includes in-depth case studies and success stories.
Learn more about information lifecycle management (ILM) for a mainframe data recovery environment, and its many benefits, including less storage, faster backup and recovery, and how it can cut costs.
Data growth can render your current DR plan ineffective and prevent you from meeting recovery time objectives (RTOs). Get a handle on your RTOs with better data management policies and technologies like data deduplication and storage tiering.
Disaster recovery planning tools: Are they worth it?
Your organization's size, level of complexity and level of staff training determines whether disaster recovery planning tools are worth the price.
How many data centers are sufficient for disaster recovery? Two, three, or more?
IT departments are facing pressure to reduce costs and increase efficiency while at the same time ensuring disaster preparedness. By performing a business impact analysis that determines recovery time and recovery point objectives, you can determine how many data centers are necessary for your DR plan.
Is your organization's disaster recovery plan eating up your budget? An expert discusses common DR budget wasters, including failure to maintain the plan, not deploying virtualization, lack of CEO involvement and more.
Even if your company has a data recovery plan in place, it still might not be mature enough. See how you can test its maturity.
Are there holes in your data recovery plan? How should the plan be tested? This e-guide helps with the fine tuning and testing of your plan.
A plan that stays current with the times will need all company management on board for its success.
Is your disaster recovery plan out of date?
If your DR plan is merely an afterthought, it will become out of date quickly. To stay current, tie your plan to your configuration management process, test regularly and track your RTO trends.
DR isn't impossible because of the cost. These are some ideas for disaster recovery planning and testing that won't break your IT budget.
Disaster recovery expert Kelley Okolita helped Hanover Insurance Group develop an airtight DR plan through extensive documentation, testing and employee training.
Are your disaster recovery goals realistic? Have you tested it end to end? Is the entire business invested in the plan, or just IT? These are just a few factors that can make or break your DR plan.
In this interview we talk with Phil Larson of American Fidelity Assurance Company about how the organization formed its disaster recovery and business continuity strategy, which involves comprehensive testing twice a year, on a tight budget.
Using carrier colocation for disaster recovery offers many benefits, including well-hardened facilities, strategic locations, efficient means of communication and quick response times. In this tip, a DR expert discusses how one colocation company survived a serious disaster and how investing in carrier colocation can be a smart move for your organization.
Business continuity planning consultants: Are they worth the money?
If your organization lacks staff members with sufficient experience in business continuity planning (BCP), you need to hire an outside BCP consultancy firm. However, not all BCP firms are created equal. Our expert outlines what you should look for to ensure that you hire the right firm to fit your business needs.
Disaster recovery strategies: Should you outsource, manage in-house or partner?
When selecting your disaster recovery site, you can choose to do it yourself, collaborate with another organization or outsource your plan to a DR provide such as SunGard.
There's no one-size-fits-all answer to choosing how close to place an alternative data center site. An expert recommends carefully evaluating your disaster risk, documenting how a potential site coincides with your business needs and ensuring that the decision makers have all the facts to take into consideration.
A tape library system is key to a strong data center disaster recovery plan and can be a cost-effective recovery option. An expert discusses the latest tape library trends in this tip.
Despite the increased popularity of other forms of data backup, tape backup still has a valuable place in a disaster recovery plan. In this interview, also available as an MP3, an expert answers common questions about tape backup and DR.
Many data centers are addressing tighter RTOs with replication technologies. W. Curtis Preston, Vice President of Data Protection Services at GlassHouse Technologies, answers frequently asked questions about replication in a disaster recovery plan.
Learn all you need to know about backup and recovery technologies, including tape libraries, disk backup, cloud services, data deduplication and more, with these tutorials.
You're likely already using virtualization somewhere in your data center. But did you know that virtualization technology can also be a solid addition to your disaster recovery and business continuity plan? An expert discusses the DR benefits that virtualization can bring to the table, including quicker recovery time, easier recovery at a DR site with virtual machine disk formats and more.
If your data center runs Linux in a VMware environment, it's important to plan for your specific business needs and not simply accept the default settings, which are designed for the simplest scenarios. By changing the default settings according to your system's requirements, you can avoid disaster recovery complications.
Business continuity expert Paul Kirvan discusses which server virtualization technologies are best for DR as well as how they can improve recovery times, help with failover and more.
This tutorial covers how to implement virtual machine disaster recovery, including VMware Site Recovery Manager, geoclustering and SAN replication.
A physical-to-virtual disaster recovery plan can take advantage of physical servers that aren't good candidates for virtualization and potentially save the day in the event of a disaster. In this tip, an expert explains how to schedule regular P2V conversions of "bad apple" physical servers and discusses the tools needed for the process.
Matt Stansberry is SearchDataCenter.com's senior site editor. Write to him about your data center concerns at mstansberry@techtarget.com. And check out our Data center facilities pro blog.
Is disaster recovery testing putting your company at risk? |
# Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
# Sergey Feldman <sergeyfeldman@gmail.com>
# License: BSD 3 clause
import numbers
import warnings
from collections import Counter
import numpy as np
import numpy.ma as ma
from scipy import sparse as sp
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils._mask import _get_mask
from ..utils import is_scalar_nan
def _check_inputs_dtype(X, missing_values):
if (X.dtype.kind in ("f", "i", "u") and
not isinstance(missing_values, numbers.Real)):
raise ValueError("'X' and 'missing_values' types are expected to be"
" both numerical. Got X.dtype={} and "
" type(missing_values)={}."
.format(X.dtype, type(missing_values)))
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
if array.dtype == object:
# scipy.stats.mode is slow with object dtype array.
# Python Counter is more efficient
counter = Counter(array)
most_frequent_count = counter.most_common(1)[0][1]
# tie breaking similarly to scipy.stats.mode
most_frequent_value = min(
value for value, count in counter.items()
if count == most_frequent_count
)
else:
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
# tie breaking similarly to scipy.stats.mode
return min(most_frequent_value, extra_value)
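# Editor-added worked examples of _most_frequent (values traced from the logic
# above, not taken from the scikit-learn documentation):
#   _most_frequent(np.array([2., 2., 3.]), extra_value=0, n_repeat=0)  -> 2.0
#   _most_frequent(np.array([]), extra_value=7, n_repeat=3)            -> 7
#   _most_frequent(np.array([3, 3]), extra_value=5, n_repeat=2)        -> 3  (tie broken by min)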
class _BaseImputer(TransformerMixin, BaseEstimator):
"""Base class for all imputers.
It adds automatically support for `add_indicator`.
"""
def __init__(self, *, missing_values=np.nan, add_indicator=False):
self.missing_values = missing_values
self.add_indicator = add_indicator
def _fit_indicator(self, X):
"""Fit a MissingIndicator."""
if self.add_indicator:
self.indicator_ = MissingIndicator(
missing_values=self.missing_values, error_on_new=False)
self.indicator_._fit(X, precomputed=True)
else:
self.indicator_ = None
def _transform_indicator(self, X):
"""Compute the indicator mask.'
Note that X must be the original data as passed to the imputer before
any imputation, since imputation may be done inplace in some cases.
"""
if self.add_indicator:
if not hasattr(self, 'indicator_'):
raise ValueError(
"Make sure to call _fit_indicator before "
"_transform_indicator"
)
return self.indicator_.transform(X)
def _concatenate_indicator(self, X_imputed, X_indicator):
"""Concatenate indicator mask with the imputed data."""
if not self.add_indicator:
return X_imputed
hstack = sp.hstack if sp.issparse(X_imputed) else np.hstack
if X_indicator is None:
raise ValueError(
"Data from the missing indicator are not provided. Call "
"_fit_indicator and _transform_indicator in the imputer "
"implementation."
)
return hstack((X_imputed, X_indicator))
def _more_tags(self):
return {'allow_nan': is_scalar_nan(self.missing_values)}
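# Editor-added usage sketch (illustrative, not from the scikit-learn docs): with
# add_indicator=True the transform output is the imputed matrix horizontally
# stacked with binary missing-indicator columns, e.g.
#
#   imp = SimpleImputer(strategy='mean', add_indicator=True)
#   X = [[1., np.nan], [2., 4.], [np.nan, 5.]]
#   X_out = imp.fit_transform(X)   # shape (3, 4): 2 imputed + 2 indicator columns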
class SimpleImputer(_BaseImputer):
"""Imputation transformer for completing missing values.
Read more in the :ref:`User Guide <impute>`.
.. versionadded:: 0.20
`SimpleImputer` replaces the previous `sklearn.preprocessing.Imputer`
estimator which is now removed.
Parameters
----------
missing_values : int, float, str, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
strategy : string, default='mean'
The imputation strategy.
- If "mean", then replace missing values using the mean along
each column. Can only be used with numeric data.
- If "median", then replace missing values using the median along
each column. Can only be used with numeric data.
- If "most_frequent", then replace missing using the most frequent
value along each column. Can be used with strings or numeric data.
If there is more than one such value, only the smallest is returned.
- If "constant", then replace missing values with fill_value. Can be
used with strings or numeric data.
.. versionadded:: 0.20
strategy="constant" for fixed value imputation.
fill_value : string or numerical value, default=None
When strategy == "constant", fill_value is used to replace all
occurrences of missing_values.
If left to the default, fill_value will be 0 when imputing numerical
data and "missing_value" for strings or object data types.
verbose : integer, default=0
Controls the verbosity of the imputer.
copy : boolean, default=True
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible. Note that, in the following cases,
a new copy will always be made, even if `copy=False`:
- If X is not an array of floating values;
- If X is encoded as a CSR matrix;
- If add_indicator=True.
add_indicator : boolean, default=False
If True, a :class:`MissingIndicator` transform will stack onto output
of the imputer's transform. This allows a predictive estimator
to account for missingness despite imputation. If a feature has no
missing values at fit/train time, the feature won't appear on
the missing indicator even if there are missing values at
transform/test time.
Attributes
----------
statistics_ : array of shape (n_features,)
The imputation fill value for each feature.
Computing statistics can result in `np.nan` values.
During :meth:`transform`, features corresponding to `np.nan`
statistics will be discarded.
indicator_ : :class:`~sklearn.impute.MissingIndicator`
Indicator used to add binary indicators for missing values.
``None`` if add_indicator is False.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
IterativeImputer : Multivariate imputation of missing values.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import SimpleImputer
>>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
>>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
SimpleImputer()
>>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
>>> print(imp_mean.transform(X))
[[ 7. 2. 3. ]
[ 4. 3.5 6. ]
[10. 3.5 9. ]]
Notes
-----
Columns which only contained missing values at :meth:`fit` are discarded
upon :meth:`transform` if strategy is not "constant".
"""
def __init__(self, *, missing_values=np.nan, strategy="mean",
fill_value=None, verbose=0, copy=True, add_indicator=False):
super().__init__(
missing_values=missing_values,
add_indicator=add_indicator
)
self.strategy = strategy
self.fill_value = fill_value
self.verbose = verbose
self.copy = copy
def _validate_input(self, X, in_fit):
allowed_strategies = ["mean", "median", "most_frequent", "constant"]
if self.strategy not in allowed_strategies:
raise ValueError("Can only use these strategies: {0} "
" got strategy={1}".format(allowed_strategies,
self.strategy))
if self.strategy in ("most_frequent", "constant"):
# If input is a list of strings, dtype = object.
# Otherwise ValueError is raised in SimpleImputer
# with strategy='most_frequent' or 'constant'
# because the list is converted to Unicode numpy array
if isinstance(X, list) and \
any(isinstance(elem, str) for row in X for elem in row):
dtype = object
else:
dtype = None
else:
dtype = FLOAT_DTYPES
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
try:
X = self._validate_data(X, reset=in_fit,
accept_sparse='csc', dtype=dtype,
force_all_finite=force_all_finite,
copy=self.copy)
except ValueError as ve:
if "could not convert" in str(ve):
new_ve = ValueError("Cannot use {} strategy with non-numeric "
"data:\n{}".format(self.strategy, ve))
raise new_ve from None
else:
raise ve
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ("i", "u", "f", "O"):
raise ValueError("SimpleImputer does not support data with dtype "
"{0}. Please provide either a numeric array (with"
" a floating point or integer dtype) or "
"categorical data represented either as an array "
"with integer dtype or an array of string values "
"with an object dtype.".format(X.dtype))
return X
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : SimpleImputer
"""
X = self._validate_input(X, in_fit=True)
# default fill_value is 0 for numerical input and "missing_value"
# otherwise
if self.fill_value is None:
if X.dtype.kind in ("i", "u", "f"):
fill_value = 0
else:
fill_value = "missing_value"
else:
fill_value = self.fill_value
# fill_value should be numerical in case of numerical input
if (self.strategy == "constant" and
X.dtype.kind in ("i", "u", "f") and
not isinstance(fill_value, numbers.Real)):
raise ValueError("'fill_value'={0} is invalid. Expected a "
"numerical value when imputing numerical "
"data".format(fill_value))
if sp.issparse(X):
# missing_values = 0 not allowed with sparse data as it would
# force densification
if self.missing_values == 0:
raise ValueError("Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead.")
else:
self.statistics_ = self._sparse_fit(X,
self.strategy,
self.missing_values,
fill_value)
else:
self.statistics_ = self._dense_fit(X,
self.strategy,
self.missing_values,
fill_value)
return self
def _sparse_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on sparse data."""
missing_mask = _get_mask(X, missing_values)
mask_data = missing_mask.data
n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
statistics = np.empty(X.shape[1])
if strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
# fill_value in each column
statistics.fill(fill_value)
else:
for i in range(X.shape[1]):
column = X.data[X.indptr[i]:X.indptr[i + 1]]
mask_column = mask_data[X.indptr[i]:X.indptr[i + 1]]
column = column[~mask_column]
# combine explicit and implicit zeros
mask_zeros = _get_mask(column, 0)
column = column[~mask_zeros]
n_explicit_zeros = mask_zeros.sum()
n_zeros = n_implicit_zeros[i] + n_explicit_zeros
if strategy == "mean":
s = column.size + n_zeros
statistics[i] = np.nan if s == 0 else column.sum() / s
elif strategy == "median":
statistics[i] = _get_median(column,
n_zeros)
elif strategy == "most_frequent":
statistics[i] = _most_frequent(column,
0,
n_zeros)
super()._fit_indicator(missing_mask)
return statistics
def _dense_fit(self, X, strategy, missing_values, fill_value):
"""Fit the transformer on dense data."""
missing_mask = _get_mask(X, missing_values)
masked_X = ma.masked_array(X, mask=missing_mask)
super()._fit_indicator(missing_mask)
# Mean
if strategy == "mean":
mean_masked = np.ma.mean(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
mean = np.ma.getdata(mean_masked)
mean[np.ma.getmask(mean_masked)] = np.nan
return mean
# Median
elif strategy == "median":
median_masked = np.ma.median(masked_X, axis=0)
# Avoid the warning "Warning: converting a masked element to nan."
median = np.ma.getdata(median_masked)
median[np.ma.getmaskarray(median_masked)] = np.nan
return median
# Most frequent
elif strategy == "most_frequent":
# Avoid use of scipy.stats.mstats.mode due to the required
# additional overhead and slow benchmarking performance.
# See Issue 14325 and PR 14399 for full discussion.
            # To be able to access the elements by columns
X = X.transpose()
mask = missing_mask.transpose()
if X.dtype.kind == "O":
most_frequent = np.empty(X.shape[0], dtype=object)
else:
most_frequent = np.empty(X.shape[0])
for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
row_mask = np.logical_not(row_mask).astype(bool)
row = row[row_mask]
most_frequent[i] = _most_frequent(row, np.nan, 0)
return most_frequent
# Constant
elif strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
# fill_value in each column
return np.full(X.shape[1], fill_value, dtype=X.dtype)
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
X_imputed : {ndarray, sparse matrix} of shape \
(n_samples, n_features_out)
`X` with imputed values.
"""
check_is_fitted(self)
X = self._validate_input(X, in_fit=False)
statistics = self.statistics_
if X.shape[1] != statistics.shape[0]:
raise ValueError("X has %d features per sample, expected %d"
% (X.shape[1], self.statistics_.shape[0]))
# compute mask before eliminating invalid features
missing_mask = _get_mask(X, self.missing_values)
# Delete the invalid columns if strategy is not constant
if self.strategy == "constant":
valid_statistics = statistics
valid_statistics_indexes = None
else:
# same as np.isnan but also works for object dtypes
invalid_mask = _get_mask(statistics, np.nan)
valid_mask = np.logical_not(invalid_mask)
valid_statistics = statistics[valid_mask]
valid_statistics_indexes = np.flatnonzero(valid_mask)
if invalid_mask.any():
missing = np.arange(X.shape[1])[invalid_mask]
if self.verbose:
warnings.warn("Deleting features without "
"observed values: %s" % missing)
X = X[:, valid_statistics_indexes]
# Do actual imputation
if sp.issparse(X):
if self.missing_values == 0:
raise ValueError("Imputation not possible when missing_values "
"== 0 and input is sparse. Provide a dense "
"array instead.")
else:
# if no invalid statistics are found, use the mask computed
# before, else recompute mask
if valid_statistics_indexes is None:
mask = missing_mask.data
else:
mask = _get_mask(X.data, self.missing_values)
indexes = np.repeat(
np.arange(len(X.indptr) - 1, dtype=int),
np.diff(X.indptr))[mask]
X.data[mask] = valid_statistics[indexes].astype(X.dtype,
copy=False)
else:
# use mask computed before eliminating invalid mask
if valid_statistics_indexes is None:
mask_valid_features = missing_mask
else:
mask_valid_features = missing_mask[:, valid_statistics_indexes]
n_missing = np.sum(mask_valid_features, axis=0)
values = np.repeat(valid_statistics, n_missing)
coordinates = np.where(mask_valid_features.transpose())[::-1]
X[coordinates] = values
X_indicator = super()._transform_indicator(missing_mask)
return super()._concatenate_indicator(X, X_indicator)
def inverse_transform(self, X):
"""Convert the data back to the original representation.
Inverts the `transform` operation performed on an array.
This operation can only be performed after :class:`SimpleImputer` is
instantiated with `add_indicator=True`.
Note that ``inverse_transform`` can only invert the transform in
features that have binary indicators for missing values. If a feature
has no missing values at ``fit`` time, the feature won't have a binary
indicator, and the imputation done at ``transform`` time won't be
inverted.
.. versionadded:: 0.24
Parameters
----------
X : array-like of shape \
(n_samples, n_features + n_features_missing_indicator)
The imputed data to be reverted to original data. It has to be
an augmented array of imputed data and the missing indicator mask.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
The original X with missing values as it was prior
to imputation.
"""
check_is_fitted(self)
if not self.add_indicator:
raise ValueError("'inverse_transform' works only when "
"'SimpleImputer' is instantiated with "
"'add_indicator=True'. "
f"Got 'add_indicator={self.add_indicator}' "
"instead.")
n_features_missing = len(self.indicator_.features_)
non_empty_feature_count = X.shape[1] - n_features_missing
array_imputed = X[:, :non_empty_feature_count].copy()
missing_mask = X[:, non_empty_feature_count:].astype(bool)
n_features_original = len(self.statistics_)
shape_original = (X.shape[0], n_features_original)
X_original = np.zeros(shape_original)
X_original[:, self.indicator_.features_] = missing_mask
full_mask = X_original.astype(bool)
imputed_idx, original_idx = 0, 0
while imputed_idx < len(array_imputed.T):
if not np.all(X_original[:, original_idx]):
X_original[:, original_idx] = array_imputed.T[imputed_idx]
imputed_idx += 1
original_idx += 1
else:
original_idx += 1
X_original[full_mask] = self.missing_values
return X_original
class MissingIndicator(TransformerMixin, BaseEstimator):
"""Binary indicators for missing values.
Note that this component typically should not be used in a vanilla
:class:`Pipeline` consisting of transformers and a classifier, but rather
could be added using a :class:`FeatureUnion` or :class:`ColumnTransformer`.
Read more in the :ref:`User Guide <impute>`.
.. versionadded:: 0.20
Parameters
----------
missing_values : int, float, string, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to `np.nan`, since `pd.NA` will be converted to `np.nan`.
features : {'missing-only', 'all'}, default='missing-only'
Whether the imputer mask should represent all or a subset of
features.
- If 'missing-only' (default), the imputer mask will only represent
features containing missing values during fit time.
- If 'all', the imputer mask will represent all features.
sparse : bool or 'auto', default='auto'
Whether the imputer mask format should be sparse or dense.
- If 'auto' (default), the imputer mask will be of same type as
input.
- If True, the imputer mask will be a sparse matrix.
- If False, the imputer mask will be a numpy array.
error_on_new : bool, default=True
If True, transform will raise an error when there are features with
missing values in transform that have no missing values in fit. This is
applicable only when `features='missing-only'`.
Attributes
----------
features_ : ndarray, shape (n_missing_features,) or (n_features,)
The features indices which will be returned when calling ``transform``.
They are computed during ``fit``. For ``features='all'``, it is
        equal to ``range(n_features)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import MissingIndicator
>>> X1 = np.array([[np.nan, 1, 3],
... [4, 0, np.nan],
... [8, 1, 0]])
>>> X2 = np.array([[5, 1, np.nan],
... [np.nan, 2, 3],
... [2, 4, 0]])
>>> indicator = MissingIndicator()
>>> indicator.fit(X1)
MissingIndicator()
>>> X2_tr = indicator.transform(X2)
>>> X2_tr
array([[False, True],
[ True, False],
[False, False]])
"""
def __init__(self, *, missing_values=np.nan, features="missing-only",
sparse="auto", error_on_new=True):
self.missing_values = missing_values
self.features = features
self.sparse = sparse
self.error_on_new = error_on_new
def _get_missing_features_info(self, X):
"""Compute the imputer mask and the indices of the features
containing missing values.
Parameters
----------
X : {ndarray or sparse matrix}, shape (n_samples, n_features)
The input data with missing values. Note that ``X`` has been
checked in ``fit`` and ``transform`` before to call this function.
Returns
-------
imputer_mask : {ndarray or sparse matrix}, shape \
(n_samples, n_features)
The imputer mask of the original data.
features_with_missing : ndarray, shape (n_features_with_missing)
The features containing missing values.
"""
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if sp.issparse(X):
imputer_mask.eliminate_zeros()
if self.features == 'missing-only':
n_missing = imputer_mask.getnnz(axis=0)
if self.sparse is False:
imputer_mask = imputer_mask.toarray()
elif imputer_mask.format == 'csr':
imputer_mask = imputer_mask.tocsc()
else:
if not self._precomputed:
imputer_mask = _get_mask(X, self.missing_values)
else:
imputer_mask = X
if self.features == 'missing-only':
n_missing = imputer_mask.sum(axis=0)
if self.sparse is True:
imputer_mask = sp.csc_matrix(imputer_mask)
if self.features == 'all':
features_indices = np.arange(X.shape[1])
else:
features_indices = np.flatnonzero(n_missing)
return imputer_mask, features_indices
def _validate_input(self, X, in_fit):
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
X = self._validate_data(X, reset=in_fit,
accept_sparse=('csc', 'csr'), dtype=None,
force_all_finite=force_all_finite)
_check_inputs_dtype(X, self.missing_values)
if X.dtype.kind not in ("i", "u", "f", "O"):
raise ValueError("MissingIndicator does not support data with "
"dtype {0}. Please provide either a numeric array"
" (with a floating point or integer dtype) or "
"categorical data represented either as an array "
"with integer dtype or an array of string values "
"with an object dtype.".format(X.dtype))
if sp.issparse(X) and self.missing_values == 0:
# missing_values = 0 not allowed with sparse data as it would
# force densification
raise ValueError("Sparse input with missing_values=0 is "
"not supported. Provide a dense "
"array instead.")
return X
def _fit(self, X, y=None, precomputed=False):
"""Fit the transformer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
If `precomputed` is True, then `X` is a mask of the
input data.
precomputed : bool
Whether the input data is a mask.
Returns
-------
imputer_mask : {ndarray or sparse matrix}, shape (n_samples, \
n_features)
The imputer mask of the original data.
"""
if precomputed:
if not (hasattr(X, 'dtype') and X.dtype.kind == 'b'):
raise ValueError("precomputed is True but the input data is "
"not a mask")
self._precomputed = True
else:
self._precomputed = False
# Need not validate X again as it would have already been validated
# in the Imputer calling MissingIndicator
if not self._precomputed:
X = self._validate_input(X, in_fit=True)
self._n_features = X.shape[1]
if self.features not in ('missing-only', 'all'):
raise ValueError("'features' has to be either 'missing-only' or "
"'all'. Got {} instead.".format(self.features))
if not ((isinstance(self.sparse, str) and
self.sparse == "auto") or isinstance(self.sparse, bool)):
raise ValueError("'sparse' has to be a boolean or 'auto'. "
"Got {!r} instead.".format(self.sparse))
missing_features_info = self._get_missing_features_info(X)
self.features_ = missing_features_info[1]
return missing_features_info[0]
def fit(self, X, y=None):
"""Fit the transformer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : object
Returns self.
"""
self._fit(X, y)
return self
def transform(self, X):
"""Generate missing values indicator for X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray or sparse matrix}, shape (n_samples, n_features) \
or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of ``Xt``
will be boolean.
"""
check_is_fitted(self)
# Need not validate X again as it would have already been validated
# in the Imputer calling MissingIndicator
if not self._precomputed:
X = self._validate_input(X, in_fit=False)
else:
if not (hasattr(X, 'dtype') and X.dtype.kind == 'b'):
raise ValueError("precomputed is True but the input data is "
"not a mask")
imputer_mask, features = self._get_missing_features_info(X)
if self.features == "missing-only":
features_diff_fit_trans = np.setdiff1d(features, self.features_)
if (self.error_on_new and features_diff_fit_trans.size > 0):
raise ValueError("The features {} have missing values "
"in transform but have no missing values "
"in fit.".format(features_diff_fit_trans))
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
def fit_transform(self, X, y=None):
"""Generate missing values indicator for X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data to complete.
Returns
-------
Xt : {ndarray or sparse matrix}, shape (n_samples, n_features) \
or (n_samples, n_features_with_missing)
The missing indicator for input data. The data type of ``Xt``
will be boolean.
"""
imputer_mask = self._fit(X, y)
if self.features_.size < self._n_features:
imputer_mask = imputer_mask[:, self.features_]
return imputer_mask
def _more_tags(self):
return {
"allow_nan": True,
"X_types": ["2darray", "string"],
"preserves_dtype": [],
}
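# --- Illustrative usage sketch (not part of the module above) ---
# Assuming the two classes are importable as sklearn.impute.SimpleImputer and
# sklearn.impute.MissingIndicator, a small round trip with add_indicator=True
# might look like this; inverse_transform re-inserts missing_values only in the
# columns that had missing entries at fit time.
import numpy as np
from sklearn.impute import SimpleImputer, MissingIndicator

X = np.array([[1.0, np.nan], [np.nan, 3.0], [7.0, 6.0]])
imp = SimpleImputer(strategy="mean", add_indicator=True)
Xt = imp.fit_transform(X)            # imputed columns followed by 0/1 indicator columns
X_back = imp.inverse_transform(Xt)   # np.nan restored where the indicators were set
mask = MissingIndicator().fit_transform(X)   # boolean mask of the originally missing cells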
|
This Triple Delight Cake is a chocolate and vanilla layered cake with buttercream frosting between the layers. It is a moist, buttery, layered cake originating from the Dutch colonial era in Indonesia, where it is traditionally called Lapis Surabaya. These cakes are handmade using natural ingredients and are also gluten-free! |
from pychess import Variants
from pychess.Utils.const import *
# RatingType
TYPE_BLITZ, TYPE_STANDARD, TYPE_LIGHTNING, TYPE_WILD, \
TYPE_BUGHOUSE, TYPE_CRAZYHOUSE, TYPE_SUICIDE, TYPE_LOSERS, TYPE_ATOMIC, \
TYPE_UNTIMED, TYPE_EXAMINED, TYPE_OTHER = range(12)
class GameType (object):
def __init__ (self, fics_name, short_fics_name, rating_type,
display_text=None, variant_type=NORMALCHESS):
self.fics_name = fics_name
self.short_fics_name = short_fics_name
self.rating_type = rating_type
if display_text:
self.display_text=display_text
self.variant_type = variant_type
@property
def variant (self):
return Variants.variants[self.variant_type]
def __repr__ (self):
s = "<GameType "
s += "fics_name='%s', " % self.fics_name
s += "short_fics_name='%s', " % self.short_fics_name
s += "rating_type=%d, " % self.rating_type
s += "variant_type=%d, " % self.variant_type
s += "display_text='%s'>" % self.display_text
return s
class NormalGameType (GameType):
def __init__ (self, fics_name, short_fics_name, rating_type, display_text):
GameType.__init__(self, fics_name, short_fics_name, rating_type,
display_text=display_text)
class VariantGameType (GameType):
def __init__ (self, fics_name, short_fics_name, rating_type, variant_type):
GameType.__init__(self, fics_name, short_fics_name, rating_type,
variant_type=variant_type)
@property
def display_text (self):
        assert self.variant_type is not None
return Variants.variants[self.variant_type].name
@property
def seek_text (self):
return self.fics_name.replace("/", " ")
class WildGameType (VariantGameType):
_instances = []
def __init__ (self, fics_name, variant_type):
VariantGameType.__init__(self, fics_name, "w", TYPE_WILD,
variant_type=variant_type)
WildGameType._instances.append(self)
@classmethod
def instances (cls):
return cls._instances
GAME_TYPES = {
"blitz": NormalGameType("blitz", "b", TYPE_BLITZ, _("Blitz")),
"standard": NormalGameType("standard", "s", TYPE_STANDARD, _("Standard")),
"lightning": NormalGameType("lightning", "l", TYPE_LIGHTNING, _("Lightning")),
"untimed": NormalGameType("untimed", "u", TYPE_UNTIMED, _("Untimed")),
"examined": NormalGameType("examined", "e", TYPE_EXAMINED, _("Examined")),
"nonstandard": NormalGameType("nonstandard", "n", TYPE_OTHER, _("Other")),
"atomic": VariantGameType("atomic", "x", TYPE_ATOMIC, ATOMICCHESS),
"bughouse": VariantGameType("bughouse", "B", TYPE_BUGHOUSE, BUGHOUSECHESS),
"crazyhouse": VariantGameType("crazyhouse", "z", TYPE_CRAZYHOUSE, CRAZYHOUSECHESS),
"losers": VariantGameType("losers", "L", TYPE_LOSERS, LOSERSCHESS),
"suicide": VariantGameType("suicide", "S", TYPE_SUICIDE, SUICIDECHESS),
"wild/fr": WildGameType("wild/fr", FISCHERRANDOMCHESS),
"wild/0": WildGameType("wild/0", WILDCASTLECHESS),
"wild/1": WildGameType("wild/1", WILDCASTLESHUFFLECHESS),
"wild/2": WildGameType("wild/2", SHUFFLECHESS),
"wild/3": WildGameType("wild/3", RANDOMCHESS),
"wild/4": WildGameType("wild/4", ASYMMETRICRANDOMCHESS),
"wild/5": WildGameType("wild/5", UPSIDEDOWNCHESS),
"wild/8": WildGameType("wild/8", PAWNSPUSHEDCHESS),
"wild/8a": WildGameType("wild/8a", PAWNSPASSEDCHESS)
}
VARIANT_GAME_TYPES = {}
for key in GAME_TYPES:
if isinstance(GAME_TYPES[key], VariantGameType):
VARIANT_GAME_TYPES[GAME_TYPES[key].variant_type] = GAME_TYPES[key]
# The following 3 GAME_TYPES_* data structures don't have any real entries
# for the WildGameType's in GAME_TYPES above, and instead use
# a dummy type for the all-encompassing "Wild" FICS rating for wild/* games
GAME_TYPES_BY_SHORT_FICS_NAME = {
"w": GameType("wild", "w", TYPE_WILD, display_text=_("Wild"))
}
for key in GAME_TYPES:
if not isinstance(GAME_TYPES[key], WildGameType):
GAME_TYPES_BY_SHORT_FICS_NAME[GAME_TYPES[key].short_fics_name] = \
GAME_TYPES[key]
GAME_TYPES_BY_RATING_TYPE = {}
for key in GAME_TYPES_BY_SHORT_FICS_NAME:
GAME_TYPES_BY_RATING_TYPE[GAME_TYPES_BY_SHORT_FICS_NAME[key].rating_type] = \
GAME_TYPES_BY_SHORT_FICS_NAME[key]
GAME_TYPES_BY_FICS_NAME = {}
for key in GAME_TYPES_BY_SHORT_FICS_NAME:
GAME_TYPES_BY_FICS_NAME[GAME_TYPES_BY_SHORT_FICS_NAME[key].fics_name] = \
GAME_TYPES_BY_SHORT_FICS_NAME[key]
def type_to_display_text (typename):
if "loaded from" in typename.lower():
typename = typename.split()[-1]
if typename in GAME_TYPES:
return GAME_TYPES[typename].display_text
# Default solution for eco/A00 and a few others
elif "/" in typename:
a, b = typename.split("/")
a = a[0].upper() + a[1:]
b = b[0].upper() + b[1:]
return a + " " + b
else:
# Otherwise forget about it
return typename[0].upper() + typename[1:]
def time_control_to_gametype (minutes, gain):
assert type(minutes) == int and type(gain) == int
assert minutes >= 0 and gain >= 0
gainminutes = gain > 0 and (gain*60)-1 or 0
    if minutes == 0:
return GAME_TYPES["untimed"]
elif (minutes*60) + gainminutes >= (15*60):
return GAME_TYPES["standard"]
elif (minutes*60) + gainminutes >= (3*60):
return GAME_TYPES["blitz"]
else:
return GAME_TYPES["lightning"]
TYPE_ADMINISTRATOR, TYPE_BLINDFOLD, TYPE_COMPUTER, \
TYPE_TEAM, TYPE_UNREGISTERED, TYPE_CHESS_ADVISOR, \
TYPE_SERVICE_REPRESENTATIVE, TYPE_TOURNAMENT_DIRECTOR, TYPE_MAMER_MANAGER, \
TYPE_GRAND_MASTER, TYPE_INTERNATIONAL_MASTER, TYPE_FIDE_MASTER, \
TYPE_WOMAN_GRAND_MASTER, TYPE_WOMAN_INTERNATIONAL_MASTER, TYPE_WOMAN_FIDE_MASTER,\
TYPE_DUMMY_ACCOUNT = range(16)
TITLE_TYPE_DISPLAY_TEXTS = (
_("Administrator"), _("Blindfold Account"), _("Computer"),
_("Team Account"), _("Unregistered"), _("Chess Advisor"),
_("Service Representative"), _("Tournament Director"), _("Mamer Manager"),
_("Grand Master"), _("International Master"), _("FIDE Master"),
_("Woman Grand Master"), _("Woman International Master"), _("Woman FIDE Master"),
_("Dummy Account"),
)
TITLE_TYPE_DISPLAY_TEXTS_SHORT = (
_("*"), _("B"), _("C"),
_("T"), _("U"), _("CA"),
_("SR"), _("TD"), _("TM"),
_("GM"), _("IM"), _("FM"),
_("WGM"), _("WIM"), _("WFM"), _("D")
)
TITLES = { # From FICS 'help who'
"*": TYPE_ADMINISTRATOR,
"B": TYPE_BLINDFOLD,
"C": TYPE_COMPUTER,
"T": TYPE_TEAM,
"U": TYPE_UNREGISTERED,
"CA": TYPE_CHESS_ADVISOR,
"SR": TYPE_SERVICE_REPRESENTATIVE,
"TD": TYPE_TOURNAMENT_DIRECTOR,
"TM": TYPE_MAMER_MANAGER,
"GM": TYPE_GRAND_MASTER,
"IM": TYPE_INTERNATIONAL_MASTER,
"FM": TYPE_FIDE_MASTER,
"WFM": TYPE_WOMAN_FIDE_MASTER,
"WIM": TYPE_WOMAN_INTERNATIONAL_MASTER,
"WGM": TYPE_WOMAN_GRAND_MASTER,
"D": TYPE_DUMMY_ACCOUNT,
}
HEX_TO_TITLE = {
0x1 : TYPE_UNREGISTERED,
0x2 : TYPE_COMPUTER,
0x4 : TYPE_GRAND_MASTER,
0x8 : TYPE_INTERNATIONAL_MASTER,
0x10 : TYPE_FIDE_MASTER,
0x20 : TYPE_WOMAN_GRAND_MASTER,
0x40 : TYPE_WOMAN_INTERNATIONAL_MASTER,
0x80 : TYPE_WOMAN_FIDE_MASTER,
}
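# Illustrative helper (not part of the original module): HEX_TO_TITLE above maps
# single bits to title constants, which suggests titles arrive packed in a bitmask.
# Decoding such a mask is just a matter of testing each bit; the function name here
# is made up for this sketch.
def titles_from_bitmask(bitmask):
    """Return the title constants whose bits are set in a title bitmask."""
    return [title for bit, title in HEX_TO_TITLE.items() if bitmask & bit]
# e.g. titles_from_bitmask(0x03) contains TYPE_UNREGISTERED and TYPE_COMPUTER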
|
This Website (Ani Anywhere) and its Content are owned by Ana Sánchez Granado (“I” or “me”). The term “you” refers to any users, visitors, or viewers of this Website, and the term “us” refers to Ani Anywhere and its contributors.
This website may be compensated by companies mentioned through advertising, affiliate programs, or otherwise. Any references to third party products, rates, or websites are subject to change without notice.
When you visit/use this Website, you acknowledge that you are doing so of your own free will and you are using the information provided at your own risk, as all Content is for informational and entertainment purposes only. Every effort is made to ensure information is accurate at the time of posting but, due to the nature of the topics, material on this site may become outdated (e.g. opening times and fees of attractions in various cities) and this should always be taken into consideration. If you find inaccurate/outdated information please feel free to contact me so I can make updates to the information. I will not be held liable if you sustain loss, injury, or damage as a result of using or reading this Website.
Unless otherwise noted, I am the legal copyright holder of all Content (e.g. text and photos) that appears on this Website. If you’d like to share my photos and content, please do so only with proper credit and links back to this Website. You have permission to pin my images on Pinterest, or share them on Facebook, Instagram, Twitter, other forms of social media, or your own website, as long as I am given proper credit. Please feel free to contact me if you have any questions about sharing my content. I’d also love it if you’d send me the links where you share my content so I can have a look at your blog and social media.
Note that Anianywhere has financial relationships with some of the merchants mentioned on this blog. Anianywhere.com may be compensated if consumers choose to utilize the links located throughout the content on this site and generate sales for the said merchant. You are not obligated to click on any link or buy any products that are advertised.
As an Amazon Associate, I earn from qualifying purchases. “Ani Anywhere is a participant in the Amazon Services LLC Associates Program, an affiliate advertising program designed to provide a means for me to earn fees by linking to Amazon.com and affiliated sites.” Anianywhere.com is also a participant with the Booking.com, Agoda.com, GetYourGuide and World Nomads Affiliate Programs.
How do I travel? Unless otherwise noted, I pay for my own trips and expenses.
* I use Google Analytics for analyzing website data.
* If you subscribed to my newsletter, you will receive my newsletters. You can always unsubscribe by following the link in email or by emailing me.
Anianywhere.com abides by GDPR regulations, so if you are an EU resident and wish to have your information (e.g. comments) removed, please get in touch with me. I am happy to amend/remove your information/comments/email from the mailing list if you contact me. I do not retain secondary copies of commenter/reader contact information without permission, nor do I resell or share your contact information with third parties. Likewise, once you opt out of the mailing list, you are permanently removed.
Information You Voluntarily Submit to the Website: I may collect personal information from you such as your name or email address. For example, you may voluntarily submit information to the Website by leaving a comment, subscribing to a newsletter, or submitting a contact form.
Automatically-Collected Information: I automatically collect certain information about you and the device with which you access the Website. For example, when you use the Website, I will log your IP address, operating system type, browser type, referring website, pages you viewed, and the dates/times when you accessed the Website. I may also collect information about actions you take when using the Website, such as links clicked. This information is anonymized and viewed only in aggregate.
Cookies: I may log information using cookies, which are small data files stored on your browser by the Website. I may use both session cookies, which expire when you close your browser, and persistent cookies, which stay on your browser until deleted, to provide you with a more personalized experience on the Website.
I may share your information with third parties when you explicitly authorize me to share your information. Additionally, the Website may use third-party service providers to service various aspects of the Website. Each third-party service provider’s use of your personal information is dictated by their respective privacy policies.
Google Analytics – this service tracks Website usage and provides information such as referring websites and user actions on the Website. Google Analytics may capture your IP address, but no other personal information is captured by Google Analytics. We have anonymized our Google Analytics data and never look at non-aggregated data.
If you leave a comment, certain information (your name as you provide it) may be publicly visible, however, your email address will not be. I will not contact you using your email as this goes against GDPR regulations, so if you wish to be contacted by Anianywhere.com, please contact me directly. Please do not request that I contact you in the comments.
The Website does not knowingly collect any personally identifiable information from children under the age of 16. If a parent or guardian believes that the Website has personally identifiable information of a child under the age of 16 in its database, please contact me immediately. We will use our best efforts to promptly remove such information from our records. |
# -*- coding: utf-8 -*-
"""
Audits WikiProjects for inconsistencies between their project pages and their categories
Copyright (C) 2015 James Hare
Licensed under MIT License: http://mitlicense.org
"""
import pywikibot
from project_index import WikiProjectTools
class ProjectCategoryAudit:
def go(self):
wptools = WikiProjectTools()
# Get list of WikiProjects that also have a self-named category
output = 'This report highlights discrepancies in WikiProject categorization between WikiProjects and their self-named categories.\n\n'
query = 'select page_title from page left join redirect on page.page_id = redirect.rd_from where page_title like "WikiProject\_%" and page_namespace = 4 and page_title in (select page_title from page where page_title like "WikiProject\_%" and page_namespace = 14) and rd_title is null;'
for row in wptools.query('wiki', query, None):
project = row[0].decode('utf-8')
cl_projectspace = [] # read as "category links, Wikipedia namespace"
cl_categoryspace = [] # read as "category links, Category namespace"
for match in wptools.query('wiki', 'select cl_to from categorylinks join page on categorylinks.cl_from=page.page_id where page_namespace = 4 and page_title = "{0}" and cl_to like "%\_WikiProjects" and cl_to not like "Active\_%" and cl_to not like "Semi-active\_%" and cl_to not like "Inactive\_%" and cl_to not like "Defunct\_%";'.format(project), None):
cl_projectspace.append(match[0].decode('utf-8').replace('_', ' '))
for match in wptools.query('wiki', 'select cl_to from categorylinks join page on categorylinks.cl_from=page.page_id where page_namespace = 14 and page_title = "{0}" and cl_to like "%\_WikiProjects" and cl_to not like "Active\_%" and cl_to not like "Semi-active\_%" and cl_to not like "Inactive\_%" and cl_to not like "Defunct\_%";'.format(project), None):
cl_categoryspace.append(match[0].decode('utf-8').replace('_', ' '))
cl_projectspace.sort()
cl_categoryspace.sort()
if cl_projectspace == cl_categoryspace:
continue # Don't bother generating a report if both category lists match perfectly
both = list(set(cl_projectspace).intersection(cl_categoryspace))
project = project.replace('_', ' ')
output += "* '''{0}'''\n".format(project)
output += "** [[Wikipedia:{0}]]: ".format(project)
for entry in cl_projectspace:
if entry in both:
output += "<span style='color: #999'>{0}</span> – ".format(entry)
else:
output += "<span style='color: #FF0000'>{0}</span> – ".format(entry)
output = output[:-2] + "\n" # Truncate trailing endash and add line break
output += "** [[:Category:{0}]]: ".format(project)
for entry in cl_categoryspace:
if entry in both:
output += "<span style='color: #999'>{0}</span> –".format(entry)
else:
output += "<span style='color: #FF0000'>{0}</span> –".format(entry)
output = output[:-2] + "\n" # Truncate trailing endash and add line break
return output
if __name__ == "__main__":
audit = ProjectCategoryAudit()
report = audit.go()
bot = pywikibot.Site('en', 'wikipedia')
page = pywikibot.Page(bot, 'User:Reports bot/WikiProject category audit')
page.text = report
page.save('Updating report', minor=False, quiet=True)
|
I've put a lot of effort in this time and kept the video detailed, so that many people, including beginners, can understand how to solve HILLJUMPING, a tough square-root decomposition problem from the Codechef August 17 Long Challenge.
Check out the video editorial here.
Then I explain my approach till 14:50, after which the code discussion begins.
If anyone has a solution to this, let me know!
The bucketing and the binary search over a prefix-maxed array are the golden gems; a sketch of the idea follows.
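For anyone who wants to see the idea in code, here is a minimal sketch (my own illustration with made-up names, not the actual editorial solution): cut the array into roughly sqrt(n) buckets, keep a running prefix maximum inside each bucket, and answer "first element greater than x at or after index i" by scanning the one partial bucket and binary-searching inside the first full bucket whose maximum exceeds x.
import bisect
import math

def build_blocks(a, block_size=None):
    """Split a into buckets; each bucket stores a running prefix maximum."""
    n = len(a)
    b = block_size or max(1, int(math.sqrt(n)))
    blocks = []
    for start in range(0, n, b):
        prefix_max, cur = [], float('-inf')
        for v in a[start:start + b]:
            cur = max(cur, v)
            prefix_max.append(cur)
        blocks.append((start, prefix_max))
    return blocks

def first_greater(a, blocks, i, x):
    """First index j >= i with a[j] > x, or -1; roughly O(sqrt(n) + log n) per query."""
    for start, prefix_max in blocks:
        end = start + len(prefix_max)
        if end <= i:
            continue                      # bucket lies entirely to the left of i
        if start < i:                     # partial bucket: plain scan
            for j in range(i, end):
                if a[j] > x:
                    return j
            continue
        if prefix_max[-1] <= x:           # nothing in this bucket exceeds x
            continue
        # prefix_max is non-decreasing, so binary-search the first value > x
        return start + bisect.bisect_right(prefix_max, x)
    return -1

# e.g. a = [3, 1, 4, 1, 5]; blocks = build_blocks(a); first_greater(a, blocks, 1, 3) -> 2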
@Deepesh Yes, you are correct: this results in a contradiction, and hence a sequence that generates the given kind of graph is impossible.
Bro... it would be a great help if you could tell me how to get good at competitive programming. I currently have a Codechef rating of 1726. Thanks in advance. |
import tornado.web
from handlers.base_handler import BaseHandler
from models.group import Group
from models.user import User
from .util import check_group_permission
from forms.forms import GroupForm
class GroupsHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
group_name = self.get_argument('group_name', '')
groups = Group.search_name(group_name)
self.render('group/groups.html', groups=groups)
@tornado.web.authenticated
def post(self):
form = GroupForm(self.request.arguments)
if form.validate():
group = Group(**form.data)
user_id = self.get_current_user_id()
user = User.get(user_id)
user.groups.append(group)
group.save()
self.redirect(self.reverse_url('group', group.id))
else:
            self.redirect(self.reverse_url('groups'))  # TODO: pass an error message
class GroupHandler(BaseHandler):
@check_group_permission
@tornado.web.authenticated
def get(self, group_id):
group = Group.get(group_id)
self.render('group/group.html', group=group)
class GroupEditHandler(BaseHandler):
@check_group_permission
@tornado.web.authenticated
def post(self, group_id):
form = GroupForm(self.request.arguments)
if form.validate():
group = Group.get(group_id)
group.update(**form.data)
group.save()
self.redirect(self.reverse_url('group', group_id))
else:
self.redirect(self.reverse_url('group', group_id))
class GroupDeleteHandler(BaseHandler):
@check_group_permission
@tornado.web.authenticated
def post(self, group_id):
Group.get(group_id).delete()
self.redirect(self.reverse_url('groups'))
class GroupMemberAdditionHandler(BaseHandler):
@check_group_permission
@tornado.web.authenticated
def get(self, group_id):
user_name = self.get_argument('user_name', '')
users = User.search_name(user_name)
users = [user for user in users if not user.belongs_to_group(group_id)]
group = Group.get(group_id)
self.render('group/member_addition.html', users=users, group=group)
@check_group_permission
@tornado.web.authenticated
def post(self, group_id):
user_id = self.get_argument('user_id', '')
user = User.get(user_id)
group = Group.get(group_id)
user.groups.append(group)
user.save()
self.redirect(self.reverse_url('member_addition', group_id))
class GroupUserHandler(BaseHandler):
@check_group_permission
@tornado.web.authenticated
def get(self, group_id):
group = Group.get(group_id)
self.render('group/group_users.html', group=group) |
The United States Food and Drug Administration today approved Provenge.
The United States Food and Drug Administration today approved Provenge, a first-of-its-kind vaccine for the treatment of prostate cancer. The treatment was shown to extend lifespan by well over four weeks on average. All told, that is not a bad deal when comparing its price to that of other cancer drugs. Researchers at Temple University School of Pharmacy, Philadelphia, note that neuropathy is a potentially critical complication that can become dose-limiting, preventing patients from receiving their complete recommended course of chemotherapy. Cannabidiol is a marijuana extract that has pain- and inflammation-reducing effects, while avoiding the psychoactive side effects of marijuana and other cannabinoid compounds.
|
import urllib,urllib2,re,cookielib,string, urlparse,sys,os
import xbmc, xbmcgui, xbmcaddon, xbmcplugin,urlresolver
from resources.libs import main
#Mash Up - by Mash2k3 2012.
from t0mm0.common.addon import Addon
from resources.universal import playbackengine, watchhistory
addon_id = 'plugin.video.movie25'
selfAddon = xbmcaddon.Addon(id=addon_id)
addon = Addon('plugin.video.movie25', sys.argv)
art = main.art
wh = watchhistory.WatchHistory('plugin.video.movie25')
def TSNDIR():
main.addDir('Featured','http://m.tsn.ca/home?p_p_id=feed_WAR_xlmagic_INSTANCE_C4iW&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_resource_id=getPage&p_p_cacheability=cacheLevelPage&p_p_col_id=column-1&p_p_col_pos=6&p_p_col_count=9&_feed_WAR_xlmagic_INSTANCE_C4iW_page=0&_feed_WAR_xlmagic_INSTANCE_C4iW_portrait=false',97,art+'/tsn.png')
main.addDir('NHL','http://m.tsn.ca/nhl?p_p_id=feed_WAR_xlmagic_INSTANCE_75Sw&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_resource_id=getPage&p_p_cacheability=cacheLevelPage&p_p_col_id=column-1&p_p_col_pos=2&p_p_col_count=3&_feed_WAR_xlmagic_INSTANCE_75Sw_page=0&_feed_WAR_xlmagic_INSTANCE_75Sw_portrait=false',97,art+'/tsn.png')
main.addDir('NFL','http://m.tsn.ca/nfl?p_p_id=feed_WAR_xlmagic_INSTANCE_u0tU&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_resource_id=getPage&p_p_cacheability=cacheLevelPage&p_p_col_id=column-1&p_p_col_pos=2&p_p_col_count=3&_feed_WAR_xlmagic_INSTANCE_u0tU_page=0&_feed_WAR_xlmagic_INSTANCE_u0tU_portrait=false',97,art+'/tsn.png')
#main.addDir('NBA','nba',97,art+'/tsn.png')
main.addDir('CFL','http://m.tsn.ca/cfl?p_p_id=feed_WAR_xlmagic_INSTANCE_8WBz&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_resource_id=getPage&p_p_cacheability=cacheLevelPage&p_p_col_id=column-1&p_p_col_pos=2&p_p_col_count=3&_feed_WAR_xlmagic_INSTANCE_8WBz_page=0&_feed_WAR_xlmagic_INSTANCE_8WBz_portrait=false',97,art+'/tsn.png')
main.addDir('MLB','http://m.tsn.ca/mlb?p_p_id=feed_WAR_xlmagic_INSTANCE_5wRo&p_p_lifecycle=2&p_p_state=normal&p_p_mode=view&p_p_resource_id=getPage&p_p_cacheability=cacheLevelPage&p_p_col_id=column-1&p_p_col_pos=2&p_p_col_count=3&_feed_WAR_xlmagic_INSTANCE_5wRo_page=0&_feed_WAR_xlmagic_INSTANCE_5wRo_portrait=false',97,art+'/tsn.png')
main.GA("Sports","TSN")
def TSNLIST(murl):
main.GA("TSN","TSN-list")
link=main.OPENURL(murl)
link=link.replace('\r','').replace('\n','').replace('\t','').replace(' ','').replace(' ','')
match=re.compile('''href="([^"]+)"><span class="tileText"><span class="overlay"><img src="([^"]+)" style=.+?/><img class="videoOverlay" src=".+?" /></span><span class=".+?" style=".+?">([^<]+)</span></span>''').findall(link)
for url,thumb,name in match:
url=main.REDIRECT(url)
main.addPlayMs(name,url,98,thumb,'','','','','')
paginate=re.compile('_page=(\d+)&_',re.DOTALL).findall(murl)
if paginate:
purl=int(paginate[0])+ 1
xurl=re.sub('_page=(\d+)&_','_page='+str(purl)+'&_',murl)
main.addDir('[COLOR blue]Next[/COLOR]',xurl,97,art+'/next2.png')
def TSNLINK(mname,murl,thumb):
#got help from TSN plugin by TEEFER
main.GA("TSN-list","Watched")
ok=True
link=main.OPENURL(murl)
m3u8 = re.compile('"(http[^"]+m3u8)"').findall(link)[0]
link2=main.OPENURL(m3u8)
    streams = re.compile("(http.+?)Adaptive").findall(link2)
    if len(streams)==0:
        xbmc.executebuiltin("XBMC.Notification(Sorry!,Playable Only in Canada,5000)")
    else:
        stream = streams[0]
if selfAddon.getSetting("tsn-qua") == "0":
stream_url = stream+'Adaptive_08.mp4.m3u8'
elif selfAddon.getSetting("tsn-qua") == "1":
stream_url = stream+'Adaptive_05.mp4.m3u8'
elif selfAddon.getSetting("tsn-qua") == "2":
stream_url = stream+'Adaptive_01.mp4.m3u8'
# play with bookmark
player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels='', watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='')
#WatchHistory
if selfAddon.getSetting("whistory") == "true":
wh.add_item(mname+' '+'[COLOR green]TSN[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False)
player.KeepAlive()
return ok
|
Lovingly co-manufactured by AIC.
Stylenanda-inspired piece, vintage design.
STATUS : AVAILABLE IN S AND M!
STATUS : AVAILABLE IN M AND L! |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import re
import json
class Spider:
def __init__(self):
self.url = 'https://www.paizi.com/'
self.user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
self.headers = { 'User-Agent' : self.user_agent }
def getBrandCategory(self):
content = self.getPageContext(self.url)
indexItems = self.resolveIndexContent(content)
for indexItem in indexItems:
firstCategory = indexItem[1]
firstCategoryContent = self.getPageContext('https:'+str(indexItem[0]))
firstCategoryItems = self.resolveFirstCategoryContent(firstCategoryContent)
for firstCategroyItem in firstCategoryItems:
sencondCategory = firstCategroyItem[1]
secondCategoryContent = self.getPageContext('https:'+str(firstCategroyItem[0]))
secondCategoryItems = self.resolveSecondCategoryContent(secondCategoryContent)
for secondCategoryItem in secondCategoryItems:
thirdCategory = secondCategoryItem[1]
thirdCategoryContent = self.getPageContext('https:'+str(secondCategoryItem[0]))
realUrl = self.getRealUrl(thirdCategoryContent)
realThirdCategoryContent = self.getPageContext('https:'+str(realUrl))
index = self.getMaxPage(realThirdCategoryContent)
                    # parse the current page
realThridCategoryItems = self.resolveLastPage(realThirdCategoryContent)
for realThirdCategoryItem in realThridCategoryItems:
                        brandCategory = realThirdCategoryItem[1]  # name
aboutContent = self.getPageContext('https:'+str(realThirdCategoryItem[0]))
aboutItems = self.resolveAboutPage(aboutContent)
brandContent = self.getPageContext('https:'+str(aboutItems))
info = self.resolveBrandField(brandContent)
print info[0],',',info[1],',',info[2],',',info[3],',',info[4],',',info[5],',',info[6],',',info[7],',',info[8],',',info[9],',',info[10],',',info[11],',',info[12]
def resolveDan(self,content):
try:
pattern = re.compile('.*?<p><font color="#4993F4">主体规模:</font>(.*?)</p>.*?')
return re.findall(pattern,content)[0]
except:
return 'null'
def resolveZhuTiGuiMo(self,content):
try:
pattern = re.compile('.*?<p><font color="#4993F4">主体规模:</font>(.*?)</p>.*?')
return re.findall(pattern,content)
except:
return 'null'
def resolveBrandField(self,content):
zhutiguimo = 'null'
danweixingzhi = 'null'
zichanleixing = 'null'
chuangjianshijian = 'null'
boss = 'null'
address = 'null'
zizhirongyu = 'null'
score = 0
price = 'null'
rank = 'null'
sales = 'null'
renqi = 'null'
try:
pattern = re.compile('.*?<p style="height: 30px;line-height: 20px;">(.*?)</p>.*?')
result = re.findall(pattern,content)
if len(result) >0 :
name = result[0]
pattern = re.compile('.*?<p><font color="#4993F4">主体规模:</font>(.*?)</p>.*?')
result = re.findall(pattern,content)
if len(result) >0 :
zhutiguimo = result[0]
pattern = re.compile('.*?<p><font color="#4993F4">单位性质:</font>(.*?)</p>.*?')
result = re.findall(pattern,content)
if len(result) >0 :
danweixingzhi = result[0]
pattern = re.compile('.*?<p><font color="#4993F4">资产类型:</font>(.*?)</p>.*?')
result = re.findall(pattern,content)
if len(result) >0 :
zichanleixing = result[0]
pattern = re.compile('.*?<p><font color="#4993F4">成立于:</font>(.*?)</p>.*?')
result = re.findall(pattern,content)
if len(result) >0 :
chuangjianshijian = result[0]
pattern = re.compile('.*?<p><font color="#4993F4">创办人、主要负责人或法人:</font>(.*?)</p>.*?')
result = re.findall(pattern,content)
if len(result) >0 :
boss = result[0]
pattern = re.compile('.*?<p><font color="#4993F4">发源地或总部所在地:</font>(.*?)</p>.*?')
result = re.findall(pattern,content)
if len(result) >0 :
address = result[0]
pattern = re.compile('.*?<p class="x"><span>*</span><font color="#4993F4">资质荣誉:</font>(.*?)</p>.*?')
result = re.findall(pattern,content)
if len(result) >0 :
zizhirongyu = result[0]
# <p class="zf">总分:92分</p>
pattern = re.compile('.*?<p class="zf">总分:(.*?)分</p>.*?')
result = re.findall(pattern,content)
if len(result) >0 :
score = result[0]
# <p>综合排名:第<b style="color:#F60;" _hover-ignore="1">193</b>位</p>
pattern = re.compile('.*?<p>综合排名:第<b style="color:#F60;">(.*?)</b>位</p>.*?')
result = re.findall(pattern,content)
if len(result) >0 :
rank = result[0]
# <p>品牌价值:<a href="//jiazhi.paizi.com/s?keys=惠普" style="color:#F60; font-weight:bold;" target="_blank">41832</a>百万元
pattern = re.compile('.*?<p>品牌价值:<a href=".*?" style="color:#F60; font-weight:bold;" target="_blank">(.*?)</a>(.*?).*?')
result = re.findall(pattern,content)
            if len(result) > 0 :
                price = result[0][0] + result[0][1]
# <p>估算销量:<b style="color:#F60;">4831</b>件/月</p>
pattern = re.compile('.*?<p>估算销量:<b style="color:#F60;">(.*?)</b>(.*?)</p>.*?')
result = re.findall(pattern,content)
            if len(result) > 0 :
                sales = result[0][0] + result[0][1]
# <p>品牌人气:<em id="zs_pprq">222811</em>
pattern = re.compile('.*?<p>品牌人气:<em id="zs_pprq">(.*?)</em>.*?')
result = re.findall(pattern,content)
if len(result) > 0 :
renqi = result[0]
return [name,zhutiguimo,danweixingzhi,zichanleixing,chuangjianshijian,boss,address,zizhirongyu,score,price,rank,sales,renqi]
except:
            print 'error while parsing brand attributes'
return []
def resolvePageName(self,content):
try:
pattern = re.compile('.*?<p style="height: 30px;line-height: 20px;">(.*?)</p>.*?')
return re.findall(pattern,content)
except:
            print 'error while parsing the brand page'
return []
def getPageContext(self,url):
        # print 'crawling page', url
try:
request = urllib2.Request(url,headers = self.headers)
response = urllib2.urlopen(request)
return response.read()
except:
            pass
            # print 'ignore errors when the URL request fails'
def run(self):
self.getBrandCategory()
def resolveIndexContent(self,content):
try:
pattern = re.compile('.*?<h3><em></em><a href="(.*?)">(.*?)</a></h3>.*?')
return re.findall(pattern,content)
except:
            # print 'ignore brand page parsing errors'
return []
def resolveFirstCategoryContent(self,content):
try:
pattern = re.compile('.*?<div class="con06">([\s\S]*?)<div class="con07">.*?')
div = re.findall(pattern,content)
pattern = re.compile('.*?<strong><a href="(.*?)">(.*?)</a></strong>.*?')
return re.findall(pattern,div[0])
except:
            # print 'ignore brand page parsing errors'
return []
def resolveSecondCategoryContent(self,content):
try:
pattern = re.compile('.*?<div class="c-3">([\s\S]*?)</div>.*?')
div = re.findall(pattern,content)
pattern = re.compile('.*?<a href="(.*?)">(.*?)</a>.*?')
return re.findall(pattern,div[0])
except:
            # print 'ignore brand page parsing errors'
return []
def getRealUrl(self,content):
try:
pattern = re.compile('.*?<a href="(.*?)">.*?品牌大全></a>.*?')
return re.findall(pattern,content)[0]
except:
print "解析出错"
return []
def getMaxPage(self,content):
try:
pattern = re.compile('.*?\.\.<a href=".*?">(\d)</a>.*?')
index = re.findall(pattern,content)
if len(index) == 0:
return 0
else:
return index[0]
except:
print "获取最大值出错"
return []
def resolveLastPage(self,content):
# <div class="c03"><p>名称:<a href="
try:
pattern = re.compile('.*?<p>名称:<a href="(.*?)">(.*?)</a></p>.*?')
return re.findall(pattern,content)
except:
print "解析出错"
return []
def resolveAboutPage(self,content):
try:
pattern = re.compile('.*?<a href="(.*?)">关于.*?')
return re.findall(pattern,content)[0]
except:
return []
spider = Spider()
spider.run() |
Easter and chocolate go together like peanut butter and jelly, but what doesn’t go down as well is our body’s reaction after bingeing on all your Easter goodies.
We’ve all heard the expression of being on a sugar high. Keri Glassman, registered dietitian and founder of Nutritious Life and The Nutrition School, explains this experience as “a surge of feel-good hormones known as dopamine” resulting in “a high similar to using some drugs”. This sugar rush triggers our body’s release of insulin, which removes the overload of glucose from our blood to stabilise our blood sugar levels. However, as a result of this abnormally rapid spike in our blood glucose levels, followed by the ensuing drop once the insulin has finished its job and the sugar is absorbed, we experience the dreaded sugar crash.
So, with our slightly higher than usual (ok, a LOT higher than usual) sugar intake over the Easter period, this is something a lot of us have probably experienced lately, and while there is no magic cure for it there are some things you can do to help combat and lessen the effect it has on you!
This is the most often mentioned, and possibly the most important of all! That glucose we were talking about earlier? Well, adding water to the equation dilutes the concentration of glucose in your bloodstream. According to Lisa Eberly, R.D., this “will help speed up the filtering of your blood, getting that sugar outta there”. It will also help put a stop to the headache that so often accompanies a sugar binge.
It doesn’t have to be anything extreme, even just a 10-minute walk will do – but get moving! Exercising will burn that glucose and excess sugar that is running around in your veins during that sugar high, lessening the crash we feel later on. As an added benefit, a good workout will get you sweating out toxins too!
Eating more after chocolate may not necessarily be the first thing you want to do, but an influx of other nutrients can help prevent that crash after a sugar high. Jennifer Powell Weddig, a professor of nutrition at Metropolitan State University of Denver, suggests eating “a spoonful of peanut butter or a handful of nuts” after consuming larger doses of sugar than usual. These will give your body “fat[s] and protein[s] to slow digestion”, hence helping to ward off that sugar crash.
Indulging in the occasional treat is key to living a well-balanced lifestyle, and Easter is definitely the perfect time to do it! So don’t beat yourself up over all the chocolate you consumed; instead focus on how great Easter was! |
#!/usr/bin/env python
#For tuning retina on DARPA Heli
#Created by Dylan Paiton
from tune_functions import *
###################
### ###
### Global vars ###
### ###
###################
run_PetaVision = 0 #Will just create params file if set to 0
pipe_output_to_file = 1
mpi_np = '4'
mpi_rows = '2'
mpi_columns = '2'
num_steps_list = ['2000'] #[str(450*33)]#
stochastic_flag = '1' #preActivityNotRate = !stochastic_flag
PA_delay = '2.0'
param_template_name = 'retina_params.template'
run_name = 'biggraywhiteblackspots' #'Heli_Challenge_026'#
#######################################################################################
## PATH PARAMS
#######################################################################################
#Gar
wrkspc_path = '/Users/garkenyon/workspace-sync-anterior'
remote_wrkspc_path = wrkspc_path #'/home/gkenyon/workspace-sync-anterior'#
data_path = wrkspc_path #remote_wrkspc_path #'/nh/compneuro/Data'
#Dylan
#wrkspc_path = '/Users/dpaiton/Documents/Work/LANL/workspace'
#remote_wrkspc_path = wrkspc_path
#data_path = wrkspc_path
#Common
out_filename = run_name
results_path = data_path+'/HyPerRetina/output/'+run_name
remote_input_path = remote_wrkspc_path+'/HyPerRetina/input'
input_path = wrkspc_path+'/HyPerRetina/input'
tuning_path = wrkspc_path+'/HyPerRetina/tuning'
param_in_file = tuning_path+'/'+param_template_name
param_out_file = tuning_path+'/params_files/'+out_filename
run_file = wrkspc_path+'/HyPerRetina/Debug_remote/HyPerRetina'
#######################################################################################
## INPUT MOVIE (One must be enabled)
input_file = remote_input_path+'/filenames_graywhiteblackspots_big.txt'#'/filenames_graywhiteblackspots.txt'#'/heli_challenge_026_framenames.txt'#
## Declare layers
#INPUTS
Movie = 1
#ANN INPUT COPY
ImageBuffer = 1
ConstantVrest = 1
#CONE
Cone = 1
ConeSigmoidON = 1
ConeSigmoidOFF = ConeSigmoidON
#BIPOLAR
BipolarON = 1
BipolarSigmoidON = 1
BipolarOFF = BipolarON
BipolarSigmoidOFF = BipolarSigmoidON
#HORIZONTAL
Horizontal = 1
HorizontalGap = 1
HorizontalSigmoidON = 1
HorizontalSigmoidOFF = 1
#WFAMACRINE
WFAmacrineON = 1
WFAmacrineSigmoidON = 1
WFAmacrineOFF = WFAmacrineON
WFAmacrineSigmoidOFF = WFAmacrineSigmoidON
#PAAmacrie
PAAmacrineON = 1
PAAmacrineGapON = 1
PAAmacrineOFF = PAAmacrineON
PAAmacrineGapOFF = PAAmacrineGapON
#SFAmacrine
SFAmacrine = 1
SFAmacrineSigmoid = 1
SFAmacrineGap = 1
#GANGLION
GanglionON = 1
GanglionGapON = 1
GanglionOFF = GanglionON
GanglionGapOFF = GanglionGapON
## Declare conn strength values
## frange is (start, stop, step)
ImageImageBuffer = ["%g" % x for x in frange(40,0,0)] # Image to ImageBuffer
ConstantVrestImageBuffer = ["%g" % x for x in frange(1,0,0)] # ConstantVrest to ImageBuffer
ImageBufferCone = ["%g" % x for x in frange(1,0,0)] # ImageBuffer to Cone
ConeSigmoidBipolar = ["%g" % x for x in frange(0.5,0,0)] # ConeSigmoid to Bipolar
ConeSigmoidHorizontal = ["%g" % x for x in frange(0.5,0,0)] # ConeSigmoid to Horizontal
HorizontalGapHorizontal = ["%g" % x for x in frange(3,0,0)] # HorizontalGap to Horizontal
HorizontalSigmoidConeON = ["%g" % x for x in frange(1,0,0)] # HorizontalSigmoidON to Cone
HorizontalSigmoidBipolarOFF = ["%g" % x for x in frange(2.5,0,0)] # HorizontalSigmoidOFF to BipolarOFF
BipolarSigmoidSFAmacrine = ["%g" % x for x in frange(1,0,0)] # BipolarSigmoid to SFAmacrine
BipolarSigmoidWFAmacrine = ["%g" % x for x in frange(1,0,0)] # BipolarSigmoid to WFAmacrine
BipolarSigmoidPAAmacrine = ["%g" % x for x in frange(0.1,0,0)] # BipolarSigmoid to WFAmacrine
BipolarSigmoidGanglion = ["%g" % x for x in frange(3.0,0,0)] # BipolarSigmoid to Ganglion
SFAmacrineGapSFAmacrine = ["%g" % x for x in frange(1,0,0)] # SFAmacrineGAP to SFAmacrine
SFAmacrineSigmoidPAAmacrine = ["%g" % x for x in frange(2,0,0)] #
WFAmacrineSigmoidBipolarON = ["%g" % x for x in frange(0.10,0,0)] # WFAmacrineSigmoidON to Bipolar
WFAmacrineSigmoidBipolarOFF = ["%g" % x for x in frange(0.10,0,0)] # WFAmacrineSigmoidOFF to Bipolar
WFAmacrineONSFAmacrine = ["%g" % x for x in frange(1,0,0)] # WFAmacrineON to SFAmacrine
WFAmacrineOFFSFAmacrine = ["%g" % x for x in frange(1,0,0)] # WFAmacrineOFF to SFAmacrine
WFAmacrineSigmoidGanglionON = ["%g" % x for x in frange(0.5,0,0)] # WFAmacrineSigmoidON to GanglionON
WFAmacrineSigmoidGanglionOFF = ["%g" % x for x in frange(0.5,0,0)] # WFAmacrineSigmoidOFF to GanglionOFF
PAAmacrineWFAmacrine = ["%g" % x for x in frange(2.0,0,0)] # PAAmacrine to WFAmacrine
PAAmacrineGapPAAmacrine = ["%g" % x for x in frange(1.5,0,0)] # PAAmacrineGap to PAAmacrine
PAAmacrinePAAmacrine = ["%g" % x for x in frange(3.0,0,0)] #
PAAmacrineGapGanglion = ["%g" % x for x in frange(0.5,0,0)] # PAAmacrineGap to Ganglion
PAAmacrineGanglion = ["%g" % x for x in frange(24,0,0)] #
GanglionGapPAAmacrine = ["%g" % x for x in frange(3.0,0,0)] # GanglionGap to PAAmacrine
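# Note: frange above comes from tune_functions (imported at the top, not shown here).
# Judging only from its use in this file -- frange(v, 0, 0) producing the single value
# v, and the "(start, stop, step)" comment -- a compatible stand-in might look like the
# sketch below. This is an assumption for illustration, named differently so it does
# not shadow the real helper.
def frange_stand_in(start, stop, step):
    if step == 0 or start == stop:
        return [start]
    direction = 1 if stop > start else -1
    values, v = [], float(start)
    while (v - stop) * direction <= 0:   # walk from start toward stop, inclusive
        values.append(v)
        v += direction * abs(step)
    return values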
#List possible connections
conn_list = ["ImageImageBuffer",
"ConstantVrestImageBuffer",
"ImageBufferCone",
"ConeSigmoidBipolar",
"ConeSigmoidHorizontal",
"HorizontalGapHorizontal",
"HorizontalSigmoidConeON",
"HorizontalSigmoidBipolarOFF",
"BipolarSigmoidSFAmacrine",
"BipolarSigmoidWFAmacrine",
"BipolarSigmoidPAAmacrine",
"BipolarSigmoidGanglion",
"SFAmacrineGapSFAmacrine",
"SFAmacrineSigmoidPAAmacrine",
"WFAmacrineSigmoidBipolarON",
"WFAmacrineSigmoidBipolarOFF",
"WFAmacrineONSFAmacrine",
"WFAmacrineOFFSFAmacrine",
"WFAmacrineSigmoidGanglionON",
"WFAmacrineSigmoidGanglionOFF",
"PAAmacrineWFAmacrine",
"PAAmacrineGapPAAmacrine",
"PAAmacrinePAAmacrine",
"PAAmacrineGapGanglion",
"PAAmacrineGanglion",
"GanglionGapPAAmacrine"]
conn_lol = [ImageImageBuffer,
ConstantVrestImageBuffer,
ImageBufferCone,
ConeSigmoidBipolar,
ConeSigmoidHorizontal,
HorizontalGapHorizontal,
HorizontalSigmoidConeON,
HorizontalSigmoidBipolarOFF,
BipolarSigmoidSFAmacrine,
BipolarSigmoidWFAmacrine,
BipolarSigmoidPAAmacrine,
BipolarSigmoidGanglion,
SFAmacrineGapSFAmacrine,
SFAmacrineSigmoidPAAmacrine,
WFAmacrineSigmoidBipolarON,
WFAmacrineSigmoidBipolarOFF,
WFAmacrineONSFAmacrine,
WFAmacrineOFFSFAmacrine,
WFAmacrineSigmoidGanglionON,
WFAmacrineSigmoidGanglionOFF,
PAAmacrineWFAmacrine,
PAAmacrineGapPAAmacrine,
PAAmacrinePAAmacrine,
PAAmacrineGapGanglion,
PAAmacrineGanglion,
GanglionGapPAAmacrine]
print "tune_params: Verifying parameters."
## Assert that all parameter lists are the same length or of length 1
max_list_len = max([len(x) for x in conn_lol]) #max length of sub-lists in the list of lists
if not all(len(i)==max_list_len or len(i)==1 for i in conn_lol):
exit("\ntune_params: ERROR: One of the lists is not the right size!\n")
## Check to see if any of the strengths are set to 0
## nonZeroStrength is true if there is a nonzero strength (false if strength is 0)
nonZeroStrength = [strength not in '0' for connlist in conn_lol for strength in [max(connlist)]] # max val in each list is not 0
if len(conn_lol) != len(nonZeroStrength):
exit("\ntune_params: ERROR: nonZeroStrength array is not the appropriate length")
## Open file
if os.path.isfile(param_in_file):
try:
print "tune_params: Opening template param file "+param_in_file+"."
in_fid = open(param_in_file)
param_lines = in_fid.readlines()
in_fid.close()
except IOError as e:
print "tune_params: Failed to open file "+param_in_file+" with error:\n"
exit(e)
else:
exit("\ntune_params: ERROR: Couldn't find file "+param_in_file+"!\n")
## Modify pvp file and run petavision for each parameter
for num_steps in num_steps_list:
for param_idx in range(max_list_len):
out_lines = param_lines[:] # Dereference to make copy of list
idx_out_filename = out_filename+str(param_idx)+'.pv'
full_out_file = param_out_file+'_p'+str(param_idx)+'_ns'+num_steps+'.pv'
full_results_path = results_path+'/p'+str(param_idx)+'/ns'+num_steps
print "tune_params: Modifying template file."
for line_num in range(len(out_lines)):
line = out_lines[line_num]
## Activate layers that have been set in the global vars section
enable = False
if 'Movie "Movie"' in line and Movie==1:
enable = True
elif 'ANNLayer "ImageBuffer"' in line and ImageBuffer==1:
enable = True
elif 'ANNLayer "ConstantVrest"' in line and ConstantVrest==1:
enable = True
elif 'LIFGap "Cone"' in line and Cone==1:
enable = True
elif 'SigmoidLayer "ConeSigmoidON"' in line and ConeSigmoidON==1:
enable = True
elif 'SigmoidLayer "ConeSigmoidOFF"' in line and ConeSigmoidOFF==1:
enable = True
elif 'LIF "BipolarON"' in line and BipolarON==1:
enable = True
elif 'SigmoidLayer "BipolarSigmoidON"' in line and BipolarSigmoidON==1:
enable = True
elif 'LIFGap "Horizontal"' in line and Horizontal==1:
enable = True
elif 'GapLayer "HorizontalGap"' in line and HorizontalGap==1:
enable = True
elif 'SigmoidLayer "HorizontalSigmoidON"' in line and HorizontalSigmoidON==1:
enable = True
elif 'SigmoidLayer "HorizontalSigmoidOFF"' in line and HorizontalSigmoidOFF==1:
enable = True
elif 'LIF "WFAmacrineON"' in line and WFAmacrineON==1:
enable = True
elif 'GapLayer "SFAmacrineGap"' in line and SFAmacrineGap==1:
enable = True
elif 'SigmoidLayer "WFAmacrineSigmoidON"' in line and WFAmacrineSigmoidON==1:
enable = True
elif 'LIFGap "GanglionON"' in line and GanglionON==1:
enable = True
elif 'GapLayer "GanglionGapON"' in line and GanglionGapON==1:
enable = True
elif 'LIFGap "PAAmacrineON"' in line and PAAmacrineON==1:
enable = True
elif 'GapLayer "PAAmacrineGapON"' in line and PAAmacrineGapON==1:
enable = True
elif 'LIF "BipolarOFF"' in line and BipolarOFF==1:
enable = True
elif 'SigmoidLayer "BipolarSigmoidOFF"' in line and BipolarSigmoidOFF==1:
enable = True
elif 'LIF "WFAmacrineOFF"' in line and WFAmacrineOFF==1:
enable = True
elif 'SigmoidLayer "WFAmacrineSigmoidOFF"' in line and WFAmacrineSigmoidOFF==1:
enable = True
elif 'LIFGap "GanglionOFF"' in line and GanglionOFF==1:
enable = True
elif 'GapLayer "GanglionGapOFF"' in line and GanglionGapOFF==1:
enable = True
elif 'LIFGap "PAAmacrineOFF"' in line and PAAmacrineOFF==1:
enable = True
elif 'GapLayer "PAAmacrineGapOFF"' in line and PAAmacrineGapOFF==1:
enable = True
elif 'LIFGap "SFAmacrine"' in line and SFAmacrine==1:
enable = True
elif 'SigmoidLayer "SFAmacrineSigmoid"' in line and SFAmacrineSigmoid==1:
enable = True
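            ## The connection branches below all follow the same pattern: find the conn_list
            ## entry whose name is contained in the key string, then enable the block only if
            ## that connection's strength list has a nonzero value (via nonZeroStrength).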
elif 'KernelConn "ImageToImageBuffer"' in line and ImageBuffer==1: #########Connections
zero_index = [idx for idx, enum in enumerate([param in 'ImageImageBuffer' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "ConstantVrestToImageBuffer"' in line and ConstantVrest==1 and ImageBuffer==1:
zero_index = [idx for idx, enum in enumerate([param in 'ConstantVrestImageBuffer' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'GapConn "ImageBufferToCone"' in line and Cone==1:
zero_index = [idx for idx, enum in enumerate([param in 'ImageBufferCone' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "ConeSigmoidONToBipolarON"' in line and ConeSigmoidON==1 and BipolarON==1:
zero_index = [idx for idx, enum in enumerate([param in 'ConeSigmoidBipolar' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "ConeSigmoidONToHorizontal"' in line and ConeSigmoidON==1 and Horizontal==1:
zero_index = [idx for idx, enum in enumerate([param in 'ConeSigmoidHorizontal' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'GapConn "HorizontalGapToHorizontal"' in line and HorizontalGap==1 and Horizontal==1:
zero_index = [idx for idx, enum in enumerate([param in 'HorizontalGapHorizontal' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "HorizontalSigmoidToConeON"' in line and HorizontalSigmoidON==1 and Cone==1:
zero_index = [idx for idx, enum in enumerate([param in 'HorizontalSigmoidConeON' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "HorizontalSigmoidToBipolarOFF"' in line and HorizontalSigmoidOFF==1 and Cone==1:
zero_index = [idx for idx, enum in enumerate([param in 'HorizontalSigmoidBipolarOFF' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "BipolarSigmoidONToGanglionON"' in line and BipolarSigmoidON==1 and GanglionON==1:
zero_index = [idx for idx, enum in enumerate([param in 'BipolarSigmoidGanglion' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'GapConn "SFAmacrineGapToSFAmacrine"' in line and SFAmacrineGap==1 and SFAmacrine==1:
zero_index = [idx for idx, enum in enumerate([param in 'SFAmacrineGapSFAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "WFAmacrineSigmoidONToBipolarON"' in line and WFAmacrineSigmoidON==1 and BipolarON==1:
zero_index = [idx for idx, enum in enumerate([param in 'WFAmacrineSigmoidBipolarON' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "BipolarSigmoidONToWFAmacrineON"' in line and BipolarSigmoidON==1 and WFAmacrineON==1:
zero_index = [idx for idx, enum in enumerate([param in 'BipolarSigmoidWFAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "BipolarSigmoidONToPAAmacrineON"' in line and BipolarSigmoidON==1 and PAAmacrineON==1:
zero_index = [idx for idx, enum in enumerate([param in 'BipolarSigmoidPAAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'GapConn "GanglionGapONToPAAmacrineON"' in line and GanglionGapON==1 and PAAmacrineON==1:
zero_index = [idx for idx, enum in enumerate([param in 'GanglionGapPAAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'GapConn "PAAmacrineGapONToGanglionON"' in line and PAAmacrineGapON==1 and GanglionON==1:
zero_index = [idx for idx, enum in enumerate([param in 'PAAmacrineGapGanglion' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'GapConn "PAAmacrineGapONToPAAmacrineON"' in line and PAAmacrineGapON==1 and PAAmacrineON==1:
zero_index = [idx for idx, enum in enumerate([param in 'PAAmacrineGapPAAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "PAAmacrineONToGanglionON"' in line and PAAmacrineON==1 and GanglionON==1:
zero_index = [idx for idx, enum in enumerate([param in 'PAAmacrineGanglion' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "PAAmacrineONToPAAmacrineON"' in line and PAAmacrineON==1:
zero_index = [idx for idx, enum in enumerate([param in 'PAAmacrinePAAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "BipolarSigmoidONToSFAmacrine"' in line and BipolarSigmoidON==1 and SFAmacrine==1:
zero_index = [idx for idx, enum in enumerate([param in 'BipolarSigmoidSFAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "WFAmacrineSigmoidONToGanglionON"' in line and WFAmacrineSigmoidON==1 and GanglionON==1:
zero_index = [idx for idx, enum in enumerate([param in 'WFAmacrineSigmoidGanglionON' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "SFAmacrineSigmoidToPAAmacrineON"' in line and SFAmacrineSigmoid==1 and PAAmacrineON==1:
zero_index = [idx for idx, enum in enumerate([param in 'SFAmacrineSigmoidPAAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "PAAmacrineONToWFAmacrineON"' in line and PAAmacrineON==1 and WFAmacrineON==1:
zero_index = [idx for idx, enum in enumerate([param in 'PAAmacrineWFAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "WFAmacrineSigmoidONToSFAmacrine"' in line and WFAmacrineSigmoidON==1 and SFAmacrine==1:
zero_index = [idx for idx, enum in enumerate([param in 'WFAmacrineONSFAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "WFAmacrineSigmoidOFFToSFAmacrine"' in line and WFAmacrineSigmoidOFF==1 and SFAmacrine==1:
zero_index = [idx for idx, enum in enumerate([param in 'WFAmacrineOFFSFAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "ConeSigmoidOFFToBipolarOFF"' in line and ConeSigmoidOFF==1 and BipolarOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'ConeSigmoidBipolar' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "BipolarSigmoidOFFToWFAmacrineOFF"' in line and BipolarSigmoidOFF==1 and WFAmacrineOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'BipolarSigmoidWFAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "BipolarSigmoidOFFToPAAmacrineOFF"' in line and BipolarSigmoidOFF==1 and PAAmacrineOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'BipolarSigmoidPAAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "WFAmacrineSigmoidOFFToBipolarOFF"' in line and WFAmacrineSigmoidOFF==1 and BipolarOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'WFAmacrineSigmoidBipolarOFF' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "BipolarSigmoidOFFToGanglionOFF"' in line and BipolarSigmoidOFF==1 and GanglionOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'BipolarSigmoidGanglion' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "BipolarSigmoidOFFToSFAmacrine"' in line and BipolarSigmoidOFF==1 and SFAmacrine==1:
zero_index = [idx for idx, enum in enumerate([param in 'BipolarSigmoidSFAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'GapConn "GanglionGapOFFToPAAmacrineOFF"' in line and GanglionGapOFF==1 and PAAmacrineOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'GanglionGapPAAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'GapConn "PAAmacrineGapOFFToGanglionOFF"' in line and PAAmacrineGapOFF==1 and GanglionOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'PAAmacrineGapGanglion' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'GapConn "PAAmacrineGapOFFToPAAmacrineOFF"' in line and PAAmacrineGapOFF==1 and PAAmacrineOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'PAAmacrineGapPAAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "PAAmacrineOFFToGanglionOFF"' in line and PAAmacrineOFF==1 and GanglionOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'PAAmacrineGanglion' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "PAAmacrineOFFToPAAmacrineOFF"' in line and PAAmacrineOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'PAAmacrinePAAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "WFAmacrineSigmoidOFFToGanglionOFF"' in line and WFAmacrineSigmoidOFF==1 and GanglionOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'WFAmacrineSigmoidGanglionOFF' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "SFAmacrineSigmoidToPAAmacrineOFF"' in line and SFAmacrineSigmoid==1 and PAAmacrineOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'SFAmacrineSigmoidPAAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "PAAmacrineOFFToWFAmacrineOFF"' in line and PAAmacrineOFF==1 and WFAmacrineOFF==1:
zero_index = [idx for idx, enum in enumerate([param in 'PAAmacrineWFAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
elif 'KernelConn "WFAmacrineSigmoidOFFToSFAmacrine"' in line and WFAmacrineSigmoidOFF==1 and SFAmacrine==1:
                zero_index = [idx for idx, enum in enumerate([param in 'WFAmacrineOFFSFAmacrine' for param in conn_list]) if enum==True]
if len(zero_index)>0:
if nonZeroStrength[zero_index[0]]:
enable = True
if enable == True:
out_lines = enable_block(line_num,out_lines)
enable = False
## Make substitutions for desired param values
            indices = [idx for idx, enum in enumerate([param in line for param in conn_list]) if enum == True] # indices of conn_list entries whose names appear in this line
if len(indices) > 0: #if the current line has any of the parameters
for lol_idx in indices:
if len(conn_lol[lol_idx])>1:
new_line = re.sub(conn_list[lol_idx],conn_lol[lol_idx][param_idx],out_lines[line_num],count=1)
else:
new_line = re.sub(conn_list[lol_idx],conn_lol[lol_idx][0],out_lines[line_num],count=1)
out_lines[line_num] = new_line
line = new_line
if 'NUMSTEPS' in line:
new_line = re.sub('NUMSTEPS',num_steps,line,count=0)
out_lines[line_num] = new_line
line = new_line
if 'OUTPATH' in line:
new_line = re.sub('OUTPATH',full_results_path,line,count=0)
out_lines[line_num] = new_line
line = new_line
if 'PARAMSFILE' in line:
new_line = re.sub('PARAMSFILE',idx_out_filename,line,count=0)
out_lines[line_num] = new_line
line = new_line
if 'INIMGPATH' in line:
new_line = re.sub('INIMGPATH',input_file,line,count=0)
out_lines[line_num] = new_line
line = new_line
if 'INMOVPATH' in line:
new_line = re.sub('INMOVPATH',input_file,line,count=0)
out_lines[line_num] = new_line
line = new_line
if 'STOCHASTICRELFLAG' in line:
new_line = re.sub('STOCHASTICRELFLAG',stochastic_flag,line,count=0)
out_lines[line_num] = new_line
line = new_line
if 'PADelay' in line:
new_line = re.sub('PADelay',PA_delay,line,count=0)
out_lines[line_num] = new_line
line = new_line
if 'PREACTNOTRATE' in line:
                if stochastic_flag == '0':
                    new_line = re.sub('PREACTNOTRATE','1',line,count=0)
                elif stochastic_flag == '1':
                    new_line = re.sub('PREACTNOTRATE','0',line,count=0)
else:
print("\ntune_params: STOCHASTICRELFLAG must be 0 or 1")
exit()
out_lines[line_num] = new_line
line = new_line
#####ENDFOR - line_num
##Write to output file
print "tune_params: Writing new params file:\n "+full_out_file
try:
out_fid = open(full_out_file,'w')
except IOError as e:
print "\ntune_params: Failed to open file "+full_out_file+" with error:\n"
exit(e)
for out_line in out_lines:
out_fid.write("%s" % out_line)
out_fid.close()
## Run petavision for this output file
if run_PetaVision:
print "tune_params: Running PetaVision.\n\n"
os.system('mkdir -p '+full_results_path)
mpi_cmd = '/opt/local/bin/openmpirun -np '+mpi_np
if pipe_output_to_file:
run_cmd = mpi_cmd+' '+run_file+' -rows '+mpi_rows+' -columns '+mpi_columns+' -p '+full_out_file+' > '+full_results_path+'/stdout.txt'
else:
run_cmd = mpi_cmd+' '+run_file+' -rows '+mpi_rows+' -columns '+mpi_columns+' -p '+full_out_file
os.system('time '+run_cmd)
os.system('cp '+full_out_file+' '+full_results_path)
print "\n\ntune_params: Finished running PetaVision."
#####ENDFOR - param_idx
#####ENDFOR - num_steps
#####ENDFUNCTION
|
Tom Murphy has joined Eastbourne Borough on a month-long loan.
One of Jay Saunders’ summer signings, the 24-year-old has made 20 appearances for the Stones so far this season but has found starting opportunities limited of late.
Murphy is set to feature for The Sports in their Buildbase FA Trophy match away at Leiston tomorrow. |
#!/usr/bin/env python
# Copyright (c) 2014 UCLA
# Authors: Haodong Chen and Thomas M. Vondriska
#
# This software is distributable under the terms of the GNU General
# Public License (GPL) v2, the text of which can be found at
# http://www.gnu.org/copyleft/gpl.html. Installing, importing or
# otherwise using this module constitutes acceptance of the terms of
# this License.
#
# Disclaimer
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Comments and/or additions are welcome (send e-mail to:
# hdchen@ucla.edu).
import os, sys, subprocess
from optparse import OptionParser
from utils import *
def Step1():
# Demultiplex
parser = OptionParser()
parser.add_option("-i", "--input", type="string", dest="folder", \
help="Input file folder", default=os.getcwd() )
parser.add_option("--conf", type="string", dest="CONFPATH")
(options, args) = parser.parse_args()
Params = Conf_read(options.CONFPATH)
if Params['MULTIPLEX'] != "False":
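        # Normalize the barcode list before passing it on (e.g. "01,02" -> "1,2"; example values are illustrative)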
Params['BARCODES'] = ','.join([str(int(x)) for x in Params['BARCODES'].split(",")])
file_list_1 = [x for x in os.listdir(options.folder) if \
(x.endswith("_qseq.txt") or x.endswith("_qseq.txt.gz")) \
and x.split("_")[-3] == "1"]
file_list_1.sort()
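    # Runs as one task of an SGE array job: SGE_TASK_ID (1-based) selects which
    # read-1 qseq file this task will demultiplex.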
f1 = file_list_1[int(os.environ["SGE_TASK_ID"]) - 1]
f1name = f1.split("_")
f1name[-3] = "2"
f2 = "_".join(f1name)
p = '-I %s -s %s -b %s -l %s -m %s'%(options.folder, f1, f2, Params['BARCODES'], Params['BCMISMATCH'])
cmd = ["./MyDemultiplex.py"]
cmd.extend(p.split(" "))
#print >> sys.stdout, " ".join(cmd)
process = subprocess.Popen(" ".join(cmd), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
print >> sys.stdout, out
if __name__ == "__main__":
Step1()
|
The Office of College Admissions reviews each application on a holistic basis—a complicated name for a very simple concept. The admissions committee considers a candidate’s entire application—academic and extracurricular records, essays, letters of recommendation, and SAT or ACT scores, if these are submitted—in order to gain a comprehensive understanding of a student’s qualifications and potential. Learn more about our college admissions process.
Is it possible to transfer to UChicago from another college or university?
Yes. The University of Chicago defines a transfer applicant as a student who has completed at least one term as a full-time student in a degree-seeking program at another college or university.
Transfer students must attend the College for at least two academic years (six quarters) and complete the Core curriculum and at least half of their major with us. Tentative evaluations of transfer credit are conducted by the Office of the Dean of Students and mailed out over the summer to enrolling students. Learn more about transfer credit.
Does UChicago consider applicants who have been homeschooled?
Yes. Homeschooled applicants have the same application requirements as any other candidate.
Can students begin studies at UChicago during the spring quarter?
No; all students must begin studies in the fall quarter only.
November 1 is our submission deadline for Early Action and Early Decision I applications. January 2 is our submission deadline for Early Decision II and Regular Decision applications.
Yes. UChicago commits to meeting the full demonstrated financial need of every admitted student through a need-based financial aid award that includes no loan expectation. Each financial aid award is tailored to the student and family’s particular financial profile; we require a few forms in order to offer students an appropriate package. Although domestic applicants may apply for financial aid at any time, we recommend that they apply for aid when they apply for admission in order to receive an aid decision in a timely manner. UChicago does not charge an application fee to students who indicate that they intend to apply for financial aid. Learn more about applying for UChicago’s financial aid.
No, students apply to the one undergraduate College as a whole. However, students are encouraged to indicate their academic interests in their application to help the admissions committee form a more complete picture of them.
How can students let the admission staff know about their extracurricular talents?
Students may submit supporting materials for music, theater and dance, or visual arts and film, and/or a short creative writing sample or scientific research abstract. These items should be submitted digitally through the “Portfolio” section of the student’s UChicago Account. They may also submit an additional recommendation from anyone who knows them well (e.g. a coach, music teacher, mentor, or member of their community).
We will accept supplemental materials even after an application has been submitted. Our deadlines for supplemental materials are: November 15 for Early Action and Early Decision I; January 15 for Early Decision II and Regular Decision.
How can students let the admission staff know about their interests in athletics?
Student athletes interested in one of our Division III varsity programs should contact our coaches for more information about varsity athletics at UChicago.
How do students apply to UChicago?
Students should complete the Coalition Application or Common Application and the University of Chicago Supplement. The Coalition and Common Applications allow students to enter information about their background, academic profile, and extracurricular activities and submit applications to all the schools to which they are applying.
The University of Chicago Supplement requires one extended essay from our list of several prompts and one short essay on their interest in UChicago. It can be submitted through the Coalition Application or Common Application, or through the UChicago Account. View our supplemental essay questions.
The University of Chicago does not charge a college application fee for students applying for need-based financial aid. For students not applying for need-based financial aid, our application fee is $75, and can be submitted through the Coalition Application or Common Application.
Are standardized test scores required for admission?
Many applicants will feel that an SAT or ACT score can reflect their academic preparedness.
Some applicants may feel that an SAT or ACT score does not fully reflect their academic preparedness or potential. If this is the case for a domestic first-year applicant, they may select UChicago's test-optional method of application and not supply SAT or ACT scores with their application. We welcome any student regardless of testing plan to submit additional material (detailed in the Supplements section) that they feel best highlights their skills, talents, and potential contributions to UChicago.
The SAT, ACT, and other standard measures can continue to be an important part of the University of Chicago’s holistic admission process for students electing to send scores and are a required part of the application process at many other highly selective schools. These tests can provide valuable information about a student which we and other colleges will consider alongside the other elements in a student’s application. We encourage students to take standardized tests like the SAT and ACT, and to share their scores with us if they think that they are reflective of their ability and potential. Given that many of our peers do require testing, we anticipate that the vast majority of students will continue to take tests and may still submit their test scores to UChicago.
*We require an SAT or ACT score from transfer applicants and some form of standardized testing from students who attend a high school outside the United States. International students may elect to submit one of several other forms of testing in lieu of an SAT or ACT score. Visit the International Applicants page for details. Students who are undocumented/DACA and attend high school in the United States may choose to use our test-optional policy.
Students who attend high school in the U.S. and choose to submit SAT or ACT scores may share either official or self-reported SAT or ACT scores. These students will not be required to submit official score reports unless they are admitted and choose to enroll. Students are able to self-report test scores through the Coalition or Common Application, or may share a transcript that includes test scores.
Students who will graduate from a high school outside the U.S. and transfer applicants will be required to submit an official score report at the time of application. To be considered official, scores must be sent to the University of Chicago directly from the testing agency. UChicago’s SAT code is 1832; the ACT code is 1152.
While we would, if possible, like to receive applicants' scores before the appropriate deadline, we will accept October ACT and November SAT scores for Early Action and Early Decision I, December SAT and ACT scores for Early Decision II, and January SAT and February ACT scores for Regular Decision.
What materials should the school counselor or teachers send UChicago?
Secondary school counselors should complete the Secondary School Report and submit it along with an official transcript. We also require two recommendations from teachers who have taught the applicant in an academic subject.
All high school counselors and teachers have the option of submitting letters of recommendation and school forms online via the Coalition Application or Common Application. Teachers and counselors may print out and submit these forms on paper even if students submit their applications online.
Can students postpone matriculation at UChicago?
Yes. Students interested in taking a "gap year" between acceptance and attendance are welcome to postpone their matriculation at UChicago. If this might be an option, it is a good idea for students to let us know as soon as possible. Alerting our office to a possible gap year will not negatively affect an application. If an admitted student is interested in taking a gap year, they should review the gap year section of this FAQ page and contact their regional admissions counselor.
How should students accept their spot on the waitlist?
To be considered for admission, a student who has been offered a place on the waitlist needs to accept that place through their UChicago Account. Our waitlist is not ranked, meaning that students have a place in a pool of students who have been waitlisted rather than a ranked position in a line.
At the point of decision release, we do not yet know if or when we will be able to extend offers of admission to students off the waitlist. Therefore, it is imperative that waitlisted students accept and secure a place at a school to which they have been accepted by May 1.
Can students appeal their admission decision or ask to have the decision explained?
There is no appeal process, and we cannot reconsider applications or offer individual explanations for our decisions. |
__doc__ = """
This test concurrently moves a directory (the 'mover' worker) while
files are added to it (the 'adder' worker). The expected outcome is that
all added files are kept on the server and are found in the final directory.
"""
import platform

from smashbox.utilities import *
if platform.system().lower() == "darwin":
do_not_report_as_failure()
nfiles = int(config.get('concurrentMoveDir_nfiles',100))
filesize = int(config.get('concurrentMoveDir_filesize',10))
delaySeconds = int(config.get('concurrentMoveDir_delaySeconds',3)) # if delaySeconds > 0 the mover waits; otherwise the adder waits
testsets = [
{'concurrentMoveDir_nfiles':100,
'concurrentMoveDir_filesize':10,
'concurrentMoveDir_delaySeconds':10 }, # moving the directory while lots of tiny files are uploaded
{'concurrentMoveDir_nfiles':10,
'concurrentMoveDir_filesize':OWNCLOUD_CHUNK_SIZE(1.1),
'concurrentMoveDir_delaySeconds':5 }, # moving the directory while a large file is chunk-uploaded
{'concurrentMoveDir_nfiles':2,
'concurrentMoveDir_filesize':OWNCLOUD_CHUNK_SIZE(5),
'concurrentMoveDir_delaySeconds':5 }, # moving the directory while a large file is chunk-uploaded
{'concurrentMoveDir_nfiles':20,
'concurrentMoveDir_filesize':OWNCLOUD_CHUNK_SIZE(0.9),
'concurrentMoveDir_delaySeconds':10 }, # moving the directory while more but smaller files are uploaded
{'concurrentMoveDir_nfiles':5,
'concurrentMoveDir_filesize':OWNCLOUD_CHUNK_SIZE(0.1),
'concurrentMoveDir_delaySeconds':-5 }, # moving the directory before files are uploaded
{'concurrentMoveDir_nfiles':5,
'concurrentMoveDir_filesize':OWNCLOUD_CHUNK_SIZE(2.1),
'concurrentMoveDir_delaySeconds':-10 } # moving the directory before large files are chunk-uploaded
]
import time
import tempfile
from smashbox.utilities.hash_files import *
@add_worker
def creator(step):
reset_owncloud_account()
reset_rundir()
step(1,'upload empty subdirectory')
d = make_workdir()
d2 = os.path.join(d,'subdir')
mkdir(d2)
run_ocsync(d)
step(6,'final check')
run_ocsync(d)
final_check(d)
@add_worker
def adder(step):
step(2,'sync the empty directory created by the creator')
d = make_workdir()
run_ocsync(d)
step(3,'locally create content in the subdirectory')
d2 = os.path.join(d,'subdir')
for i in range(nfiles):
create_hashfile(d2, size=filesize) #createfile_zero(os.path.join(d2,"test.%02d"%i),count=filesize, bs=1000)
step(4,'sync the added files in parallel')
if delaySeconds<0:
sleep(-delaySeconds)
run_ocsync(d)
# when directory is renamed while file is uploaded the PUT request finishes with Conflict error code
step(5,'mover has finished synchronizing')
# extra sync run to make sure that changes from mover have been correctly propagated
# first run will not be successful because files with Conflict response are blacklisted
# second run removes the blacklist and updates the files from scratch again
run_ocsync(d,n=2)
step(6,'final check')
run_ocsync(d)
final_check(d)
@add_worker
def mover(step):
step(2,'sync the empty directory created by the creator')
d = make_workdir()
run_ocsync(d)
step(3,'locally rename subdir to subdir2')
s1 = os.path.join(d,'subdir')
s2 = os.path.join(d,'subdir2')
os.rename(s1,s2)
step(4,'sync the subdir2 in parallel')
if delaySeconds>0:
sleep(delaySeconds)
run_ocsync(d)
step(6,'final check')
run_ocsync(d)
final_check(d)
@add_worker
def checker(step):
step(6,'sync the final state of the repository into a fresh local folder')
d = make_workdir()
run_ocsync(d)
final_check(d)
def final_check(d):
list_files(d,recursive=True)
d2 = os.path.join(d,'subdir2')
logger.info('final output: %s',d2)
all_files,analysed_files,bad_files = analyse_hashfiles(d2)
error_check(bad_files == 0,'%s corrupted files in %s'%(bad_files,d2))
error_check(analysed_files == nfiles,"not all files are present (%d/%d)"%(nfiles,analysed_files)) # FIXME: well, there may be other files - we don't check that yet
#runcmd('find %s'%d)
#log('content of /subdir as reported by webdav')
#list_webdav_propfind('subdir')
|
MASHPEE, Mass. — In his homily at a farewell Mass at Christ the King Parish, the pastor, Msgr. Daniel F. Hoye, said, quoting a Boston newspaper columnist, “Retired priests are like retired Marines — there are none.” He continued, saying, “When diocesan priests retire, we don’t stop celebrating the Sacraments,” as long as they’re physically able.
Msgr. Hoye retired as a diocesan priest on June 30, joining his brother priest, Father Michael M. Camara, a parochial vicar at Santo Christo Parish in Fall River, whose retirement was effective the same day.
Both men were born in 1946, Msgr. Hoye on January 18 in Taunton, Father Camara on August 12 in Fall River.
While both are Catholic priests and share a birth year and retirement day, their paths to and since ordination are very different.
Msgr. Hoye attended a Catholic high school and, following his ordination, held various national positions before truly settling into parish life more than 15 years later.
Father Camara was the product of a public high school, spent time in the United States Air Force, was a religious member of the Franciscan community for 27 years, and was ordained to the priesthood at age 42, 27 years after Msgr. Hoye.
And, while their roads to the priesthood and ensuing responsibilities varied, the greatest joy of their priesthood is quite the same.
Msgr. Hoye is a graduate of Msgr. James Coyle High School in Taunton. He went on to St. Thomas Seminary in Bloomfield, Conn., achieving an associate of arts degree there.
He then went to St. John’s Seminary in Brighton, earning bachelor of arts degrees in Divinity and arts, and a master’s in theology.
Msgr. Hoye was ordained a priest for the Diocese of Fall River on May 13, 1972, at St. Mary’s Cathedral in Fall River by Bishop Daniel A. Cronin.
His first assignment was as parochial vicar at St. John the Evangelist Parish in Attleboro, and then assumed the same position at St. Mary’s Parish in Norton until 1973.
In 1973 he attended The Catholic University of America in Washington, D.C., until 1975, receiving a licentiate in canon law.
From 1975 through 1997, Msgr. Hoye served in various assignments, including: vice officialis at the diocesan Tribunal Office; associate general secretary for the National Conference of Catholic Bishops, United States Catholic Conference in Washington, D.C. (now the United States Conference of Catholic Bishops); as general secretary of the NCCB/USCC; a tribunal judge and defender of the bond for the Diocese of Fall River; and as episcopal vicar for the Taunton and Attleboro deaneries.
On May 3, 1982 he received the title of Prelate of Honor from Pope John Paul II, naming him a monsignor.
In June of 1989, Msgr. Hoye was named pastor of St. John the Evangelist Parish in Attleboro until June of 2006, when he became pastor of Christ the King Parish in Mashpee, from where he retired on June 30.
In the diocese, Msgr. Hoye also served on the Priests Council; was chairman of the Priests Council; on the Personnel Board; the College of Consultors; chaplain of the Attleboro area Serra Club; and as moderator for the Diocesan Council of Catholic Women District IV.
He has served on the board of directors for Catholic Relief Services; Catholic Telecommunications Network of America; and St. Francis Preparatory School in Spring Grove, Pa.
Msgr. Hoye served on the Board of Governors for the Canon Law Society of America, and was vice president and president of the society.
He is also a member of the Canon Law Society of Great Britain and Ireland.
Father Camara attended B.M.C. Durfee High School in Fall River.
Father Camara entered the Franciscan Friars, Province of the Immaculate Conception, New York, in 1973.
He was a religious Brother with the community for 15 years, involved with high school ministry at Serra Catholic High School in McKeesport, Pa., where he taught, was Dean of Students, an administrator, and assistant headmaster.
He was also chaplain at St. Francis Hospital in upstate New York, and a teacher at Christopher Columbus High School in Boston.
He was received into the order in August of 1975 and made his solemn profession on Dec. 28, 1982.
He later went on to attend St. Vincent College in Latrobe, Pa., earning a bachelor of arts in Religious Education in 1984.
Father Camara earned his master’s of Divinity from St. Vincent Seminary in Latrobe and was ordained into the priesthood on May 27, 1989 at St. Michael’s Church in Fall River, by Bishop Virgilio Lopez, OFM, of Trujillo, Honduras.
Father Camara was incardinated into the Diocese of Fall River on Feb. 17, 1998 by Bishop Sean P. O’Malley, OFM, Cap.
His parish assignments in the Diocese of Fall River included: St. Kilian Parish, St. John the Baptist Parish, Our Lady of Mount Carmel Parish and Our Lady of the Immaculate Conception Parish, all in New Bedford; Holy Family Parish in East Taunton; St. Michael’s Parish, Our Lady of Health Parish and Santo Christo Parish, all in Fall River.
He has also served as chaplain at Saint Anne’s Hospital in Fall River, and was on the faculty at Bishop Connolly High School in Fall River.
Father Camara told The Anchor, “Really, I can say nothing has been a big surprise for me over the years.” Recalling his time in the Air Force, in the Franciscan community and eventually as a diocesan priest, he added, “I think my past experiences have made me realize that nothing will come as a complete surprise.”
Both enter retirement with warm memories of the faithful whom they served through the years. “My parishioners have helped me appreciate the challenges of family life, the difficult journey of the adolescent, and the aging process,” said Msgr. Hoye.
“The parishioners over the years have indeed been a big help to me by their presence. By their comments, whether good or bad, all this has helped me grow over the years,” said Father Camara.
Msgr. Hoye plans on taking the summer off, traveling, and “wintering” in Florida. Father Camara will take some “quality time to relax and unwind,” while residing at the Cardinal Medeiros Residence in Fall River.
“In September I will begin to help out at St. Mary’s Parish in South Dartmouth,” said Msgr. Hoye. “I will be available to help wherever needed,” added Father Camara.
Just like a Marine — or a priest. |
# generate job status transition graph
# usage:
# 1) run ganga and this file as a script
# or 2) cd ganga/python, run python interactively and execfile(this_file)
ARC_LABELS = True
STYLED_EDGES = True
DEBUG = False
from Ganga.GPIDev.Lib.Job import Job
import os
g = Job.status_graph
initial_states = Job.initial_states
transient_states = Job.transient_states
import os.path
dot_file = os.path.abspath('ganga_job_stat.dot')
out_type = 'gif'
out_file = dot_file.replace('.dot','.'+out_type)
def debug(s):
if DEBUG:
print 'DEBUG:',s
f = file(dot_file,'w')
print >> f, 'digraph JobStatus {'
for node in g:
debug('src state: %s'%node)
for dest in g[node]:
debug('dest state: %s'%dest)
LAB = []
label = g[node][dest].transition_comment
if ARC_LABELS:
LAB.append('label="%s"'%label)
LAB.append('fontsize=8')
if STYLED_EDGES:
if label.find('force') != -1:
LAB.append('style=dotted bold')
if LAB:
LAB = '['+','.join(LAB)+']'
print >>f, '%s -> %s %s;' % (node,dest,LAB)
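# Each edge above is emitted as e.g. 'submitted -> running [label="...",fontsize=8];'
# (illustrative; the actual state names and transition comments come from Job.status_graph).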
print >>f,"__start [shape=point]"
for node in initial_states:
print >>f, '__start -> %s;'%node
for node in transient_states:
print >>f, '%s [style=filled]'%node
print >>f, '}'
f.close()
print 'created', dot_file
#graphviz_top = '/afs/cern.ch/sw/lcg/external/graphviz/1.9/rh73_gcc32/'
#os.system('export LD_LIBRARY_PATH=%s.lib/graphviz:$LD_LIBRARY_PATH; %s/bin/dot -T%s %s -o%s'% (graphviz_top,graphviz_top,out_type, dot_file, out_file))
os.system('dot -T%s %s -o%s'% (out_type, dot_file, out_file))
print 'created', out_file
|
The meaning is that when your mind is steady and your pose is comfortable, the asana comes perfectly.
Let the mind be pure: do not think of anything, and focus on the breath and the chakras inside the body. Once your mind is steady, you can come into the asana more easily and more comfortably.
Imagine somebody who cannot focus while practicing yoga: the pose will not be good and cannot be balanced.
For me, I sometimes also don’t focus on my pose and don’t keep my mind steady when I practice headstand. When I did this asana, I always looked at the mirror to make sure my neck was straight, and at that time I couldn’t balance my body and fell down immediately.
The final point is that steadiness of mind is required to achieve the best asana when practicing yoga.
#------------------------------------------------------------------------------
# Copyright (C) 2007 Richard W. Lincoln
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" Defines a text component.
References:
Jose.R.Fonseca, 'XDot', http://code.google.com/p/jrfonseca/wiki/XDot
"""
#------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from math import sqrt
from enthought.traits.api import \
Instance, Float, Int, String, Trait, on_trait_change
from enthought.traits.ui.api import View, Item, Group
from enthought.enable.api import Component
#from enthought.kiva import Font as KivaFont
#from enthought.kiva import MODERN
from enthought.kiva.fonttools.font import str_to_font
#from enthought.kiva import Font, MODERN
from pen import Pen
#------------------------------------------------------------------------------
# "Text" class:
#------------------------------------------------------------------------------
class Text(Component):
""" Component with text traits """
#--------------------------------------------------------------------------
# "Text" interface:
#--------------------------------------------------------------------------
# The background color of this component.
bgcolor = "transparent"#"fuchsia"
# Pen for drawing text
pen = Instance(Pen, desc="pen instance with which to draw the text")
# X-axis coordinate
text_x = Float(desc="x-axis coordinate")
# Y-axis coordinate
text_y = Float(desc="y-axis coordinate")
# Text justification
justification = Int(-1, desc="(LEFT, CENTER, RIGHT = -1, 0, 1)")
# justification = Trait("Left", {"Left": -1, "Centre": 0, "Right": 1})
# Width of the text
text_w = Float(desc="width of the text as computed by the library")
# Text to be drawn
text = String(desc="text")
#--------------------------------------------------------------------------
# Views:
#--------------------------------------------------------------------------
traits_view = View(
Group(
Item("pen", style="custom", show_label=False),
label="Pen", show_border=True
),
Item("text_x"), Item("text_y"), Item("text_w"),
Item("justification"), Item("text")
)
#--------------------------------------------------------------------------
# Draw component on the graphics context:
#--------------------------------------------------------------------------
def _draw_mainlayer(self, gc, view_bounds=None, mode="default"):
""" Draws the component """
gc.save_state()
try:
# Specify the font
font = str_to_font(str(self.pen.font))
gc.set_font(font)
gc.set_fill_color(self.pen.color_)
x = self.text_x - ( self.text_w / 2 )
y = self.text_y - ( font.size / 2 )
# Show text at the same scale as the graphics context
ctm = gc.get_ctm()
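            # Depending on the Kiva backend, the CTM is either a 6-element affine
            # transform (scale estimated from its components below) or an object
            # exposing get_ctm_scale().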
if hasattr(ctm, "__len__") and len(ctm) == 6:
scale = sqrt( (ctm[0] + ctm[1]) * (ctm[0] + ctm[1]) / 2.0 + \
(ctm[2] + ctm[3]) * (ctm[2] + ctm[3]) / 2.0 )
elif hasattr(gc, "get_ctm_scale"):
scale = gc.get_ctm_scale()
else:
raise RuntimeError("Unable to get scale from GC.")
x *= scale
y *= scale
gc.show_text_at_point(self.text, x, y)
finally:
gc.restore_state()
@on_trait_change("pen.+,text_x,text_y,text_w,justification,text")
def _update(self):
if self.pen is None:
return
x = self.text_x - (self.text_w / 2)
x2 = x + self.text_w
font = str_to_font( str(self.pen.font) )
y = self.text_y - (font.size / 2)
y2 = y + font.size
self.position = [x, y]
# If bounds are set to 0, horizontal/vertical lines will not render.
self.bounds = [max(x2 - x, 1), max(y2 - y, 1)]
self.request_redraw()
def normal_left_down(self, event):
print "Text [%s] selected at (%d, %d)" % (self.text, event.x, event.y)
#------------------------------------------------------------------------------
# Stand-alone call:
#------------------------------------------------------------------------------
if __name__ == "__main__":
from godot.component.component_viewer import ComponentViewer
from enthought.enable.api import Container
text = Text(
pen=Pen(), text="Foo",
text_x=50, text_y=50, text_w=30
)
container = Container(
# fit_window=False, auto_size=True,
bounds=[30, 10], position=[50, 50],
bgcolor="green")
container.add( text )
viewer = ComponentViewer( component=text )
viewer.configure_traits()
# EOF -------------------------------------------------------------------------
|
EPFL scientists have discovered gene variants that make children life-threateningly susceptible to common-cold viruses.
Although most children can handle viral respiratory infections like the common cold, about 2% of children become sick enough to require hospitalization. There are some known risk factors for this, but severe illness still affects 1 in 1000 previously healthy kids. EPFL scientists have now discovered an underlying cause: gene variants that stop or reduce the production of interferon beta, a protein that activates the child’s innate immune response to respiratory viruses. The work is published in PNAS.
Severe respiratory illness due to viral infection is rare but can be life threatening to children, especially in places without access to modern medical care. This is a global problem since virtually everyone will contract common-cold viruses such as human rhinoviruses (HRV) and human respiratory syncytial viruses (HRSV) before the age of one.
The problem is exacerbated by the fact that we don’t actually have a biological explanation for this extreme susceptibility. A number of biological, socio-economic and environmental risk factors are known to increase susceptibility to severe infection, such as premature birth, chronic diseases, and immunosuppression. Still, approximately 1 out of 1000 children without any of these risk factors falls severely sick when contracting the common cold.
The lab of Jacques Fellay at EPFL carried out a genomic analysis on 120 previously healthy children that had been admitted to pediatric intensive care units with respiratory failure due to infection with HRV or HRSV.
The project, led by PhD student Samira Asgari, combined exome sequencing, transcriptomic analysis, and in vitro functional testing to look for genetic variants that make children extremely susceptible to the viruses.
Asgari found that several of the children carried what is known as a “loss-of-function” variant in a gene called IFIH1. The gene produces a protein that acts as an intracellular sensor for genetic material from infecting viruses. When it detects such material, the protein triggers the production of interferon beta, which plays a key role in activating the immune response against the invading pathogen.
By disrupting the production of interferon beta, the mutation blocks the initial, “innate” immune response to HRV and HRSV. Without an early immune response, the children’s systems become overwhelmed by the virus, and without immediate medical care they are at high risk of dying from the infection. |
#!/usr/bin/python
import sys
import json
import re
from operator import itemgetter
comment_line = re.compile(r"^\s*(?:#.*)?$")
re_line = re.compile(
r'''
^\s*
(?P<name>[^:]+) # Person name
(?:
\s*:\s*
(?P<properties>.+)? # Properties
)?
''',
re.VERBOSE
)
regex = re.compile(
r'''
(?P<key>\w+) \s* = \s* # Key consists of only alphanumerics
(?P<quote>["']?) # Optional quote character.
(?P<value>.*?) # Value is a non greedy match
(?P=quote) # Closing quote equals the first.
    ($|\s+)                 # Entry ends with whitespace or end of string
''',
re.VERBOSE
)
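# Illustrative (hypothetical) input line: "John Smith : from=1990 to=2005 rating=4.5"
# would parse to {"name": "John Smith", "from": 1990, "to": 2005, "rating": 4.5}.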
input_data = []
for i, line in enumerate(sys.stdin):
if comment_line.match(line): continue
m = re_line.match(line)
if not m:
sys.stderr.write("Cannot parse line #%d: %s" % (i+1, line))
continue
person = {
"name": m.group("name").strip()
}
if m.group("properties"):
props = {match.group('key'): match.group('value') for match in regex.finditer(m.group("properties"))}
if "from" in props: props["from"] = int(props["from"])
if "to" in props: props["to"] = int(props["to"])
if "rating" in props: props["rating"] = float(props["rating"])
person.update(props)
input_data.append(person)
sorted_result = sorted(input_data, key=itemgetter('name'))
print json.dumps(sorted_result, indent=4, separators=(',', ': '))
|
Energy conservation is becoming more and more of a concern, which is why more and more people are hunting for cheaper energy bills through websites similar to Usave (https://usave.co.uk/energy/). One of the most immediate approaches to a solution is to use more energy-efficient light bulbs.
Incandescent: the most commonly used light bulb. It produces light when a thin wire (tungsten filament) is heated by electricity running through it, making it so hot that it starts to glow brightly. Many countries, including the United States, are currently passing legislation phasing out the sale of these bulbs for being so inefficient.
Compact fluorescent light bulbs (CFL): consume a quarter of the energy that incandescent bulbs do and last 10 times longer. These bulbs are filled with mercury vapor that emits UV light when electricity is applied. A coating inside the bulbs turns the UV rays into visible light.
Halogen: often found in homes as spotlights or floodlights. They work in a similar way to incandescent light bulbs by running electricity through a tungsten filament. Unlike the incandescent, a halogen gas is inside the bulb. When the tungsten burns off the filament, the gas re-deposits it back onto the filament to be used. They last longer than incandescent bulbs.
Light emitting diodes (LED): long lasting and extremely energy efficient. They use an electrical current passed through semiconductor material to illuminate the tiny light sources called LEDs. The bulbs are cool to the touch, which reflects how little energy they waste as heat.
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2006-2008, 2011-2014 Lukáš Lalinský
# Copyright (C) 2009, 2018-2021 Philipp Wolfer
# Copyright (C) 2012 Chad Wilson
# Copyright (C) 2012-2013 Michael Wiencek
# Copyright (C) 2013-2020 Laurent Monin
# Copyright (C) 2015 Ohm Patel
# Copyright (C) 2015 Sophist-UK
# Copyright (C) 2016 Suhas
# Copyright (C) 2016-2017 Wieland Hoffmann
# Copyright (C) 2016-2018 Sambhav Kothari
# Copyright (C) 2017 Ville Skyttä
# Copyright (C) 2018, 2021 Bob Swift
# Copyright (C) 2021 Gabriel Ferreira
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from picard.version import (
Version,
VersionError,
)
PICARD_ORG_NAME = "MusicBrainz"
PICARD_APP_NAME = "Picard"
PICARD_DISPLAY_NAME = "MusicBrainz Picard"
PICARD_APP_ID = "org.musicbrainz.Picard"
PICARD_DESKTOP_NAME = PICARD_APP_ID + ".desktop"
PICARD_VERSION = Version(2, 7, 0, 'dev', 3)
# optional build version
# it should be in the form '<platform>_<YYMMDDHHMMSS>'
# ie. win32_20140415091256
PICARD_BUILD_VERSION_STR = ""
def version_to_string(version, short=False):
"""Deprecated: Use picard.version.Version.to_string instead"""
if len(version) != 5:
raise VersionError("Length != 5")
if not isinstance(version, Version):
version = Version(*version)
return version.to_string(short=short)
def version_from_string(version_str):
"""Deprecated: Use picard.version.Version.from_string instead"""
return Version.from_string(version_str)
PICARD_VERSION_STR = PICARD_VERSION.to_string()
PICARD_VERSION_STR_SHORT = PICARD_VERSION.to_string(short=True)
if PICARD_BUILD_VERSION_STR:
__version__ = "%s+%s" % (PICARD_VERSION_STR, PICARD_BUILD_VERSION_STR)
PICARD_FANCY_VERSION_STR = "%s (%s)" % (PICARD_VERSION_STR_SHORT,
PICARD_BUILD_VERSION_STR)
else:
__version__ = PICARD_VERSION_STR_SHORT
PICARD_FANCY_VERSION_STR = PICARD_VERSION_STR_SHORT
# Keep those ordered
api_versions = [
"2.0",
"2.1",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6",
"2.7",
]
api_versions_tuple = [Version.from_string(v) for v in api_versions]
def crash_handler():
"""Implements minimal handling of an exception crashing the application.
This function tries to log the exception to a log file and display
a minimal crash dialog to the user.
    This function is supposed to be called from inside an except block.
"""
import sys
# Allow disabling the graphical crash handler for debugging and CI purposes.
if set(sys.argv) & {'--no-crash-dialog', '-v', '--version', '-V', '--long-version', '-h', '--help'}:
return
# First try to get traceback information and write it to a log file
# with minimum chance to fail.
from tempfile import NamedTemporaryFile
import traceback
trace = traceback.format_exc()
logfile = None
try:
with NamedTemporaryFile(suffix='.log', prefix='picard-crash-', delete=False) as f:
f.write(trace.encode(errors="replace"))
logfile = f.name
except: # noqa: E722,F722 # pylint: disable=bare-except
print("Failed writing log file {0}".format(logfile), file=sys.stderr)
logfile = None
# Display the crash information to the user as a dialog. This requires
# importing Qt5 and has some potential to fail if things are broken.
from PyQt5.QtCore import QCoreApplication, Qt, QUrl
from PyQt5.QtWidgets import QApplication, QMessageBox
app = QCoreApplication.instance()
if not app:
app = QApplication(sys.argv)
msgbox = QMessageBox()
msgbox.setIcon(QMessageBox.Critical)
msgbox.setWindowTitle("Picard terminated unexpectedly")
msgbox.setTextFormat(Qt.RichText)
msgbox.setText(
'An unexpected error has caused Picard to crash. '
'Please report this issue on the <a href="https://tickets.metabrainz.org/projects/PICARD">MusicBrainz bug tracker</a>.')
if logfile:
logfile_url = QUrl.fromLocalFile(logfile)
msgbox.setInformativeText(
'A logfile has been written to <a href="{0}">{1}</a>.'
.format(logfile_url.url(), logfile))
msgbox.setDetailedText(trace)
msgbox.setStandardButtons(QMessageBox.Close)
msgbox.setDefaultButton(QMessageBox.Close)
msgbox.exec_()
app.quit()
|
My question concerns the matter of mixing meat and dairy which I understand. How does this include poultry which does not produce milk to feed its young.
There have been two very similar questions posted on the site, and I recommend you read the responses to them for the answers to your questions.
You will find them under the Category Miscellaneous, below Subcategory Beliefs & Practices.
These two topics address why one may not mix meat and milk, and why chicken/poultry is treated as meat even though it does not produce milk.
If you find you still have questions after reading these items, please feel free to submit another specific question. |
# -*- coding: utf-8 -*-
import sys
import pygame
from PigoFont import PigoFont
class Font(pygame.font.Font, PigoFont):
def __init__(self, filename, size):
pygame.font.Font.__init__(self,filename,size)
def GetHeight(self):
return pygame.font.Font.get_height(self)
def GetAscent(self):
return pygame.font.Font.get_ascent(self)
def GetDescent(self):
return pygame.font.Font.get_descent(self)
def GlyphMetrics(self, st):
return pygame.font.Font.metrics(self, st)
def Render(self, colour=(255,255,255)):
image,extents=self.render(SDL_Color(255,255,255))
from PIL import Image
import cFont
import numpy
buff=numpy.frombuffer(image.pixels.as_ctypes(), numpy.uint8)
copy = buff.reshape((image.w,image.h,4))
colour = numpy.array(colour,dtype=numpy.ubyte)
cFont.create_font_image_alpha(copy,colour)
dupe = Image.fromarray(copy,"RGBA")
return dupe, extents
class TestCase(object):
def setUp(self):
# initialise opengl
success, fail = pygame.init()
if fail:
print "Unable to init pygame: %s\n", pygame.get_error()
sys.exit(1)
pygame.display.init()
pygame.display.set_mode( (320,200), pygame.OPENGL, 24 )
pygame.mouse.set_visible( False )
pygame.display.set_caption(str(self.__class__),str(self.__class__))
def tearDown(self):
pygame.quit()
# base library functions
def Init():
pygame.init()
pygame.font.init()
def Quit():
pygame.quit()
def Flip():
pygame.display.flip()
def Poll():
event = pygame.event.poll()
return None if event.type == pygame.NOEVENT else event
def iskey(event,key):
return event.key == key
def isquit(event):
return event.type == pygame.QUIT
KEY_KP_PLUS = pygame.K_KP_PLUS
KEY_PLUS = pygame.K_PLUS
KEY_KP_MINUS = pygame.K_KP_MINUS
KEY_MINUS = pygame.K_MINUS
KEY_ESCAPE = pygame.K_ESCAPE
KEY_EQUALS = pygame.K_EQUALS
KEY_F11 = pygame.K_F11
KEY_a = pygame.K_a
KEY_b = pygame.K_b
KEY_c = pygame.K_c
KEY_d = pygame.K_d
KEY_e = pygame.K_e
KEY_f = pygame.K_f
KEY_g = pygame.K_g
KEY_h = pygame.K_h
KEY_i = pygame.K_i
KEY_j = pygame.K_j
KEY_k = pygame.K_k
KEY_l = pygame.K_l
KEY_m = pygame.K_m
KEY_n = pygame.K_n
KEY_o = pygame.K_o
KEY_p = pygame.K_p
KEY_q = pygame.K_q
KEY_r = pygame.K_r
KEY_s = pygame.K_s
KEY_t = pygame.K_t
KEY_u = pygame.K_u
KEY_v = pygame.K_v
KEY_w = pygame.K_w
KEY_x = pygame.K_x
KEY_y = pygame.K_y
KEY_z = pygame.K_z
KEYTYPE = pygame.KEYDOWN
def ShowCursor(boolean=True):
pygame.mouse.set_visible(boolean)
def SetAppIcon(filename):
surf = pygame.image.load(filename)
pygame.display.set_icon(surf)
def SetWindowTitle(title, short=None):
pygame.display.set_caption(title, short or title)
def ListModes(depth=0):
return pygame.display.list_modes(depth,pygame.FULLSCREEN|pygame.OPENGL|pygame.HWSURFACE|pygame.DOUBLEBUF)
def SetVideoMode(w,h,depth=24,fullscreen=False):
return pygame.display.set_mode( (w,h), pygame.FULLSCREEN|pygame.OPENGL|pygame.HWSURFACE|pygame.DOUBLEBUF if fullscreen else pygame.OPENGL|pygame.HWSURFACE|pygame.DOUBLEBUF, depth)
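# A minimal usage sketch (illustrative only, not part of the original module): it
# exercises just the wrapper helpers defined above -- Init, SetVideoMode, Poll,
# isquit/iskey and Flip -- in a simple event loop.
if __name__ == "__main__":
    Init()
    SetVideoMode(640, 480)
    SetWindowTitle("pigo demo")
    running = True
    while running:
        event = Poll()
        if event is not None:
            if isquit(event):
                running = False
            elif event.type == KEYTYPE and iskey(event, KEY_ESCAPE):
                running = False
        Flip()
    Quit()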
|
This kind of sofa is not only used in the living room; you can also put it on your veranda, especially a twin sleeper sofa. The two matching pieces in one set will look nice, and the backrest will make you feel more comfortable. If you are a romantic person, you can enjoy tea time on this leather sleeper sofa sitting together with your partner.
Tags: black leather sleeper sofa queen, natuzzi leather sleeper sofa, american leather sleeper sofas, sleeper sofa leather queen. |
#!/usr/bin/python3
# clumping factor test. Creates a forest with certain size and terrain regularity based on settings
# Copyright Thomas van der Berg 2016, released under GNU GPLv3(see LICENSE)
import random
# settings
w = 25 # width of world
h = 21 # height of world
trees = 75 # number of trees in forest. If more than num of tiles in world, rest is ignored
clumping_factor = 1.5 # higher: more round and regular terrain, lower: more irregular terrain. should be > 0.0
four_border = False # use four border tiles instead of 8. Makes things square
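# How clumping works: an empty tile bordering the forest gets weight 1 when first
# seen; every additional adjacent tree multiplies that weight by clumping_factor
# (see try_add_forest_border below), so new trees tend to fill in concave spots
# and produce rounder shapes when clumping_factor > 1, and scatter more when it is < 1.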
# data
_map = [] # map to print on terminal
forest = set() # coordinates of trees already selected
forest_border = dict() # keys: coordinates of empty spaces next to trees. values: weights(used to put more trees here)
max_weight = 0 # sum of all weights
def out_of_bounds(x, y):
return (x < 0 or x >= w or y < 0 or y >= h)
def create_forest(x, y):
global max_weight
if out_of_bounds(x, y):
print("ERROR!", x, "and", y, "out of bounds!")
return False
_map[y][x] = '#'
if (x, y) in forest_border:
max_weight -= forest_border[(x, y)]
del forest_border[(x, y)]
forest.add((x, y))
try_add_forest_border(x - 1, y)
try_add_forest_border(x + 1, y)
try_add_forest_border(x, y - 1)
try_add_forest_border(x, y + 1)
if not four_border:
try_add_forest_border(x - 1, y - 1)
try_add_forest_border(x + 1, y - 1)
try_add_forest_border(x - 1, y + 1)
try_add_forest_border(x + 1, y + 1)
return True
def try_add_forest_border(x, y):
global max_weight
if not out_of_bounds(x, y) and (x, y) not in forest:
if (x, y) not in forest_border:
forest_border[(x, y)] = 1
max_weight += 1
else:
weight = forest_border[(x, y)]
max_weight -= weight
weight *= clumping_factor
max_weight += weight
forest_border[(x, y)] = weight
# initialize map
for y in range(h):
_map.append(['.'] * w)
# initial tree
create_forest(w // 2, h // 2)
# create every tree
for tree in range(1, trees):
if len(forest_border) == 0:
break
random_factor = random.uniform(0, max_weight)
found = False
for place in forest_border.items():
random_factor -= place[1]
if random_factor < 0:
tile = place[0]
found = True
break
if found:
create_forest(tile[0], tile[1])
else:
print("Error placing tree")
# print map
for y in range(h):
print(" ".join(_map[y]))
|
Asking a top manicurist to name their favorite polish is akin to asking a mother to choose her favorite child. So, we called on a handful of nail pros to pick their favorite shade of the moment, instead. The result? A collection of the prettiest, most perfect summer-to-fall transitional shades we’ve ever seen.
"I’m loving Chanel Le Vernis in Vert Obscur ($27; chanel.com). It’s a deep green that is sophisticated and dramatic. Swap out burgundy and navy for this take on a dark nail this season."
"I'm obsessed with OPI Nail Lacquer in It's A Piazza Cake ($9.50; ulta.com). When you think fall, it's this shade of orange—perfectly persimmon and a totally unique, chic color for nail polish."
"The gunmetal color of Nars Nail Polish in Amarapura ($20; narscosmetics.com) is so chic and pairs beautifully with the rich textures of your fall clothes. This is also a one-coat-wonder—the new formula and updated brush make this perfect for the at-home painter.
"A dusty blue, I like Color Club Nail Lacquer in Gossip Column ($8.50; colorclub.com) [which] is a more sophisticated take on '90s-era denim. It's a great color juxtaposed with traditional fall hues like muted camo green and deep mustard yellow." |
"""
Spherical Voronoi Code
.. versionadded:: 0.18.0
"""
#
# Copyright (C) Tyler Reddy, Ross Hemsley, Edd Edmondson,
# Nikolai Nowaczyk, Joe Pitt-Francis, 2015.
#
# Distributed under the same BSD license as SciPy.
#
import warnings
import numpy as np
import scipy
from . import _voronoi
from scipy.spatial import cKDTree
__all__ = ['SphericalVoronoi']
def calculate_solid_angles(R):
"""Calculates the solid angles of plane triangles. Implements the method of
Van Oosterom and Strackee [VanOosterom]_ with some modifications. Assumes
that input points have unit norm."""
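    # For a spherical triangle with unit vertex vectors R1, R2, R3 the solid angle
    # omega satisfies: tan(omega / 2) = det([R1 R2 R3]) / (1 + R1.R2 + R2.R3 + R3.R1)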
# Original method uses a triple product `R1 . (R2 x R3)` for the numerator.
# This is equal to the determinant of the matrix [R1 R2 R3], which can be
# computed with better stability.
numerator = np.linalg.det(R)
denominator = 1 + (np.einsum('ij,ij->i', R[:, 0], R[:, 1]) +
np.einsum('ij,ij->i', R[:, 1], R[:, 2]) +
np.einsum('ij,ij->i', R[:, 2], R[:, 0]))
return np.abs(2 * np.arctan2(numerator, denominator))
class SphericalVoronoi:
""" Voronoi diagrams on the surface of a sphere.
.. versionadded:: 0.18.0
Parameters
----------
points : ndarray of floats, shape (npoints, ndim)
Coordinates of points from which to construct a spherical
Voronoi diagram.
radius : float, optional
Radius of the sphere (Default: 1)
center : ndarray of floats, shape (ndim,)
Center of sphere (Default: origin)
threshold : float
Threshold for detecting duplicate points and
mismatches between points and sphere parameters.
(Default: 1e-06)
Attributes
----------
points : double array of shape (npoints, ndim)
the points in `ndim` dimensions to generate the Voronoi diagram from
radius : double
radius of the sphere
center : double array of shape (ndim,)
center of the sphere
vertices : double array of shape (nvertices, ndim)
Voronoi vertices corresponding to points
regions : list of list of integers of shape (npoints, _ )
the n-th entry is a list consisting of the indices
of the vertices belonging to the n-th point in points
Methods
    -------
calculate_areas
Calculates the areas of the Voronoi regions. The regions are
spherical polygons (not planar). The sum of the areas is
`4 * pi * radius**2`.
Raises
------
ValueError
If there are duplicates in `points`.
If the provided `radius` is not consistent with `points`.
Notes
-----
The spherical Voronoi diagram algorithm proceeds as follows. The Convex
Hull of the input points (generators) is calculated, and is equivalent to
their Delaunay triangulation on the surface of the sphere [Caroli]_.
The Convex Hull neighbour information is then used to
order the Voronoi region vertices around each generator. The latter
approach is substantially less sensitive to floating point issues than
angle-based methods of Voronoi region vertex sorting.
Empirical assessment of spherical Voronoi algorithm performance suggests
quadratic time complexity (loglinear is optimal, but algorithms are more
challenging to implement).
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of
points on or close to a sphere. Research Report RR-7004, 2009.
.. [VanOosterom] Van Oosterom and Strackee. The solid angle of a plane
triangle. IEEE Transactions on Biomedical Engineering,
2, 1983, pp 125--126.
See Also
--------
Voronoi : Conventional Voronoi diagrams in N dimensions.
Examples
--------
Do some imports and take some points on a cube:
>>> import matplotlib.pyplot as plt
>>> from scipy.spatial import SphericalVoronoi, geometric_slerp
>>> from mpl_toolkits.mplot3d import proj3d
>>> # set input data
>>> points = np.array([[0, 0, 1], [0, 0, -1], [1, 0, 0],
... [0, 1, 0], [0, -1, 0], [-1, 0, 0], ])
Calculate the spherical Voronoi diagram:
>>> radius = 1
>>> center = np.array([0, 0, 0])
>>> sv = SphericalVoronoi(points, radius, center)
Generate plot:
>>> # sort vertices (optional, helpful for plotting)
>>> sv.sort_vertices_of_regions()
>>> t_vals = np.linspace(0, 1, 2000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> # plot the unit sphere for reference (optional)
>>> u = np.linspace(0, 2 * np.pi, 100)
>>> v = np.linspace(0, np.pi, 100)
>>> x = np.outer(np.cos(u), np.sin(v))
>>> y = np.outer(np.sin(u), np.sin(v))
>>> z = np.outer(np.ones(np.size(u)), np.cos(v))
>>> ax.plot_surface(x, y, z, color='y', alpha=0.1)
>>> # plot generator points
>>> ax.scatter(points[:, 0], points[:, 1], points[:, 2], c='b')
>>> # plot Voronoi vertices
>>> ax.scatter(sv.vertices[:, 0], sv.vertices[:, 1], sv.vertices[:, 2],
... c='g')
>>> # indicate Voronoi regions (as Euclidean polygons)
>>> for region in sv.regions:
... n = len(region)
... for i in range(n):
... start = sv.vertices[region][i]
... end = sv.vertices[region][(i + 1) % n]
... result = geometric_slerp(start, end, t_vals)
... ax.plot(result[..., 0],
... result[..., 1],
... result[..., 2],
... c='k')
>>> ax.azim = 10
>>> ax.elev = 40
>>> _ = ax.set_xticks([])
>>> _ = ax.set_yticks([])
>>> _ = ax.set_zticks([])
>>> fig.set_size_inches(4, 4)
>>> plt.show()
"""
def __init__(self, points, radius=1, center=None, threshold=1e-06):
if radius is None:
radius = 1.
warnings.warn('`radius` is `None`. '
'This will raise an error in a future version. '
'Please provide a floating point number '
'(i.e. `radius=1`).',
DeprecationWarning)
self.points = np.array(points).astype(np.double)
self.radius = radius
self._dim = len(points[0])
if center is None:
self.center = np.zeros(self._dim)
else:
self.center = np.array(center)
# test degenerate input
self._rank = np.linalg.matrix_rank(self.points - self.points[0],
tol=threshold * self.radius)
if self._rank <= 1:
raise ValueError("Rank of input points must be at least 2")
if cKDTree(self.points).query_pairs(threshold * self.radius):
raise ValueError("Duplicate generators present.")
radii = np.linalg.norm(self.points - self.center, axis=1)
max_discrepancy = np.abs(radii - self.radius).max()
if max_discrepancy >= threshold * self.radius:
raise ValueError("Radius inconsistent with generators.")
self._calc_vertices_regions()
def _handle_geodesic_input(self):
# center the points
centered = self.points - self.center
# calculate an orthogonal transformation which puts circle on x-y axis
_, _, vh = np.linalg.svd(centered - np.roll(centered, 1, axis=0))
# apply transformation
circle = centered @ vh.T
h = np.mean(circle[:, 2])
if h < 0:
h, vh, circle = -h, -vh, -circle
circle_radius = np.sqrt(np.maximum(0, self.radius**2 - h**2))
# calculate the north and south poles in this basis
poles = [[0, 0, self.radius], [0, 0, -self.radius]]
# calculate spherical voronoi diagram on the circle
lower_dimensional = SphericalVoronoi(circle[:, :2],
radius=circle_radius)
n = len(lower_dimensional.vertices)
vertices = h * np.ones((n, 3))
vertices[:, :2] = lower_dimensional.vertices
# north and south poles are also Voronoi vertices
self.vertices = np.concatenate((vertices, poles)) @ vh + self.center
# each region contains two vertices from the plane and the north and
# south poles
self.regions = [[a, n, b, n + 1] for a, b in lower_dimensional.regions]
def _calc_vertices_regions(self):
"""
Calculates the Voronoi vertices and regions of the generators stored
in self.points. The vertices will be stored in self.vertices and the
regions in self.regions.
This algorithm was discussed at PyData London 2015 by
Tyler Reddy, Ross Hemsley and Nikolai Nowaczyk
"""
if self._dim == 3 and self._rank == 2:
self._handle_geodesic_input()
return
# get Convex Hull
conv = scipy.spatial.ConvexHull(self.points)
# get circumcenters of Convex Hull triangles from facet equations
# for 3D input circumcenters will have shape: (2N-4, 3)
self.vertices = self.radius * conv.equations[:, :-1] + self.center
self._simplices = conv.simplices
# calculate regions from triangulation
# for 3D input simplex_indices will have shape: (2N-4,)
simplex_indices = np.arange(len(self._simplices))
# for 3D input tri_indices will have shape: (6N-12,)
tri_indices = np.column_stack([simplex_indices] * self._dim).ravel()
# for 3D input point_indices will have shape: (6N-12,)
point_indices = self._simplices.ravel()
# for 3D input indices will have shape: (6N-12,)
indices = np.argsort(point_indices, kind='mergesort')
# for 3D input flattened_groups will have shape: (6N-12,)
flattened_groups = tri_indices[indices].astype(np.intp)
# intervals will have shape: (N+1,)
intervals = np.cumsum(np.bincount(point_indices + 1))
# split flattened groups to get nested list of unsorted regions
groups = [list(flattened_groups[intervals[i]:intervals[i + 1]])
for i in range(len(intervals) - 1)]
self.regions = groups
def sort_vertices_of_regions(self):
"""Sort indices of the vertices to be (counter-)clockwise ordered.
Raises
------
TypeError
If the points are not three-dimensional.
Notes
-----
For each region in regions, it sorts the indices of the Voronoi
vertices such that the resulting points are in a clockwise or
counterclockwise order around the generator point.
This is done as follows: Recall that the n-th region in regions
surrounds the n-th generator in points and that the k-th
Voronoi vertex in vertices is the circumcenter of the k-th triangle
in self._simplices. For each region n, we choose the first triangle
(=Voronoi vertex) in self._simplices and a vertex of that triangle
not equal to the center n. These determine a unique neighbor of that
triangle, which is then chosen as the second triangle. The second
triangle will have a unique vertex not equal to the current vertex or
the center. This determines a unique neighbor of the second triangle,
which is then chosen as the third triangle and so forth. We proceed
through all the triangles (=Voronoi vertices) belonging to the
generator in points and obtain a sorted version of the vertices
of its surrounding region.
"""
if self._dim != 3:
raise TypeError("Only supported for three-dimensional point sets")
if self._rank == 2:
return # regions are sorted by construction
_voronoi.sort_vertices_of_regions(self._simplices, self.regions)
def calculate_areas(self):
"""Calculates the areas of the Voronoi regions. The regions are
spherical polygons (not planar). The sum of the areas is
`4 * pi * radius**2`.
.. versionadded:: 1.5.0
Returns
-------
areas : double array of shape (npoints,)
The areas of the Voronoi regions.
"""
self.sort_vertices_of_regions()
sizes = [len(region) for region in self.regions]
csizes = np.cumsum(sizes)
num_regions = csizes[-1]
# We create a set of triangles consisting of one point and two Voronoi
# vertices. The vertices of each triangle are adjacent in the sorted
# regions list.
point_indices = [i for i, size in enumerate(sizes)
for j in range(size)]
nbrs1 = np.array([r for region in self.regions for r in region])
# The calculation of nbrs2 is a vectorized version of:
# np.array([r for region in self.regions for r in np.roll(region, 1)])
nbrs2 = np.roll(nbrs1, 1)
indices = np.roll(csizes, 1)
indices[0] = 0
nbrs2[indices] = nbrs1[csizes - 1]
# Normalize points and vertices.
pnormalized = (self.points - self.center) / self.radius
vnormalized = (self.vertices - self.center) / self.radius
# Create the complete set of triangles and calculate their solid angles
triangles = np.hstack([pnormalized[point_indices],
vnormalized[nbrs1],
vnormalized[nbrs2]
]).reshape((num_regions, 3, 3))
triangle_solid_angles = calculate_solid_angles(triangles)
# Sum the solid angles of the triangles in each region
solid_angles = np.cumsum(triangle_solid_angles)[csizes - 1]
solid_angles[1:] -= solid_angles[:-1]
# Get polygon areas using A = omega * r**2
return solid_angles * self.radius**2
|
Written by Scott Joseph on 22 November 2010.
The newest Graffiti Junktion has been open for a few weeks now. I stopped in one afternoon with a friend to check it out. This GJ, as reported previously, took over the space vacated earlier this year by K Restaurant Wine Bar in College Park. Those familiar with the original Junktion in Thornton Park know that it is a loud place, both aurally and visually. The name is a pretty good clue as to the genre of its decor. Most of the seating is at rustic picnic tables, so we're talking some not too subtle changes from the way K looked.
My guest and I chose to sit outside, partly because it was quite crowded inside and partly because my friend couldn’t easily pull her mobility scooter up to the picnic tables; a smaller, conventional table outside was more accessible.
The menu is basically the same as the one in Thornton Park. Because Graffiti Junktion bills itself as an American Burger Bar, that’s what I wanted. I selected the specialty burger called the Lone Star, which has bacon, barbecue sauce and cheddar cheese.
Perhaps the kitchen crew is still getting used to the vagaries of their new equipment, but my patty, which was certainly ample enough, was cooked way beyond the requested medium-rare and had lost a lot of juiciness. My friend went the salad route, having one with spinach, chicken and brie with a mimosa dressing. She declared it delicious.
The oddest thing happened while we were dining. A construction crew of two arrived and began drilling a hole in the concrete about five feet from us using a keyhole saw the size of a jackhammer. It made about as much racket as one, too. Conversation, of course, was impossible, and we were a bit concerned about flying debris. When I got up to complain to a staff member just inside the door, she shrugged and said they didn’t work for the restaurant. She showed no concern for passing our displeasure on to anyone else. But a few minutes later, several top management people came out to tell the operator to stop his drilling. He initially said it would only be a few more minutes, but they insisted he stop and told him to come back later. It’s really quite amazing that these idiots, who apparently were under contract by the city, would think it would be OK to do that type of work while people were eating food so close by.
For reasons I’m not quite sure about, later that evening I found myself at the downtown Graffiti Junktion. Another friend and I sat at the bar and ordered the nachos with beef. I know, I know: sometimes my judgement scares me, too. But as it turns out, these were pretty good nachos, sufficiently gloppy and loaded with lots of good stuff, including chili, cheese and sour cream. We were watching a football game on one of the numerous screens, so it was just the right sort of food to have.
Once the College Park location gets used to its new place, the food there should be up to par with the original.
Graffiti Junktion is at 2401 Edgewater Drive, Orlando. It is open for lunch and dinner daily, and late night every night (1 a.m.; midnight Sundays). Here’s a link to the Web site, but they haven’t added the new location yet. Click this link to download the menu. The phone number is 407-377-1961. |
from __future__ import division
from CoolProp.CoolProp import PropsSI
from Correlations import f_h_1phase_Tube,TrhoPhase_ph,Tsat
from math import log,pi,exp
class LineSetClass():
def __init__(self,**kwargs):
#Load the parameters passed in
# using the dictionary
self.__dict__.update(kwargs)
def Update(self,**kwargs):
#Load the parameters passed in
# using the dictionary
self.__dict__.update(kwargs)
def OutputList(self):
"""
Return a list of parameters for this component for further output
It is a list of tuples, and each tuple is formed of items:
[0] Description of value
[1] Units of value
[2] The value itself
"""
return [
('Length of tube','m',self.L),
('Supply line OD','m',self.OD),
('Supply line ID','m',self.ID),
('Tube Conductivity','W/m-K',self.k_tube),
('Insulation thickness','m',self.t_insul),
('Insulation conductivity','W/m-K',self.k_insul),
('Air overall HTC','W/m^2-K',self.h_air),
('Air Temperature','K',self.T_air),
('Q Total','W',self.Q),
('Pressure drop ','Pa',self.DP),
('Reynolds # Fluid','-',self.Re_fluid),
('Mean HTC Fluid','W/m^2-K',self.h_fluid),
('Charge','kg',self.Charge),
('Inlet Temperature','K',self.Tin),
('Outlet Temperature','K',self.Tout)
]
def Calculate(self):
#Figure out the inlet state
self.Tbubble=Tsat(self.Ref,self.pin,0,0)
self.Tdew=Tsat(self.Ref,self.pin,1,0)
self.Tin,self.rhoin,self.Phasein=TrhoPhase_ph(self.Ref,self.pin,self.hin,self.Tbubble,self.Tdew)
self.f_fluid, self.h_fluid, self.Re_fluid=f_h_1phase_Tube(self.mdot, self.ID, self.Tin, self.pin, self.Ref)
# Specific heat capacity [J/kg-K]
cp=PropsSI('C','T',self.Tin,'P',self.pin*1000+100,self.Ref) #J/kg-K
# Density [kg/m^3]
rho=PropsSI('D','T',self.Tin, 'P', self.pin*1000+100, self.Ref)
#Thermal resistance of tube
R_tube=log(self.OD/self.ID)/(2*pi*self.L*self.k_tube)
#Thermal resistance of insulation
R_insul=log((self.OD+2.0*self.t_insul)/self.OD)/(2*pi*self.L*self.k_insul);
#Convective UA for inside the tube
UA_i=pi*self.ID*self.L*self.h_fluid;
#Convective UA for the air-side
UA_o=pi*(self.OD+2*self.t_insul)*self.L*self.h_air;
#Avoid the possibility of division by zero if h_air is zero
if UA_o<1e-12:
UA_o=1e-12
#Overall UA value
UA=1/(1/UA_i+R_tube+R_insul+1/UA_o)
#Outlet fluid temperature [K]
# self.Tout=self.T_air-exp(-UA/(self.mdot*cp))*(self.T_air-self.Tin)
        #first, assume no temperature drop/rise in the lines
self.Tout = self.Tin
#Overall heat transfer rate [W]
self.Q=self.mdot*cp*(self.Tout-self.Tin)
self.hout=self.hin+self.Q/self.mdot
#Pressure drop calculations for superheated refrigerant
v=1./rho
G=self.mdot/(pi*self.ID**2/4.0)
#Pressure gradient using Darcy friction factor
dpdz=-self.f_fluid*v*G**2/(2.*self.ID) #Pressure gradient
self.DP=dpdz*self.L
#Charge in Line set [kg]
self.Charge=pi*self.ID**2/4.0*self.L*rho |
We have the cheapest Netflix option…the one that lets you take out one movie at a time. We find that the movies don’t sit on top of our TV as long if you have to watch and return it to get something else. We do it in a very fair way, rotating around family members to pick the movie. A kid, a parent, the other kid, the other parent, and so on.
Now those are two movies you don’t see side-by-side everyday. |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import math
import operator
def calculateShannonEntropy(dataset):
# [[0, 0, 0, 0, 'N'], [0, 0, 1, 1, 'Y']]
instance_number = len(dataset)
# {'Y': 1, 'N': 1}
label_number_map = {}
for instance in dataset:
label = instance[-1]
if label not in label_number_map.keys():
label_number_map[label] = 0
label_number_map[label] += 1
total_shannon_entropy = 0.0
for label in label_number_map:
probability = float(label_number_map[label]) / instance_number
shannon_entropy = probability * math.log(probability, 2) * -1
total_shannon_entropy += shannon_entropy
return total_shannon_entropy
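# Shannon entropy of the label column: H = -sum_i p_i * log2(p_i). For example,
# two instances split evenly between 'Y' and 'N' give H = 1.0, while two
# instances with the same label give H = 0.0 (see the test below).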
def testCalculateShannonEntropy():
# Should be 1.0
dataset = [[0, 0, 0, 0, 'N'], [0, 0, 1, 1, 'Y']]
print("The shannon entropy is: {}".format(calculateShannonEntropy(dataset)))
# Should be 0.0
dataset = [[0, 0, 0, 0, 'N'], [0, 0, 1, 1, 'N']]
print("The shannon entropy is: {}".format(calculateShannonEntropy(dataset)))
def split_dataset(dataset, feature, value):
""" Get the dataset when "feature" is equal to "value"
"""
    return_data_set = []
# TODO: Example
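    # Example: split_dataset([[0, 0, 'N'], [1, 1, 'Y']], feature=0, value=1)
    # keeps only the second instance and drops column 0, returning [[1, 'Y']]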
for instance in dataset:
if instance[feature] == value:
new_instance = instance[:feature]
new_instance.extend(instance[feature + 1:])
            return_data_set.append(new_instance)
    return return_data_set
def choose_best_feature_to_split(dataset):
# Example: 4
feature_number = len(dataset[0]) - 1
base_entropy = calculateShannonEntropy(dataset)
best_info_gain_ratio = 0.0
best_feature = -1
    best_after_split_entropy = float('inf')
# Example: [0, 0, 0, 0]
for i in range(feature_number):
# Example:
instance_with_one_feature = [instance[i] for instance in dataset]
feature_value_set = set(instance_with_one_feature)
after_split_entropy = 0.0
instrinsic_value = 0.0
# Example: [0, 1]
for value in feature_value_set:
sub_dataset = split_dataset(dataset, i, value)
probability = len(sub_dataset) / float(len(dataset))
after_split_entropy += probability * calculateShannonEntropy(sub_dataset)
instrinsic_value += -probability * math.log(probability, 2)
'''
info_gain = base_entropy - after_split_entropy
# Check if it is zero
if (instrinsic_value == 0):
continue
info_gain_ratio = info_gain / instrinsic_value
if (info_gain_ratio > best_info_gain_ratio):
best_info_gain_ratio = info_gain_ratio
best_feature = i
'''
        # A lower post-split entropy means a higher information gain
        if after_split_entropy < best_after_split_entropy:
            best_after_split_entropy = after_split_entropy
            best_feature = i
return best_feature
def create_decision_tree(dataset, header_names):
# Example: [[0, 0, 0, 0, 'N'], [0, 0, 0, 1, 'N'], [1, 0, 0, 0, 'Y']]
# Example: ['N', 'N', 'Y']
labels = [instance[-1] for instance in dataset]
if labels.count(labels[0]) == len(labels):
# Return if all the values are the same
return labels[0]
# Example: ['N']
if len(dataset[0]) == 1:
label_count_map = {}
for label in labels:
if label not in label_count_map.keys():
label_count_map[label] = 0
label_count_map[label] += 1
        sorted_label_count_map = sorted(
            label_count_map.items(), key=operator.itemgetter(1), reverse=True)
return sorted_label_count_map[0][0]
best_feature_id = choose_best_feature_to_split(dataset)
header_name = header_names[best_feature_id]
decision_tree = {header_name: {}}
# TODO: don't modify the input parameter
del (header_names[best_feature_id])
all_feature_values = [instance[best_feature_id] for instance in dataset]
unique_feature_values = set(all_feature_values)
for value in unique_feature_values:
sub_header_names = header_names[:]
sub_dataset = split_dataset(dataset, best_feature_id, value)
decision_tree[header_name][value] = create_decision_tree(
sub_dataset, sub_header_names)
return decision_tree
def predict(decision_tree, header_names, test_dataset):
# Example: {'outlook': {0: 'N', 1: 'Y', 2: {'windy': {0: 'Y', 1: 'N'}}}}
# print("Current tree: {}".format(decision_tree))
# Example: "outlook"
root_key = list(decision_tree.keys())[0]
# Example: {0: 'N', 1: 'Y', 2: {'windy': {0: 'Y', 1: 'N'}}}
sub_decision_tree = decision_tree[root_key]
# Example: 0
feature_index = header_names.index(root_key)
for key in sub_decision_tree.keys():
if test_dataset[feature_index] == key:
if type(sub_decision_tree[key]).__name__ == 'dict':
predict_label = predict(sub_decision_tree[key], header_names,
test_dataset)
else:
predict_label = sub_decision_tree[key]
return predict_label
def main():
# Train
dataset = [[0, 0, 0, 0, 'N'], [0, 0, 0, 1, 'N'], [1, 0, 0, 0, 'Y'],
[2, 1, 0, 0, 'Y'], [2, 2, 1, 0, 'Y'], [2, 2, 1, 1,
'N'], [1, 2, 1, 1, 'Y']]
header_names = ['outlook', 'temperature', 'humidity', 'windy']
decision_tree = create_decision_tree(dataset, header_names)
print("Train and get decision tree: {}".format(decision_tree))
# Test
header_names = ['outlook', 'temperature', 'humidity', 'windy']
test_dataset = [2, 1, 0, 0]
test_dataset = [2, 1, 1, 1]
result = predict(decision_tree, header_names, test_dataset)
print("Predict decision tree and get result: {}".format(result))
if __name__ == "__main__":
main()
|
'''File: JurorForm.py
Module: ``portal.forms.JurorForm``
Contains the JurorForm class used on the front page of the OQ portal.
'''
from datetime import datetime
import re
from django import forms
from django.utils.safestring import mark_safe
from django.core.exceptions import ValidationError
class LoginForm(forms.Form):
    '''The main form as displayed on the ``portal:login`` screen. The user is asked
    for their username and password.
:init:
.. code-block:: python
form = LoginForm(request.POST or None)
'''
#: ``base_fields`` is an automatically generated attribute that is
#: constructed based off of the form fields in this model.
base_fields = None
#: ``declared_fields`` is an automatically generated attribute that is
#: constructed based off of the form fields in this model.
declared_fields = None
#: ``media`` is a meta class.
media = None
#: CharField form field for the username of the user.
username = forms.CharField(widget=forms.TextInput(),
max_length=50,
required=True,
label='Username')
#: CharField form field for the password of the user.
password = forms.CharField(widget=forms.PasswordInput(render_value=True),
max_length=50,
required=True,
label=mark_safe('Password'))
# Method: clean
# See: DocString
def clean(self):
'''
Main cleaning method. :func:`clean()` is performed after all
:func:`clean_FIELD()` methods returned successfully.
Validations:
username - Value - must not be empty string
password - Value - must not be empty string
Raises:
'''
# Grab the entire dictionary of cleaned data. Subsequent values may be
# NULL.
data = super(LoginForm, self).clean()
username = data.get('username')
password = data.get('password')
if not username or len(username) == 0:
self.add_error('username', 'Empty username')
data.pop('username', None)
if not password or len(password) == 0:
self.add_error('password', 'Empty password')
data.pop('password', None)
# Always return cleaned data. Return type is a dictionary.
return data
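# A minimal sketch of how this form might be consumed in a view (hypothetical
# view function and URL name, not part of this module; it relies only on the
# public Django auth API and the two fields declared above):
#
#     from django.contrib.auth import authenticate, login
#     from django.shortcuts import redirect, render
#
#     def portal_login(request):
#         form = LoginForm(request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             user = authenticate(username=form.cleaned_data['username'],
#                                 password=form.cleaned_data['password'])
#             if user is not None:
#                 login(request, user)
#                 return redirect('portal:home')  # assumed URL name
#             form.add_error(None, 'Invalid username or password')
#         return render(request, 'portal/login.html', {'form': form})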
|
Hehe, don't know, you have to ask the "hunter" inside me!
Hmm, loot is good, though. But if someone else needs it more than I do, he/she can take it.
Nah, but seriously... so your gf is lvl 29, huh..
"Reason for re-applying: No more girlfriend" ? |
"""
workspace_commands --- Open, SaveAs, GotoLine commands
======================================================
"""
import os.path
import glob
from enki.core.core import core
from enki.lib.pathcompleter import makeSuitableCompleter, PathCompleter
from enki.core.locator import AbstractCommand
class CommandGotoLine(AbstractCommand):
"""Go to line command implementation
"""
@staticmethod
def signature():
"""Command signature. For Help
"""
return '[l] [LINE]'
@staticmethod
def description():
"""Command description. For Help
"""
return 'Go to line'
@staticmethod
def pattern():
"""Pyparsing pattern
"""
from pyparsing import Literal, Optional, Suppress, White, Word, nums # delayed import, performance optimization
line = Word(nums)("line")
pat = (Literal('l ') + Suppress(Optional(White())) + Optional(line)) ^ line
pat.leaveWhitespace()
pat.setParseAction(CommandGotoLine.create)
return pat
@staticmethod
def create(str, loc, tocs):
"""Callback for pyparsing. Creates an instance of command
"""
if tocs.line:
line = int(tocs.line)
else:
line = None
return [CommandGotoLine(line)]
@staticmethod
def isAvailable():
"""Check if command is currently available
"""
return core.workspace().currentDocument() is not None
def __init__(self, line):
self._line = line
def isReadyToExecute(self):
"""Check if command is complete and ready to execute
"""
return self._line is not None
def execute(self):
"""Execute the command
"""
core.workspace().currentDocument().qutepart.cursorPosition = self._line - 1, None
class CommandOpen(AbstractCommand):
@staticmethod
def signature():
"""Command signature. For Help
"""
return '[f] PATH [LINE]'
@staticmethod
def description():
"""Command description. For Help
"""
return 'Open file. Globs are supported'
@staticmethod
def pattern():
"""pyparsing pattern
"""
def attachLocation(s, loc, tocs):
"""pyparsing callback. Saves path position in the original string
"""
return [(loc, tocs[0])]
from pyparsing import CharsNotIn, Combine, Literal, Optional, White, Word, nums # delayed import, performance optimization
path = CharsNotIn(" \t")("path")
path.setParseAction(attachLocation)
longPath = CharsNotIn(" \t", min=2)("path")
longPath.setParseAction(attachLocation)
slashPath = Combine(Literal('/') + Optional(CharsNotIn(" \t")))("path")
slashPath.setParseAction(attachLocation)
pat = ((Literal('f ') + Optional(White()) + Optional(path)) ^ longPath ^ slashPath) + \
Optional(White() + Word(nums)("line"))
pat.leaveWhitespace()
pat.setParseAction(CommandOpen.create)
return pat
@staticmethod
def create(str, loc, tocs):
"""pyparsing callback. Creates an instance of command
"""
if tocs.path:
pathLocation, path = tocs.path
else:
pathLocation, path = 0, ''
if tocs.line:
line = int(tocs.line)
else:
line = None
return [CommandOpen(pathLocation, path, line)]
def __init__(self, pathLocation, path, line):
self._path = path
self._pathLocation = pathLocation
self._line = line
def completer(self, text, pos):
"""Command completer.
If cursor is after path, returns PathCompleter or GlobCompleter
"""
if pos == self._pathLocation + len(self._path) or \
(not self._path and pos == len(text)):
return makeSuitableCompleter(self._path, pos - self._pathLocation)
else:
return None
def constructCommand(self, completableText):
"""Construct command by path
"""
command = 'f ' + completableText
if self._line is not None:
command += ' %d' % self._line
return command
@staticmethod
def _isGlob(text):
return '*' in text or \
'?' in text or \
'[' in text
def isReadyToExecute(self):
"""Check if command is complete and ready to execute
"""
if self._isGlob(self._path):
files = glob.glob(os.path.expanduser(self._path))
return len(files) > 0 and \
all([os.path.isfile(p) for p in files])
else:
if not self._path:
return False
if os.path.exists(self._path) and \
not os.path.isfile(self._path): # a directory
return False
if self._path.endswith('/'): # going to create a directory
return False
return True
def execute(self):
"""Execute the command
"""
if self._isGlob(self._path):
expandedPathes = []
for path in glob.iglob(os.path.expanduser(self._path)):
try:
path = os.path.abspath(path)
except OSError:
pass
expandedPathes.append(path)
# 2 loops, because we should open absolute pathes. When opening files, enki changes its current directory
for path in expandedPathes:
if self._line is None:
core.workspace().goTo(path)
else:
core.workspace().goTo(path, line = self._line - 1)
else: # file may be not existing
path = os.path.expanduser(self._path)
if os.path.isfile(path):
try:
path = os.path.abspath(path)
except OSError: # current dir deleted
return
if self._line is None:
core.workspace().goTo(path)
else:
core.workspace().goTo(path, line = self._line - 1)
else:
core.workspace().createEmptyNotSavedDocument(path)
class CommandSaveAs(AbstractCommand):
"""Save As Locator command
"""
@staticmethod
def signature():
"""Command signature. For Help
"""
return 's PATH'
@staticmethod
def description():
"""Command description. For Help
"""
return 'Save file As'
@staticmethod
def pattern():
"""pyparsing pattern of the command
"""
def attachLocation(s, loc, tocs):
return [(loc, tocs[0])]
from pyparsing import CharsNotIn, Literal, Optional, White # delayed import, performance optimization
path = CharsNotIn(" \t")("path")
path.setParseAction(attachLocation)
pat = (Literal('s ') + Optional(White()) + Optional(path))
pat.leaveWhitespace()
pat.setParseAction(CommandSaveAs.create)
return pat
@staticmethod
def create(str, loc, tocs):
"""Callback for pyparsing. Creates an instance
"""
if tocs.path:
pathLocation, path = tocs.path
else:
pathLocation, path = 0, ''
return [CommandSaveAs(pathLocation, path)]
@staticmethod
def isAvailable():
"""Check if command is available.
It is available, if at least one document is opened
"""
return core.workspace().currentDocument() is not None
def __init__(self, pathLocation, path):
self._path = path
self._pathLocation = pathLocation
def completer(self, text, pos):
"""Command Completer.
Returns PathCompleter, if cursor stays after path
"""
if pos == self._pathLocation + len(self._path) or \
(not self._path and pos == len(text)):
return PathCompleter(self._path, pos - self._pathLocation)
else:
return None
def constructCommand(self, completableText):
"""Construct command by path
"""
        return 's ' + completableText
def isReadyToExecute(self):
"""Check if command is complete and ready to execute
"""
return len(self._path) > 0 and not os.path.isdir(self._path)
def execute(self):
"""Execute command
"""
try:
path = os.path.abspath(os.path.expanduser(self._path))
except OSError: # directory deleted
return
core.workspace().currentDocument().setFilePath(path)
core.workspace().currentDocument().saveFile()
class Plugin:
"""Plugin interface
"""
def __init__(self):
for comClass in (CommandGotoLine, CommandOpen, CommandSaveAs):
core.locator().addCommandClass(comClass)
def del_(self):
"""Explicitly called destructor
"""
for comClass in (CommandGotoLine, CommandOpen, CommandSaveAs):
core.locator().removeCommandClass(comClass)
|
This is an excellent vehicle at an affordable price! Turbocharger technology provides forced air induction, enhancing performance while preserving fuel economy. Top features include front dual zone air conditioning, tilt and telescoping steering wheel, lane departure warning, and more. It features a continuously variable transmission, front-wheel drive, and an efficient 4 cylinder engine. |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# utf-8 encoding for Chinese text
import random
import string
from datetime import datetime,timedelta
from django import http
from django import forms
from django.db.models import Q
from django.conf import settings
from master_node.models import *
from django.template import RequestContext
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator,InvalidPage,EmptyPage
import json as simplejson  # django.utils.simplejson was removed in newer Django; simplejson is used in tree_json below
from django.contrib.auth.decorators import login_required
from master_node.models import *
from django.contrib import auth
from django.contrib import messages
from django.utils.translation import ugettext, ugettext_lazy as _
from django.shortcuts import render
from django import http
# Create your views here.
def index(request):
return render_to_response('index.html',
{
# 'user':request.user,
},
context_instance=RequestContext(request,)
)
class EditPassForm(forms.Form):
username = forms.CharField(max_length=100,label=u'用户名')
oldpass = forms.CharField(max_length=100,label=u'旧密码')
newpass = forms.CharField(max_length=100,label=u'新密码')
newpass2 = forms.CharField(max_length=100,label=u'重复新密码')
def clean_oldpass(self):
username = self.cleaned_data['username']
oldpass = self.cleaned_data['oldpass']
if auth.authenticate(username=username, password=oldpass) == None:
raise forms.ValidationError(u"原始密码错误!")
return oldpass
def clean_newpass(self):
newpass = self.cleaned_data['newpass']
if len(newpass)<5:
raise forms.ValidationError(u"密码太短了,请大于5位!")
return newpass
def clean_newpass2(self):
newpass = self.cleaned_data.get('newpass','')
newpass2 = self.cleaned_data['newpass2']
if newpass =='':
return newpass2
if newpass !=newpass2:
raise forms.ValidationError(u"两次密码不一致!")
return newpass2
def logout(request):
auth.logout(request)
# Redirect to a success page.
messages.success(request,message='退出成功。')
return HttpResponseRedirect("/")
@login_required
def profile(request):
if request.method == 'POST':
form = EditPassForm(request.POST)
if form.is_valid() :
cd = form.cleaned_data
            # The form validation took a shortcut and authenticated against the
            # username submitted with the form; for safety, authenticate again
            # here with the logged-in user's username to get the user object.
            # A forged username is not handled specially; the code would simply fail.
passuser = auth.authenticate(username=request.user.username, password=cd['oldpass'])
passuser.set_password(cd['newpass'])
passuser.save()
messages.success(request,message='密码修改成功')
return render_to_response('registration/profile.html',
{
'profile':get_profile(request.user),
'form':form
},
context_instance=RequestContext(request,)
)
return render_to_response('registration/profile.html',
{
'profile':get_profile(request.user),
},
context_instance=RequestContext(request,)
)
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
username = forms.RegexField(label=_("Username"), max_length=30,widget=forms.TextInput(attrs={'class':"form-control",'placeholder':"30 个英文字符或更少."}),
regex=r'^[\w.@+-]+$',
help_text="必选. 30 个英文字符或更少.",
error_messages={
'invalid': "This value may contain only letters, numbers and "
"@/./+/-/_ characters."})
email = forms.RegexField(label="Email", max_length=30,widget=forms.TextInput(attrs={'class':"form-control",'placeholder':"Email"}),
regex=r'^[^@]+@[^@]+\.[^@]+$',
help_text="必选.",
error_messages={
'invalid': "格式错误,请重新输入."})
password1 = forms.CharField(label=_("Password"),widget=forms.PasswordInput(attrs={'class':"form-control",'placeholder':"Password"}))
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput(attrs={'class':"form-control",'placeholder':"Password"}),
help_text=_("Enter the same password as above, for verification."))
class Meta:
model = User
fields = ("username","email",)
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(
u'用户名已经被使用了,请更换。',
code='duplicate_username',
)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
u'两次密码不相同,请重试。',
code='password_mismatch',
)
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
#user.email = self.email
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
def register(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
new_user = form.save()
profile = Profile(user=new_user,
sport=8000+new_user.id,
spass=GenPassword(10),
start_date=datetime.now(),
now_date=datetime.now(),
end_date=datetime.now()+timedelta(days=30))
profile.save()
            up_user() # sync the ss servers after creating a new user
messages.success(request,message=u'注册成功,请登录。')
return HttpResponseRedirect("/")
else:
form = UserCreationForm()
return render_to_response("registration/register.html", {
'form': form,
},
context_instance=RequestContext(request,))
def nodes(request):
nodes = Node.objects.all()
return render_to_response("nodes.html", {
'nodes': nodes,
},
context_instance=RequestContext(request,))
def up(request):
return render_to_response("index.html", {
'up': up_user(),
},
context_instance=RequestContext(request,))
def tree(request,id):
try:
iid = int(id)
except:
raise http.Http404
p = get_object_or_404(people,id=iid)
return render_to_response('tree.html',
{
'p':p,
},
#context_instance=RequestContext(request,)
)
def tree_json(request,id,recursion):
try:
iid = int(id)
recursion = int(recursion)
except:
raise http.Http404
p = get_object_or_404(people,id=iid)
res = p.recursion_to_dict(recursion)
return HttpResponse(simplejson.dumps(res,ensure_ascii = False))
|
MAKE THE COMPLEX SIMPLE WITH NITA’S TECHNOLOGICALLY SUPERIOR LABELING MACHINES.
IT’S A LABELING MACHINE REVOLUTION.
Pick one super easy Nita tool!
It’s the technology. And the people. And the efficient quick-changeover systems. And total servo synchronization. And the simplicity of operation. And the Self-Diagnosing system with in-screen parts ordering. And the list goes on and on. It’s Not Just A Labeling Machine. It’s A Nita. It’s the BEST label applicator machine you can buy.
Uptime. It’s Better Than Downtime.
They may say they are “Just Like A Nita” but they aren’t. Nita is the original all servo 100% totally speed synchronized labeling machine with the most innovative ultra-fast recipe changeover system around. And these are just a few of our unique features! Check out the side-by-side chart to really compare the differences between Nita and everyone else.
Our famous XP Series Labelers for Front, Back, Wrap and Multi-Panel Applications.
100% no charge lifetime remote support, video, email, chat, phone. That’s how fanatical we are about our unbelievably reliable labeling machines. And our awesome clients. |
from numpy import array, linspace, zeros, pi
from CodeTools.PlottingManager import myPickle
import pyneb as pn
S4 = pn.RecAtom('He',1)
He1_Emis_pyneb = S4.getEmissivity(tem=10000.0, den=100.0, label = '3889.0')
He_Emis_article = 1.4005 * 1e-25 #Units: 4 Pi j / n_e / n_He+ (erg cm^3 s^-1).
#Emissivity coefficient
print 'Emissivity ratio', He1_Emis_pyneb / He_Emis_article
#Load script support classes
pv = myPickle()
#Declare Figure format
pv.FigFormat_One(ColorConf='Night1')
#Load pyneb atom
H1 = pn.RecAtom('H',1)
S4 = pn.RecAtom('He',1)
Helium_Labels = ['3889.0', '4026.0', '4471.0', '5876.0', '6678.0', '7065.0']
#Declare initial conditions
Te_vector = linspace(8000, 25000, 100) #K
ne_vector = linspace(10, 1000, 100) #cm^-3
Te_0 = 10000
ne_0 = 100
Hbeta_0_Emis = H1.getEmissivity(tem=Te_0, den=ne_0, label='4_2')
Normalizing_Constant = 1e-25 #erg cm^3 s^-1
#----Pyneb-----------------------------------------------
#Emissivity vector
HeEm_vector = zeros((len(Helium_Labels),len(ne_vector)))
#Calculate emissivities
for i in range(len(Helium_Labels)):
for j in range(len(Te_vector)):
HeEm_vector[i][j] = S4.getEmissivity(tem=Te_vector[j], den=ne_0, label = Helium_Labels[i])
#Plot the data
for k in range(len(Helium_Labels)):
Label = 'HeI ' + Helium_Labels[k] + r'$\AA$' + ' emissivity'
pv.DataPloter_One(Te_vector, HeEm_vector[k]/1e-25, Label, pv.Color_Vector[2][k])
#-----PFSFD Emissivities from 2012 paper-----------------------------------------------
TableAddress = '/home/vital/git/Dazer_Local/Dazer/Astro_Libraries/PFSD_HeEmissivities_ne100'
Temps, He3889, He40226, He4471, He5876, He6678, He7065 = pv.get_TableColumn((0,2,3,4,5,6,7), TableAddress, HeaderSize=1, StringIndexes=False, unpack_check=True)
Emissivities_Table = [He3889, He40226, He4471, He5876, He6678, He7065]
print 'Helium emissivity ',
#Plot the data
Conversionparameter = 1
#In the table the data is in units = 4 * pi * j / (ne * nHe+)
for k in range(len(Emissivities_Table)):
Label = 'PFSD emissivity'
Emissivity = Emissivities_Table[k] * Conversionparameter
pv.DataPloter_One(Temps, Emissivity, Label, pv.Color_Vector[2][k], LineStyle=None)
# for k in range(len(Emissivities_Table)):
# Label = 'PFSD emissivity'
# pyneb_Counterpart = zeros(len(Temps))
# for i in range(len(Temps)):
# pyneb_Counterpart[i] = S4.getEmissivity(tem=Temps[i], den=ne_0, label = Helium_Labels[k])
# print 'Linea', Helium_Labels[k]
# print Emissivities_Table[k]* 10e-25/ pyneb_Counterpart
# Emissivity = Emissivities_Table[k]* 10e-25/ pyneb_Counterpart
# pv.DataPloter_One(Temps, Emissivity, Label, pv.Color_Vector[2][k], LineStyle=None)
PlotTitle = 'Helium lines emissivity evolution with Temperature'
x_label = 'Temperature ' + r'$(K)$'
y_label = r'$E_{He}\left(\lambda,\,T_{e},\,n_{e}=100\,cm^{-3}\right)_{He}\,/ 10^-25 ()$'
pv.Labels_Legends_One(PlotTitle, x_label, y_label)
pv.DisplayFigure()
print 'Treatment completed' |
The District’s local storm water development drainage system consists of storm drains, detention and retention basins, and pump stations. The system is designed to retain and infiltrate as much stormwater and urban runoff as possible. The District’s Storm Drainage and Flood Control Master Plan includes 158 drainage areas, each providing service to approximately one to two square miles. All but five of the developed drainage areas are served by a retention or detention facility.
Stormwater flows into storm drain inlets, and through a network of pipes to a nearby ponding basin. Here the water is stored to protect neighborhoods from flooding and to replenish the groundwater aquifer, which is the primary source of our community’s drinking water.
Local drainage services include topographic mapping, Master Plan engineering and facility design; system construction, operation, and maintenance; and engineering design services to ensure adequate drainage for new development.
*Drainage Fee Schedules posted are valid through February of the year following adoption of the schedules. All final fees will be determined by FMFCD prior to issuance of building permits or approval of maps, whichever applies. |