# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to manage all aspects of student assessments."""
__author__ = 'pgbovine@google.com (Philip Guo)'
import base64
import datetime
import httplib
import json
import logging
import os
import urllib2
import uuid
from utils import BaseHandler
from utils import HUMAN_READABLE_DATETIME_FORMAT
from controllers import lessons
from models import courses
from models import models
from models import review
from models import student_work
from models import transforms
from models import utils
from models.models import Student
from models.models import StudentAnswersEntity
from tools import verify
from google.appengine.ext import db
def store_score(course, student, assessment_type, score):
"""Stores a student's score on a particular assessment.
Args:
course: the course containing the assessment.
student: the student whose data is stored.
assessment_type: the type of the assessment.
score: the student's score on this assessment.
Returns:
the result of the assessment, if appropriate.
"""
# FIXME: Course creators can edit this code to implement custom
# assessment scoring and storage behavior
# TODO(pgbovine): Note that the latest version of answers are always saved,
# but scores are only saved if they're higher than the previous attempt.
# This can lead to unexpected analytics behavior. Resolve this.
existing_score = course.get_score(student, assessment_type)
    # Remember to cast to int for comparison.
    # NOTE: the trailing "or True" forces every attempt to be stored (and an
    # xAPI statement to be sent below), overriding the save-only-higher-scores
    # rule described in the TODO above.
    if (existing_score is None) or (score > int(existing_score)) or True:
        utils.set_score(student, assessment_type, score)
        # Build the xAPI (Tin Can) statement to send to the LRS.
        tincan_activity_uri = os.environ.get(
            'HTTP_REFERER', 'urn:uuid:' + str(uuid.uuid4()))
        xapi_statement = {
            # Statement ids must be unique, so generate one per attempt.
            "id": str(uuid.uuid4()),
"actor":{
"mbox":"mailto:"+student.key().name()
},
"verb":{
"id":"http://adlnet.gov/expapi/verbs/created",
"display":{
"en-US":"created"
}
},
"object":{
"id" : tincan_activity_uri,
"definition": {
"name":{
"en-US": "Multiquiz"
},
"description": {
"en-US": "multipart activity description"
}
},
},
'result': {
'completion': True,
'score': {
                    # 'scaled' must lie in [-1, 1]; Course Builder scores are
                    # assumed to be percentages, hence the division by 100.
                    'scaled': score / 100.0
}
}
}
        statement = json.dumps(xapi_statement)
        logging.info('Sending xAPI statement: %s', statement)
        # TODO: make the LRS endpoint configurable instead of hard-coding it.
        try:
            connection = httplib.HTTPConnection('127.0.0.1:9000')
            connection.request('POST', '/xapi/statements', statement, {
                'x-experience-api-version': '1.0',
                'Authorization': 'Basic ' + base64.b64encode(
                    '%(LRS_USERNAME)s:%(LRS_PASSWORD)s' % os.environ),
                'content-type': 'application/json',
            })
            response = connection.getresponse()
            logging.info('LRS response: %s %s %s',
                         response.status, response.reason, response.read())
            connection.close()
        except Exception:
            # A failed LRS call should not break score storage.
            logging.exception('Failed to send xAPI statement to the LRS.')
        # An equivalent POST via urllib2 (imported above) would be, sketched:
        #   req = urllib2.Request('http://127.0.0.1:9000/xapi/statements',
        #                         statement, headers)
        #   urllib2.urlopen(req)
class AnswerHandler(BaseHandler):
"""Handler for saving assessment answers."""
# Find student entity and save answers
@db.transactional(xg=True)
def update_assessment_transaction(
self, email, assessment_type, new_answers, score):
"""Stores answer and updates user scores.
Args:
email: the student's email address.
assessment_type: the title of the assessment.
new_answers: the latest set of answers supplied by the student.
score: the numerical assessment score.
Returns:
the student instance.
"""
student = Student.get_enrolled_student_by_email(email)
course = self.get_course()
# It may be that old Student entities don't have user_id set; fix it.
if not student.user_id:
student.user_id = self.get_user().user_id()
answers = StudentAnswersEntity.get_by_key_name(student.user_id)
if not answers:
answers = StudentAnswersEntity(key_name=student.user_id)
answers.updated_on = datetime.datetime.now()
utils.set_answer(answers, assessment_type, new_answers)
store_score(course, student, assessment_type, score)
student.put()
answers.put()
# Also record the event, which is useful for tracking multiple
# submissions and history.
models.EventEntity.record(
'submit-assessment', self.get_user(), transforms.dumps({
'type': 'assessment-%s' % assessment_type,
'values': new_answers, 'location': 'AnswerHandler'}))
return student
def get(self):
"""Handles GET requests.
This method is here because if a student logs out when on the
reviewed_assessment_confirmation page, that student is redirected to
the GET method of the corresponding handler. It might be a good idea to
merge this class with lessons.AssessmentHandler, which currently only
has a GET handler.
"""
self.redirect('/course')
# pylint: disable=too-many-statements
def post(self):
"""Handles POST requests."""
student = self.personalize_page_and_get_enrolled()
if not student:
return
if not self.assert_xsrf_token_or_fail(self.request, 'assessment-post'):
return
course = self.get_course()
assessment_type = self.request.get('assessment_type')
if not assessment_type:
self.error(404)
logging.error('No assessment type supplied.')
return
unit = course.find_unit_by_id(assessment_type)
if unit is None or unit.type != verify.UNIT_TYPE_ASSESSMENT:
self.error(404)
logging.error('No assessment named %s exists.', assessment_type)
return
self.template_value['navbar'] = {'course': True}
self.template_value['assessment'] = assessment_type
self.template_value['assessment_name'] = unit.title
self.template_value['is_last_assessment'] = (
course.is_last_assessment(unit))
self.template_value['unit_id'] = unit.unit_id
# Convert answers from JSON to dict.
answers = self.request.get('answers')
answers = transforms.loads(answers) if answers else []
grader = unit.workflow.get_grader()
# Scores are not recorded for human-reviewed assignments.
score = 0
if grader == courses.AUTO_GRADER:
score = int(round(float(self.request.get('score'))))
# Record assessment transaction.
student = self.update_assessment_transaction(
student.key().name(), assessment_type, answers, score)
if grader == courses.HUMAN_GRADER:
rp = course.get_reviews_processor()
# Guard against duplicate submissions of a human-graded assessment.
previously_submitted = rp.does_submission_exist(
unit.unit_id, student.get_key())
if not previously_submitted:
# Check that the submission due date has not passed.
time_now = datetime.datetime.now()
submission_due_date = unit.workflow.get_submission_due_date()
if time_now > submission_due_date:
self.template_value['time_now'] = time_now.strftime(
HUMAN_READABLE_DATETIME_FORMAT)
self.template_value['submission_due_date'] = (
submission_due_date.strftime(
HUMAN_READABLE_DATETIME_FORMAT))
self.template_value['error_code'] = (
'assignment_deadline_exceeded')
self.render('error.html')
return
submission_key = student_work.Submission.write(
unit.unit_id, student.get_key(), answers)
rp.start_review_process_for(
unit.unit_id, submission_key, student.get_key())
# Record completion event in progress tracker.
course.get_progress_tracker().put_assessment_completed(
student, assessment_type)
self.template_value['previously_submitted'] = previously_submitted
matcher = unit.workflow.get_matcher()
self.template_value['matcher'] = matcher
if matcher == review.PEER_MATCHER:
self.template_value['review_dashboard_url'] = (
'reviewdashboard?unit=%s' % unit.unit_id
)
self.render('reviewed_assessment_confirmation.html')
return
else:
# Record completion event in progress tracker.
course.get_progress_tracker().put_assessment_completed(
student, assessment_type)
# Save the submission in the datastore, overwriting the earlier
# version if it exists.
submission_key = student_work.Submission.write(
unit.unit_id, student.get_key(), answers)
course.update_final_grades(student)
parent_unit = course.get_parent_unit(unit.unit_id)
if parent_unit:
unit_contents = lessons.UnitHandler.UnitLeftNavElements(
course, parent_unit)
next_url = unit_contents.get_url_by(
'assessment', unit.unit_id, 0) + '&confirmation'
self.redirect('/' + next_url)
else:
self.template_value['result'] = course.get_overall_result(
student)
self.template_value['score'] = score
self.template_value['overall_score'] = course.get_overall_score(
student)
self.render('test_confirmation.html')
THE future is bright, the future is orange, if you adapt that well-known advertising slogan to reflect the colour of David Moyes’ hair.
What the future is not at Old Trafford is yellow-and-green. Those bright colours of protest against the Glazer family’s controversial ownership can be quietly folded away now.
In choosing their next manager, Manchester United have opted for a man with the same accent and the same spirit as Sir Alex Ferguson. There could be no more emphatic way to spike the guns of what is left of the revolt among the supporters.
They could have settled upon a quick fix which would have traded the fabric and legacy of the club for more quick profit ahead of a cash-laden dash for the exit some time in the future. They could have gone for a hired gun like Jose Mourinho, whose only mission would have been to guarantee an instant, till-jangling continuation of the success upon which they have built the most mighty commercial operation in world sport. That is the popular way of modern business. It is what, in truth, I expected.
Instead, Moyes represents a dynastic appointment. He may not wish to be portrayed as an identikit Fergie, and he will have his own plans and ideas. But the core football values of the two men are as similar as their Glaswegian backgrounds.
Out in Glazerville, Florida, they have agreed to land the man most of the fans would have chosen.
It means that under the Glazers’ command, the big cash-in will continue and the sponsorship deals will continue to roll in. The list of the club’s backers on the official website is nearly as long as the roll-call of honours.
But the codes of the club, established and nurtured by both Ferguson and Sir Matt Busby before him, will remain in place too. Tradition is an excellent selling point. The future, if it works, will be rooted in a training-ground, red-brick, man-of-the-people’s view of the game and how it should operate as well as in the concrete canyons of Wall Street.
That represents a pretty smart double act put in place by the American family whose methods of taking control of United – loading them with debt – made them so vilified.
Of course the Glazers are in it for the money, like nearly all football moguls. But which United fan will be unhappy if the club’s financial power continues to grow and grow with a serious football man in charge of the team and its development? In fact, it simply has to if they are to keep pace in Europe with the new wealth of clubs like the Champions League finalists, Bayern Munich, the oligarch-funded clubs of the east and the ever-expanding Spanish giants.
The acid test will come if the handover of power in the Old Trafford dugout and its adjustments leads to any prolonged period without success. Say, two seasons.
These are the financial figures which Moyes has to support on the field and which will test the Glazers’ nerve if the pots dry up for a while.
According to figures released yesterday by the analysts Deloitte, United’s annual revenue for 2011-12 was £320.3million. That represents an average increase of 14 per cent a season on the £25.2m brought in during Ferguson’s first title-winning season, 1992-93.
The Glazers’ massive expansion of business operations has happened during the most successful period in the club’s history. The pressure on Moyes – unprecedented for a newly appointed manager – will be to keep those noughts ticking over on the balance sheets.
This is where the continued presence of Ferguson at the club as director and ambassador may prove critical.
He will remain a business beacon for United. His name will attract even more deals. He will represent, too, a vast fund of Champions League and transfer market knowledge on tap at Moyes’ disposal.
The fear may be that Fergie will overshadow the new man. But surely, these two kindred spirits have worked out already how to handle that.
Everything they have done in football suggests the shrewdness required to resolve that worry.
Ferguson was a centre-forward, Moyes a centre-back. They seem made for each other, however.
#!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author slamm@google.com (Stephen Lamm)
import logging
import os
import re
import subprocess
import sys
def GetSubversionExternals():
subversion_externals = []
svn_cmd = ['svn', 'propget', 'svn:externals', '.']
output = subprocess.Popen(svn_cmd, stdout=subprocess.PIPE).communicate()[0]
for external_entry in output.splitlines():
if external_entry:
local_path, svn_url = external_entry.split()
if local_path.startswith('third_party/'):
subversion_externals.append((local_path, svn_url))
return dict(subversion_externals)
def GetThirdPartyDirectoriesToCheck(ignore_dirs):
ignore_dirs = set(ignore_dirs)
ignore_dirs.add('third_party/.svn')
check_dirs = []
for third_party_dir in os.listdir('third_party'):
relative_dir = 'third_party/%s' % third_party_dir
if (relative_dir not in ignore_dirs and
os.path.isdir(relative_dir)):
check_dirs.append(relative_dir)
return check_dirs
def CheckVersion(third_party_dir):
readme_file = open(os.path.join(third_party_dir, 'README.browserscope'))
print '--------------------------------------------------'
print 'Checking directory: %s' % third_party_dir
for line in readme_file.readlines():
        line = line.strip()
match = re.match(
r'(VERSION|CHECK_VERSION|CHECK_VERSION_MANUALLY|URL):\s*(.*)', line)
if match:
readme_key, value = match.groups()
if readme_key == 'URL':
print 'URL: %s' % value
elif readme_key == 'VERSION':
print 'Local version: %s' % value
elif readme_key == 'CHECK_VERSION':
print 'Remote version:',
print subprocess.Popen(
value, shell=True, stdout=subprocess.PIPE).communicate()[0].strip()
else:
print 'Check manually: %s' % value
print
if __name__ == '__main__':
if 'third_party' not in os.listdir('.'):
os.chdir('..')
    if 'third_party' not in os.listdir('.'):
        logging.error('Must run from the application root.')
        sys.exit(1)
subversion_externals = GetSubversionExternals()
for skipping_dirs in sorted(subversion_externals.keys()):
print "Skipping directory managed by svn:externals: %s" % skipping_dirs
check_dirs = GetThirdPartyDirectoriesToCheck(subversion_externals.keys())
for third_party_dir in check_dirs:
CheckVersion(third_party_dir)
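# For reference, each third_party/<name>/README.browserscope is expected to
# carry lines like the following, matching the regex in CheckVersion
# (values here are illustrative only):
#
#   URL: http://example.com/project
#   VERSION: 1.2.3
#   CHECK_VERSION: curl -s http://example.com/project/LATEST
#   CHECK_VERSION_MANUALLY: compare against the upstream changelog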
One of the most common reasons given for ordering a lumbar spine MRI examination is to rule out disc herniation. MRI is a very efficient tool for this condition. It is helpful, however, to define several terms to ensure a clear understanding of the described elements of the report. In the past there has been considerable variation in the terms used from one radiologist to the next to describe disc herniations. It was routine to see a disc herniation described as a “disc bulging” by one radiologist and a “disc herniation” by a second. I have found the article in the journal Spine (vol. 26, no. 5, pp E93-E113) to be very helpful in differentiating a true disc herniation from a disc bulge and in clarifying the differences between herniation types.
Basically, a “disc bulge” is defined as a prominence of the periphery of the disc that exceeds 50% of the circumference of the disc. A “disc herniation” is defined as a discal prominence that does not exceed 50% of the disc circumference. For comparison purposes, please refer to figure 1. The illustration in the upper left corner demonstrates a diffuse disc bulge. The dotted line represents the margin of the underlying vertebral endplate. The solid outer line represents the margins of the bulging disc. The margins of the disc extend beyond the endplate over 100% of the disc circumference.
In image 1, the arrow points to an extrusion type herniation in which the nuclear material maintains contact with the parent disc. In image 2, there is an oval extruded fragment that has lost connection with the disc. This is an example of a sequestered disc.
Lumbar disc herniations are also graded according to size. An AP dimension of the disc that measures less than 5.0 mm is small, 5.0-10.0 mm is termed moderate and greater than 10 mm is a large herniation.
The final element in the description of disc herniations is the effect on the thecal sac. If the herniation encroaches upon the central canal of the segment with 50% or greater compromise of the contents, the effect is said to be severe. Encroachment of 25-50% is moderate and 25% or less is minor. If there is compression of a descending nerve root, this needs to be described as well. Image 3 reflects compression of the right descending nerve root. The left root is seen as a hypointense circular shape adjacent to the left facet joint. A white arrow head points to a right paracentral disc herniation. The normally hypointense nerve root is obliterated by the herniation.
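Reduced to code, the grading above is a pair of threshold rules. The following is a minimal sketch in Python (the function name is mine, and it assumes the AP dimension is supplied in millimetres and the canal compromise as a fraction of the contents):

    def grade_herniation(ap_mm, canal_compromise):
        # Size: less than 5 mm is small, 5-10 mm moderate, over 10 mm large.
        if ap_mm < 5.0:
            size = 'small'
        elif ap_mm <= 10.0:
            size = 'moderate'
        else:
            size = 'large'
        # Thecal sac effect: 50% or more is severe, 25-50% moderate,
        # 25% or less minor.
        if canal_compromise >= 0.5:
            effect = 'severe'
        elif canal_compromise > 0.25:
            effect = 'moderate'
        else:
            effect = 'minor'
        return size, effect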
Universal usage of the terms outlined in this article would help to alleviate the confusion that results from the use of inconsistent terminology and would allow a more uniform understanding of the information contained within the report.
from django.conf.urls import patterns, include, url
from django.contrib import admin
from Small import views
from .views import *
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from core.views import shop
from django.conf import settings
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^accounting/',include('accounting.urls')),
url(r'^core/',include('core.urls')),
#url(r'^purchase/',include('purchase.urls')),
url(r'^product/',include('product.urls')),
url(r'^warehouse/',include('warehouse.urls')),
url(r'^procurement/',include('procurement.urls')),
# url(r'^fulfillment/',include('fulfillment.urls')),
    url(r'^$', Home.as_view(), name="home"),
    url(r'^index$', Home.as_view(), name="home"),
    url(r'^pricing$', Pricing.as_view(), name="pricing"),
    url(r'^services$', Services.as_view(), name="services"),
    url(r'^about$', About.as_view(), name="about"),
    url(r'^profile$', views.Profile, name='profile'),
    url(r'^accounts/', include('registration.backends.default.urls')),
)
urlpatterns += patterns('django.contrib.auth.views',
url(r'^login/$', 'login',
{'template_name': 'registration/login.html'},
name = 'login'
),
url(r'^logout/$', 'logout',
{'template_name': 'registration/logout.html'},
name = 'logout'
),
)
urlpatterns += staticfiles_urlpatterns()
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, }),
)
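# The star import above ("from .views import *") is assumed to supply the
# class-based views routed here; a minimal sketch of what Small/views.py
# presumably contains (illustrative, not shown in this file):
#
#   from django.views.generic import TemplateView
#
#   class Home(TemplateView):
#       template_name = 'home.html'
#
#   class Pricing(TemplateView):
#       template_name = 'pricing.html'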
To Educate and Train Socially Responsive Managers to face the day-to-day challenges of the Competitive Global Work Environment.
To be one of the best management schools by developing individuals through socially relevant management training, conducting applied research, and developing instructional resources.
The purpose of this workshop was to provide an in-depth look at the basics of biostatistics for healthcare professionals. The two-day workshop aimed to provide a clear theoretical grounding in data handling and analysis using free resources.
Admissions are open for our two-year full-time MBA program in Healthcare Management. It is the 11th batch of the University.
The symposium started with an inaugural ceremony. Dr. Subhasish Chatterjee, Principal, DoM; Dr. Pinkal Shah, Coordinator, Center for Healthcare Entrepreneurship; and Dr. A. K. Seth, Dean, Allied Science, graced the occasion as guests of honour and chief guest respectively.
The workshop provided insight into Google Forms, a free and easily accessible Google platform. This often-unexplored tool lets users prepare survey questionnaires in any format, manage the workplace remotely through their smart devices, run online assessments, and collaborate on work.
# Copyright (C) 2007-2016 Giampaolo Rodola' <g.rodola@gmail.com>.
# Use of this source code is governed by MIT license that can be
# found in the LICENSE file.
"""
Start a stand alone anonymous FTP server from the command line as in:
$ python -m pyftpdlib
"""
import logging
import optparse
import os
import sys
from . import __ver__
from ._compat import getcwdu
from .authorizers import DummyAuthorizer
from .handlers import FTPHandler
from .log import config_logging
from .servers import FTPServer
class CustomizedOptionFormatter(optparse.IndentedHelpFormatter):
"""Formats options shown in help in a prettier way."""
def format_option(self, option):
result = []
opts = self.option_strings[option]
result.append(' %s\n' % opts)
if option.help:
help_text = ' %s\n\n' % self.expand_default(option)
result.append(help_text)
return ''.join(result)
def main():
"""Start a stand alone anonymous FTP server."""
usage = "python -m pyftpdlib [options]"
parser = optparse.OptionParser(usage=usage, description=main.__doc__,
formatter=CustomizedOptionFormatter())
parser.add_option('-i', '--interface', default=None, metavar="ADDRESS",
help="specify the interface to run on (default all "
"interfaces)")
parser.add_option('-p', '--port', type="int", default=2121, metavar="PORT",
help="specify port number to run on (default 2121)")
parser.add_option('-w', '--write', action="store_true", default=False,
help="grants write access for the anonymous user "
"(default read-only)")
parser.add_option('-d', '--directory', default=getcwdu(), metavar="FOLDER",
help="specify the directory to share (default current "
"directory)")
parser.add_option('-n', '--nat-address', default=None, metavar="ADDRESS",
help="the NAT address to use for passive connections")
parser.add_option('-r', '--range', default=None, metavar="FROM-TO",
help="the range of TCP ports to use for passive "
"connections (e.g. -r 8000-9000)")
parser.add_option('-v', '--version', action='store_true',
help="print pyftpdlib version and exit")
parser.add_option('-V', '--verbose', action='store_true',
help="activate a more verbose logging")
options, args = parser.parse_args()
if options.version:
sys.exit("pyftpdlib %s" % __ver__)
if options.verbose:
config_logging(level=logging.DEBUG)
passive_ports = None
if options.range:
try:
start, stop = options.range.split('-')
start = int(start)
stop = int(stop)
except ValueError:
parser.error('invalid argument passed to -r option')
else:
passive_ports = list(range(start, stop + 1))
# On recent Windows versions, if address is not specified and IPv6
# is installed the socket will listen on IPv6 by default; in this
# case we force IPv4 instead.
if os.name in ('nt', 'ce') and not options.interface:
options.interface = '0.0.0.0'
authorizer = DummyAuthorizer()
    perm = "elradfmwM" if options.write else "elr"
authorizer.add_anonymous(options.directory, perm=perm)
handler = FTPHandler
handler.authorizer = authorizer
handler.masquerade_address = options.nat_address
handler.passive_ports = passive_ports
    ftpd = FTPServer((options.interface, options.port), handler)
try:
ftpd.serve_forever()
finally:
ftpd.close_all()
if __name__ == '__main__':
main()
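# For library use (without the CLI), the same setup can be written directly
# against the public pyftpdlib API. A minimal sketch, with the shared
# directory and address chosen for illustration:
#
#   from pyftpdlib.authorizers import DummyAuthorizer
#   from pyftpdlib.handlers import FTPHandler
#   from pyftpdlib.servers import FTPServer
#
#   authorizer = DummyAuthorizer()
#   authorizer.add_anonymous('/tmp', perm='elr')  # anonymous, read-only
#   handler = FTPHandler
#   handler.authorizer = authorizer
#   FTPServer(('0.0.0.0', 2121), handler).serve_forever()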
SuperGuardian keeps abreast of technology and products in the sector, and works in an environment of continuous improvement.
“We never stand still. We deliver an excellent service already, but continually seek ways to give clients extra value,” SuperGuardian chief executive Olivia Long says.
According to Long, this year the industry faced one of the most significant changes to businesses it has seen: the removal of the accountants’ exemption, which was replaced with the new licensing regime.
“We wanted to retain complete control over the way we do things so we applied to ASIC (Australian Securities and Investments Commission) for our own Australian financial services licence,” she explains.
But rather than seeing the removal as a challenge, SuperGuardian approached it as an opportunity, particularly as financial advisers were finding it increasingly difficult to work with accountants who did not specialise in SMSFs and did not keep data up-to-date during the financial year.
“And the timing could not have been better with the legislation of the super reforms that became law late last year,” she reveals.
“We were thrilled to be able to meet with our clients one-on-one, discuss their situation and provide them with a personalised statement of advice on what action to take because of the reforms.”
Long believes one of the business’s biggest achievements in the past 12 months has been its efforts to truly go above and beyond the traditional day-to-day accounting and compliance services, and give proactive advice.
“We survey our clients every two years and, in addition, I always seek adviser feedback every time I meet with them,” she says.
Long commends the firm’s team for its success at this year’s SMSF awards.
“We pride ourselves on the quality and longevity of our client manager team. We have staff that have been with us for more than a decade, who have grown and developed over that time as a close-knit team,” she says.
“We all share a client service ethic second to none, and I attribute this award to the quality of the service they provide day in, day out.
“This award is important to us because it’s based on voting by clients, not just a judging panel, so the fact that advisers actually took the time to vote shows they value us enough to do something about it.”
# Trades messages with a remote host/client
# Essentially provides a layer to transform generic socket datastreams
# into messages (strings) with messagetypes
# Messagetype will be used to invoke a registered callback for that type
# with the message content as its argument
import errno
import select
import socket
import struct
mode = "I"
length_size = struct.calcsize(mode)
def recv_message(sock):
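    # Wire format: a struct-packed length, the messagetype bytes, another
    # packed length, then the message bytes; MSG_WAITALL blocks until each
    # chunk arrives in full.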
data = sock.recv(length_size, socket.MSG_WAITALL)
if not data:
return None, None
msg_size = struct.unpack(mode, data)[0]
messagetype = ''
if msg_size > 0:
messagetype = sock.recv(msg_size, socket.MSG_WAITALL)
data = sock.recv(length_size, socket.MSG_WAITALL)
msg_size = struct.unpack(mode, data)[0]
message = ''
if msg_size > 0:
message = sock.recv(msg_size, socket.MSG_WAITALL)
return messagetype, message
def send_message(sock, messagetype, message):
try:
mtl = struct.pack(mode, len(messagetype))
mcl = struct.pack(mode, len(message))
sock.sendall(mtl + messagetype + mcl + message)
except socket.error, e:
print 'Error sending data'
print e
class Messager():
def __init__(self):
self.clients = []
self.callbacks = dict()
self.listening = False
def __del__(self):
if self.listening:
self.hostSock.close()
        for sock in self.clients:
sock.close()
def setupHost(self, address, port):
self.hostSock = socket.socket()
self.hostSock.bind((address, port))
self.hostSock.listen(5)
self.hostSock.setblocking(0)
self.listening = True
def processHost(self):
try:
if self.listening:
conn, address = self.hostSock.accept()
self.clients.append(conn)
except socket.timeout:
pass
        except socket.error, e:
            # EAGAIN / EWOULDBLOCK just mean there is no pending connection.
            if e[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                pass
            else:
                print e
inputs, outputs, errors = select.select(self.clients, [], self.clients, 0)
for sock in inputs:
messagetype, message = recv_message(sock)
if messagetype is None:
self.clients.remove(sock)
sock.close()
elif messagetype in self.callbacks:
self.callbacks[messagetype](sock, message)
for sock in errors:
self.clients.remove(sock)
sock.close()
# def send(self, target, messagetype, message):
# pass
def register(self, messagetype, callback):
self.callbacks[messagetype] = callback
def unregister(self, messagetype):
del self.callbacks[messagetype]
def connect(self, remoteHost, remotePort):
# Connect to a remote host and add its socket to the client list
# i.e. we don't listen for connection, but initiate it
#This allows two-way communication between two messager objects
sock = socket.socket()
sock.connect((remoteHost, remotePort))
self.clients.append(sock)
return sock
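# A minimal usage sketch (address, port, and the 'chat' message type are
# illustrative): one Messager listens, another connects, and a registered
# callback handles incoming messages.
if __name__ == '__main__':
    import time

    def on_chat(sock, message):
        print 'chat:', message

    host = Messager()
    host.setupHost('127.0.0.1', 5000)
    host.register('chat', on_chat)

    client = Messager()
    remote = client.connect('127.0.0.1', 5000)
    send_message(remote, 'chat', 'hello')

    while True:
        host.processHost()  # accepts new connections, dispatches callbacks
        time.sleep(0.1)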
Ben speaks with Erick Erickson, editor-in-chief of Red State, on this episode of The Federalist Radio Hour. The two discussed 2016, the old Iowa Straw Poll, and the new Red State Gathering. Domenech also got a sense of the man behind the microphone, asking Erickson about how his faith influences his politics.
#! /usr/bin/env python
"""PgQ ticker and maintenance.
"""
import sys
import skytools
from pgq.ticker import SmartTicker
from pgq.status import PGQStatus
#from pgq.admin import PGQAdmin
"""TODO:
pgqadm ini check
"""
command_usage = """
%prog [options] INI CMD [subcmd args]
commands:
ticker start ticking & maintenance process
status show overview of queue health
install install code into db
create QNAME create queue
drop QNAME drop queue
  register QNAME CONS    attach consumer to queue
  unregister QNAME CONS  detach consumer from queue
config QNAME [VAR=VAL] show or change queue config
"""
config_allowed_list = {
'queue_ticker_max_count': 'int',
'queue_ticker_max_lag': 'interval',
'queue_ticker_idle_period': 'interval',
'queue_rotation_period': 'interval',
}
class PGQAdmin(skytools.DBScript):
def __init__(self, args):
skytools.DBScript.__init__(self, 'pgqadm', args)
self.set_single_loop(1)
if len(self.args) < 2:
print "need command"
sys.exit(1)
int_cmds = {
'create': self.create_queue,
'drop': self.drop_queue,
'register': self.register,
'unregister': self.unregister,
'install': self.installer,
'config': self.change_config,
}
cmd = self.args[1]
if cmd == "ticker":
script = SmartTicker(args)
elif cmd == "status":
script = PGQStatus(args)
elif cmd in int_cmds:
script = None
self.work = int_cmds[cmd]
else:
print "unknown command"
sys.exit(1)
if self.pidfile:
self.pidfile += ".admin"
self.run_script = script
def start(self):
if self.run_script:
self.run_script.start()
else:
skytools.DBScript.start(self)
def init_optparse(self, parser=None):
p = skytools.DBScript.init_optparse(self, parser)
p.set_usage(command_usage.strip())
return p
def installer(self):
objs = [
skytools.DBLanguage("plpgsql"),
skytools.DBFunction("txid_current_snapshot", 0, sql_file="txid.sql"),
skytools.DBSchema("pgq", sql_file="pgq.sql"),
]
db = self.get_database('db')
curs = db.cursor()
skytools.db_install(curs, objs, self.log)
db.commit()
def create_queue(self):
qname = self.args[2]
self.log.info('Creating queue: %s' % qname)
self.exec_sql("select pgq.create_queue(%s)", [qname])
def drop_queue(self):
qname = self.args[2]
self.log.info('Dropping queue: %s' % qname)
self.exec_sql("select pgq.drop_queue(%s)", [qname])
def register(self):
qname = self.args[2]
cons = self.args[3]
self.log.info('Registering consumer %s on queue %s' % (cons, qname))
self.exec_sql("select pgq.register_consumer(%s, %s)", [qname, cons])
def unregister(self):
qname = self.args[2]
cons = self.args[3]
self.log.info('Unregistering consumer %s from queue %s' % (cons, qname))
self.exec_sql("select pgq.unregister_consumer(%s, %s)", [qname, cons])
def change_config(self):
if len(self.args) < 3:
            queue_list = self.get_queue_list()
            for qname in queue_list:
                self.show_config(qname)
return
qname = self.args[2]
if len(self.args) == 3:
self.show_config(qname)
return
alist = []
for el in self.args[3:]:
k, v = el.split('=')
if k not in config_allowed_list:
qk = "queue_" + k
if qk not in config_allowed_list:
raise Exception('unknown config var: '+k)
k = qk
expr = "%s=%s" % (k, skytools.quote_literal(v))
alist.append(expr)
self.log.info('Change queue %s config to: %s' % (qname, ", ".join(alist)))
sql = "update pgq.queue set %s where queue_name = %s" % (
", ".join(alist), skytools.quote_literal(qname))
self.exec_sql(sql, [])
def exec_sql(self, q, args):
self.log.debug(q)
db = self.get_database('db')
curs = db.cursor()
curs.execute(q, args)
db.commit()
def show_config(self, qname):
fields = []
for f, kind in config_allowed_list.items():
if kind == 'interval':
sql = "extract('epoch' from %s)::text as %s" % (f, f)
fields.append(sql)
else:
fields.append(f)
klist = ", ".join(fields)
q = "select " + klist + " from pgq.queue where queue_name = %s"
db = self.get_database('db')
curs = db.cursor()
curs.execute(q, [qname])
res = curs.dictfetchone()
db.commit()
if res is None:
print "no such queue:", qname
return
print qname
for k in config_allowed_list:
n = k
if k[:6] == "queue_":
n = k[6:]
print " %s\t=%7s" % (n, res[k])
def get_queue_list(self):
db = self.get_database('db')
curs = db.cursor()
curs.execute("select queue_name from pgq.queue order by 1")
rows = curs.fetchall()
db.commit()
        return [r[0] for r in rows]
if __name__ == '__main__':
script = PGQAdmin(sys.argv[1:])
script.start()
Antoinette Grimes, recipient of the 2013 L.I.F.E scholarship, was recently featured in the East Valley Tribune, a newspaper in her hometown of Phoenix. The article puts into focus her difficult but inspiring journey with lupus: how she struggled with telling her family, was forced to leave the military, and lost university scholarships, but ultimately did not let lupus ruin her life, and she hopes that others with lupus don't either. She continues to make a difference in others' lives by working as a lupus support group facilitator, an advocate for female veterans, and a personal trainer for people with disabilities. Check out the full article here!
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The niftyseg module provides classes for interfacing with `niftyseg
<https://sourceforge.net/projects/niftyseg/>`_ command line tools.
These are the base tools for working with niftyseg.
EM Statistical Segmentation tool is found in niftyseg/em.py
Fill lesions tool is found in niftyseg/lesions.py
Mathematical operation tool is found in niftyseg/maths.py
Patch Match tool is found in niftyseg/patchmatch.py
Statistical operation tool is found in niftyseg/stats.py
Label Fusion and CalcTopNcc tools are in niftyseg/steps.py
Examples
--------
See the docstrings of the individual classes for examples.
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from ..niftyreg.base import no_nifty_package
from ..niftyfit.base import NiftyFitCommand
import subprocess
import warnings
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class NiftySegCommand(NiftyFitCommand):
"""
Base support interface for NiftySeg commands.
"""
_suffix = '_ns'
_min_version = None
def __init__(self, **inputs):
super(NiftySegCommand, self).__init__(**inputs)
def get_version(self):
if no_nifty_package(cmd=self.cmd):
return None
# exec_cmd = ''.join((self.cmd, ' --version'))
exec_cmd = 'seg_EM --version'
# Using seg_EM for version (E.G: seg_stats --version doesn't work)
        # decode() keeps this working on Python 3, where check_output
        # returns bytes.
        return subprocess.check_output(
            exec_cmd, shell=True).decode().strip('\n')
The Wounded Knee Massacre on December 29, 1890 was one of the US Government’s first attempts to disarm the Native Americans.
The Lakota’s lands were being seized.
Their main food source, bison, were already hunted nearly to extinction by English settlers.
And when the Lakota began to resist the men who were forcing them out of their homes, killing their people, and destroying their lives, the government ordered that they be disarmed.
US cavalry slaughtered hundreds of Lakota, including unarmed men, women, and children.
This was not a battle.
This was a massacre of an inconvenient racial minority in order to steal their land and resources.
And it was sanctioned by the US Government.
While Wounded Knee was not strictly a firearm confiscation, the roots of gun control lie in yet more racism.
Before the Civil War, the Slave Codes made it illegal for black people to own guns, vote, assemble anywhere, or even learn to read or write.
After the Civil War, the Black Codes replaced the Slave Codes to continue to restrict Freedman and keep them subjugated.
Free men and women were restricted from leaving their plantations, forbidden to assemble or practice religion in groups, and, you guessed it, barred from owning weapons.
Interestingly enough, these laws might also be the origin of business licenses, since blacks had to obtain licenses from white people in order to begin any kind of business.
Even today, some of the effects of those laws restricting black people’s rights echo through our culture.
It wasn’t long ago that we were marching to stop the government-enforced separation of races…but that’s another blog post.
Let’s jump ahead to the first World War.
You probably know about the Japanese internment camps of World War II, but did you know that at least 4 internment camps were built by the US government during WWI?
Germans were classified as “enemy aliens” (who were prohibited from owning firearms) and rounded up for imprisonment just for being German or German-American, even those that were already US citizens.
Anyone who was German-born, even if they were no longer a German citizen, was soon forced to register and carry identification with them everywhere.
They were tracked, restricted, and detained.
During this time, the government “confiscated” hundreds of millions of dollars in German property, which ultimately helped to finance the war and the camps where undesirable citizens were kept out of the way.
Expressing dissent was a quick way to find yourself behind barbed wire in a camp if you were German, and behind bars in jail if you weren’t.
And we haven’t gotten much further today.
We don’t call them “enemy aliens” anymore.
We just put them on the “terrorist watch list” and do the same thing.
Germans were still interned in the second World War, but you probably know more about the Japanese Internment Camps because those are actually mentioned in public schools.
It started with a general fear and distrust of the Japanese.
That led to restrictions for anyone of Japanese descent, including a ban on owning any “potentially dangerous items” such as firearms, curfews, and an impressive fear-inspiring label: The New Enemy.
Then it progressed to home seizures and raids.
Of course, the raids and seizures included firearm confiscations if anything was left.
After all, those guns were now illegally owned because the owner was Japanese.
And then we forced innocent people into prison camps, stole their possessions, and used their personal property to fund it all.
As an interesting side note, this, like most of the grossly discriminatory laws mentioned in this post, was made law by executive order.
The pattern is always the same: the political climate creates fear around a particular racial or national group.
That group is given some kind of name to make them scarier, like savages or enemy aliens.
Once an Us vs. Them mentality is established widely enough, the government issues restrictions to that group’s personal rights, including their gun rights.
If nobody steps in to stop the momentum, that group of people, along with any political dissenters, is forced to forfeit their property (and perhaps their lives) which finances further actions.
But we’re sure there’s nothing else to worry about.
It’s not like there’s a lot of fear of a particular racial group that we’re at war against.
And it’s not like we’re assigning scary names to people like Domestic Terrorist or Political Extremist.
Surely, people with dissenting political opinions aren't finding themselves included on anything like a Terrorist Watch List that has the potential to strip them of liberty, property, and life.
# This file is part of rinohtype, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
import re
from collections import OrderedDict
from configparser import ConfigParser
from io import StringIO
from itertools import chain
from pathlib import Path
from token import NUMBER, ENDMARKER, MINUS, PLUS, NAME, NEWLINE
from tokenize import generate_tokens
from warnings import warn
from .util import (NamedDescriptor, WithNamedDescriptors,
NotImplementedAttribute, class_property, PeekIterator,
cached)
__all__ = ['AttributeType', 'AcceptNoneAttributeType', 'OptionSet',
'OptionSetMeta', 'Attribute', 'OverrideDefault',
'AttributesDictionary', 'Configurable', 'RuleSet', 'RuleSetFile',
'Bool', 'Integer', 'ParseError', 'Var']
class AttributeType(object):
def __eq__(self, other):
return type(self) == type(other) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
@classmethod
def check_type(cls, value):
return isinstance(value, cls)
@classmethod
def from_string(cls, string, source=None):
return cls.parse_string(string, source)
@classmethod
def parse_string(cls, string, source):
tokens = TokenIterator(string)
value = cls.from_tokens(tokens, source)
if next(tokens).type != ENDMARKER:
raise ParseError('Syntax error')
return value
@classmethod
def from_tokens(cls, tokens, source):
raise NotImplementedError(cls)
@classmethod
def validate(cls, value):
if isinstance(value, str):
value = cls.from_string(value)
if not cls.check_type(value):
raise TypeError("{} is not of type {}".format(value, cls.__name__))
return value
@classmethod
def doc_repr(cls, value):
return '``{}``'.format(value) if value else '(no value)'
@classmethod
def doc_format(cls):
warn('Missing implementation for {}.doc_format'.format(cls.__name__))
return ''
class AcceptNoneAttributeType(AttributeType):
"""Accepts 'none' (besides other values)"""
@classmethod
def check_type(cls, value):
return (isinstance(value, type(None))
or super(__class__, cls).check_type(value))
@classmethod
def from_string(cls, string, source=None):
if string.strip().lower() == 'none':
return None
return super(__class__, cls).from_string(string, source)
@classmethod
def doc_repr(cls, value):
return '``{}``'.format('none' if value is None else value)
class OptionSetMeta(type):
def __new__(metacls, classname, bases, cls_dict):
cls = super().__new__(metacls, classname, bases, cls_dict)
cls.__doc__ = (cls_dict['__doc__'] + '\n\n'
if '__doc__' in cls_dict else '')
cls.__doc__ += 'Accepts: {}'.format(cls.doc_format())
return cls
def __getattr__(cls, item):
if item == 'NONE' and None in cls.values:
return None
string = item.lower().replace('_', ' ')
if item.isupper() and string in cls.values:
return string
raise AttributeError(item)
def __iter__(cls):
return iter(cls.values)
class OptionSet(AttributeType, metaclass=OptionSetMeta):
"""Accepts the values listed in :attr:`values`"""
values = ()
@classmethod
def check_type(cls, value):
return value in cls.values
@class_property
def value_strings(cls):
return ['none' if value is None else value.lower()
for value in cls.values]
@classmethod
def _value_from_tokens(cls, tokens):
if tokens.next.type != NAME:
raise ParseError('Expecting a name')
token = next(tokens)
_, start_col = token.start
while tokens.next and tokens.next.exact_type in (NAME, MINUS):
token = next(tokens)
_, end_col = token.end
return token.line[start_col:end_col].strip()
@classmethod
def from_tokens(cls, tokens, source):
option_string = cls._value_from_tokens(tokens)
try:
index = cls.value_strings.index(option_string.lower())
except ValueError:
raise ValueError("'{}' is not a valid {}. Must be one of: '{}'"
.format(option_string, cls.__name__,
"', '".join(cls.value_strings)))
return cls.values[index]
@classmethod
def doc_repr(cls, value):
return '``{}``'.format(value)
@classmethod
def doc_format(cls):
return ', '.join('``{}``'.format(s) for s in cls.value_strings)
class Attribute(NamedDescriptor):
"""Descriptor used to describe a style attribute"""
def __init__(self, accepted_type, default_value, description):
self.name = None
self.accepted_type = accepted_type
self.default_value = accepted_type.validate(default_value)
self.description = description
self.source = None
def __get__(self, style, type=None):
try:
return style.get(self.name, self.default_value)
except AttributeError:
return self
def __set__(self, style, value):
if not self.accepted_type.check_type(value):
raise TypeError('The {} attribute only accepts {} instances'
.format(self.name, self.accepted_type.__name__))
style[self.name] = value
class OverrideDefault(Attribute):
"""Overrides the default value of an attribute defined in a superclass"""
def __init__(self, default_value):
self._default_value = default_value
@property
def overrides(self):
return self._overrides
@overrides.setter
def overrides(self, attribute):
self._overrides = attribute
self.default_value = self.accepted_type.validate(self._default_value)
@property
def accepted_type(self):
return self.overrides.accepted_type
@property
def description(self):
return self.overrides.description
class WithAttributes(WithNamedDescriptors):
def __new__(mcls, classname, bases, cls_dict):
attributes = cls_dict['_attributes'] = OrderedDict()
doc = []
for name, attr in cls_dict.items():
if not isinstance(attr, Attribute):
continue
attributes[name] = attr
if isinstance(attr, OverrideDefault):
for mro_cls in (cls for base_cls in bases
for cls in base_cls.__mro__):
try:
attr.overrides = mro_cls._attributes[name]
break
except (AttributeError, KeyError):
pass
else:
raise NotImplementedError
battr = ':attr:`{0} <.{0}.{1}>`'.format(mro_cls.__name__, name)
inherits = f' (inherited from {battr})'
overrides = f' (overrides {battr} default)'
else:
inherits = overrides = ''
doc.append('{}: {}{}'.format(name, attr.description, inherits))
format = attr.accepted_type.doc_format()
default = attr.accepted_type.doc_repr(attr.default_value)
doc.append('\n *Accepts* :class:`.{}`: {}\n'
.format(attr.accepted_type.__name__, format))
doc.append('\n *Default*: {}{}\n'
.format(default, overrides))
supported_attributes = list(name for name in attributes)
documented = set(supported_attributes)
for base_class in bases:
try:
supported_attributes.extend(base_class._supported_attributes)
except AttributeError:
continue
for mro_cls in base_class.__mro__:
for name, attr in getattr(mro_cls, '_attributes', {}).items():
if name in documented:
continue
doc.append('{0}: {1} (inherited from :attr:`{2} <.{2}.{0}>`)'
.format(name, attr.description,
mro_cls.__name__))
format = attr.accepted_type.doc_format()
default = attr.accepted_type.doc_repr(attr.default_value)
doc.append('\n *Accepts* :class:`.{}`: {}\n'
.format(attr.accepted_type.__name__, format))
doc.append('\n *Default*: {}\n'.format(default))
documented.add(name)
if doc:
attr_doc = '\n '.join(chain([' Attributes:'], doc))
cls_dict['__doc__'] = (cls_dict.get('__doc__', '') + '\n\n'
+ attr_doc)
cls_dict['_supported_attributes'] = supported_attributes
return super().__new__(mcls, classname, bases, cls_dict)
@property
def _all_attributes(cls):
for mro_class in reversed(cls.__mro__):
for name in getattr(mro_class, '_attributes', ()):
yield name
@property
def supported_attributes(cls):
for mro_class in cls.__mro__:
for name in getattr(mro_class, '_supported_attributes', ()):
yield name
class AttributesDictionary(OrderedDict, metaclass=WithAttributes):
def __init__(self, base=None, **attributes):
self.name = None
self.source = None
self.base = base
super().__init__(attributes)
@classmethod
def _get_default(cls, attribute):
"""Return the default value for `attribute`.
If no default is specified in this style, get the default from the
nearest superclass.
If `attribute` is not supported, raise a :class:`KeyError`."""
try:
for klass in cls.__mro__:
if attribute in klass._attributes:
return klass._attributes[attribute].default_value
except AttributeError:
raise KeyError("No attribute '{}' in {}".format(attribute, cls))
@classmethod
def attribute_definition(cls, name):
try:
for klass in cls.__mro__:
if name in klass._attributes:
return klass._attributes[name]
except AttributeError:
pass
raise KeyError(name)
@classmethod
def attribute_type(cls, name):
try:
return cls.attribute_definition(name).accepted_type
except KeyError:
raise TypeError('{} is not a supported attribute for {}'
.format(name, cls.__name__))
@classmethod
    def get_ruleset(cls, document):
        raise NotImplementedError
class DefaultValueException(Exception):
pass
class Configurable(object):
configuration_class = NotImplementedAttribute()
def configuration_name(self, document):
raise NotImplementedError
def get_config_value(self, attribute, document):
ruleset = self.configuration_class.get_ruleset(document)
return ruleset.get_value_for(self, attribute, document)
class BaseConfigurationException(Exception):
def __init__(self, base_name):
self.name = base_name
class Source(object):
"""Describes where a :class:`DocumentElement` was defined"""
@property
def location(self):
"""Textual representation of this source"""
return repr(self)
@property
def root(self):
"""Directory path for resolving paths relative to this source"""
return None
class RuleSet(OrderedDict, Source):
main_section = NotImplementedAttribute()
def __init__(self, name, base=None, source=None, **kwargs):
super().__init__(**kwargs)
self.name = name
self.base = base
self.source = source
self.variables = OrderedDict()
def contains(self, name):
return name in self or (self.base and self.base.contains(name))
def find_source(self, name):
"""Find top-most ruleset where configuration `name` is defined"""
if name in self:
return self.name
if self.base:
return self.base.find_source(name)
def get_configuration(self, name):
try:
return self[name]
except KeyError:
if self.base:
return self.base.get_configuration(name)
raise
def __setitem__(self, name, item):
assert name not in self
if isinstance(item, AttributesDictionary): # FIXME
self._validate_attributes(name, item)
super().__setitem__(name, item)
def __call__(self, name, **kwargs):
self[name] = self.get_entry_class(name)(**kwargs)
def __repr__(self):
return '{}({})'.format(type(self).__name__, self.name)
def __str__(self):
return repr(self)
def __bool__(self):
return True
RE_VARIABLE = re.compile(r'^\$\(([a-z_ -]+)\)$', re.IGNORECASE)
def _validate_attributes(self, name, attr_dict):
attr_dict.name = name
attr_dict.source = self
for key, val in attr_dict.items():
attr_dict[key] = self._validate_attribute(attr_dict, key, val)
def _validate_attribute(self, attr_dict, name, value):
attribute_type = attr_dict.attribute_type(name)
if isinstance(value, str):
stripped = value.replace('\n', ' ').strip()
m = self.RE_VARIABLE.match(stripped)
if m:
return Var(m.group(1))
value = self._attribute_from_string(attribute_type, stripped)
elif hasattr(value, 'source'):
value.source = self
if not isinstance(value, Var) and not attribute_type.check_type(value):
raise TypeError("{} ({}) is not of the correct type for the '{}' "
"attribute".format(value, type(value).__name__,
name))
return value
@cached
def _attribute_from_string(self, attribute_type, string):
return attribute_type.from_string(string, self)
def get_variable(self, configuration_class, attribute, variable):
try:
value = self.variables[variable.name]
except KeyError:
if not self.base:
raise VariableNotDefined("Variable '{}' is not defined"
.format(variable.name))
return self.base.get_variable(configuration_class, attribute,
variable)
return self._validate_attribute(configuration_class, attribute, value)
def get_entry_class(self, name):
raise NotImplementedError
def _get_value_recursive(self, name, attribute):
if name in self:
entry = self[name]
if attribute in entry:
return entry[attribute]
elif isinstance(entry.base, str):
raise BaseConfigurationException(entry.base)
elif entry.base is not None:
return entry.base[attribute]
if self.base:
return self.base._get_value_recursive(name, attribute)
raise DefaultValueException
@cached
def get_value(self, name, attribute):
try:
return self._get_value_recursive(name, attribute)
except BaseConfigurationException as exc:
return self.get_value(exc.name, attribute)
def _get_value_lookup(self, configurable, attribute, document):
name = configurable.configuration_name(document)
return self.get_value(name, attribute)
def get_value_for(self, configurable, attribute, document):
try:
value = self._get_value_lookup(configurable, attribute, document)
except DefaultValueException:
value = configurable.configuration_class._get_default(attribute)
if isinstance(value, Var):
configuration_class = configurable.configuration_class
value = self.get_variable(configuration_class, attribute, value)
return value
class RuleSetFile(RuleSet):
def __init__(self, filename, base=None, source=None, **kwargs):
self.filename = self._absolute_path(filename, source)
config = ConfigParser(default_section=None, delimiters=('=',),
interpolation=None)
with self.filename.open() as file:
config.read_file(file)
options = dict(config[self.main_section]
if config.has_section(self.main_section) else {})
name = options.pop('name', filename)
base = options.pop('base', base)
options.update(kwargs) # optionally override options
super().__init__(name, base=base, source=source, **options)
if config.has_section('VARIABLES'):
for name, value in config.items('VARIABLES'):
self.variables[name] = value
for section_name, section_body in config.items():
if section_name in (None, self.main_section, 'VARIABLES'):
continue
if ':' in section_name:
name, classifier = (s.strip() for s in section_name.split(':'))
else:
name, classifier = section_name.strip(), None
self.process_section(name, classifier, section_body.items())
@classmethod
def _absolute_path(cls, filename, source):
file_path = Path(filename)
if not file_path.is_absolute():
if source is None or source.root is None:
raise ValueError('{} path should be absolute: {}'
.format(cls.__name__, file_path))
file_path = source.root / file_path
return file_path
@property
def location(self):
return str(self.filename.resolve()), None, None
@property
def root(self):
return self.filename.parent.resolve()
def process_section(self, section_name, classifier, items):
raise NotImplementedError
class Bool(AttributeType):
"""Expresses a binary choice"""
@classmethod
def check_type(cls, value):
return isinstance(value, bool)
@classmethod
def from_tokens(cls, tokens, source):
string = next(tokens).string
lower_string = string.lower()
if lower_string not in ('true', 'false'):
raise ValueError("'{}' is not a valid {}. Must be one of 'true' "
"or 'false'".format(string, cls.__name__))
return lower_string == 'true'
@classmethod
def doc_repr(cls, value):
return '``{}``'.format(str(value).lower())
@classmethod
def doc_format(cls):
return '``true`` or ``false``'
class Integer(AttributeType):
"""Accepts natural numbers"""
@classmethod
def check_type(cls, value):
return isinstance(value, int)
@classmethod
def from_tokens(cls, tokens, source):
token = next(tokens)
sign = 1
if token.exact_type in (MINUS, PLUS):
sign = 1 if token.exact_type == PLUS else -1
token = next(tokens)
if token.type != NUMBER:
raise ParseError('Expecting a number')
try:
value = int(token.string)
except ValueError:
raise ParseError('Expecting an integer')
return sign * value
@classmethod
def doc_format(cls):
return 'a natural number (positive integer)'
class TokenIterator(PeekIterator):
"""Tokenizes `string` and iterates over the tokens"""
def __init__(self, string):
self.string = string
tokens = generate_tokens(StringIO(string).readline)
super().__init__(tokens)
def _advance(self):
result = super()._advance()
if self.next and self.next.type == NEWLINE and self.next.string == '':
super()._advance()
return result
class ParseError(Exception):
pass
# variables
class Var(object):
def __init__(self, name):
super().__init__()
self.name = name
def __repr__(self):
return "{}('{}')".format(type(self).__name__, self.name)
def __str__(self):
return '$({})'.format(self.name)
def __eq__(self, other):
return self.name == other.name
class VariableNotDefined(Exception):
pass
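# A minimal usage sketch (the Alignment class is illustrative, not part of
# rinohtype): OptionSet subclasses only declare their values; string parsing
# and attribute-style access come from the machinery above.
if __name__ == '__main__':
    class Alignment(OptionSet):
        values = ('left', 'center', 'right', None)

    assert Alignment.from_string('center') == 'center'
    assert Alignment.LEFT == 'left'
    assert Alignment.NONE is None
    assert Bool.from_string('TRUE') is True
    assert Integer.from_string('-42') == -42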
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import getpass
import os
import string
import sys
from catkin_pkg.cmake import configure_file
from catkin_pkg.cmake import get_metapackage_cmake_template_path
from catkin_pkg.package import Dependency
from catkin_pkg.package import Package
from catkin_pkg.package import PACKAGE_MANIFEST_FILENAME
from catkin_pkg.package import Person
class PackageTemplate(Package):
def __init__(self, catkin_deps=None, system_deps=None, boost_comps=None, **kwargs):
super(PackageTemplate, self).__init__(**kwargs)
self.catkin_deps = catkin_deps or []
self.system_deps = system_deps or []
self.boost_comps = boost_comps or []
self.validate()
@staticmethod
def _create_package_template(package_name, description=None, licenses=None,
maintainer_names=None, author_names=None,
version=None, catkin_deps=None, system_deps=None,
boost_comps=None):
"""
alternative factory method mapping CLI args to argument for
Package class
:param package_name:
:param description:
:param licenses:
:param maintainer_names:
:param authors:
:param version:
:param catkin_deps:
"""
        # Sort so they are alphabetical
licenses = list(licenses or ["TODO"])
licenses.sort()
if not maintainer_names:
maintainer_names = [getpass.getuser()]
maintainer_names = list(maintainer_names or [])
maintainer_names.sort()
maintainers = []
for maintainer_name in maintainer_names:
maintainers.append(
Person(maintainer_name,
'%s@todo.todo' % maintainer_name.split()[-1])
)
author_names = list(author_names or [])
author_names.sort()
authors = []
for author_name in author_names:
authors.append(Person(author_name))
catkin_deps = list(catkin_deps or [])
catkin_deps.sort()
pkg_catkin_deps = []
build_depends = []
run_depends = []
buildtool_depends = [Dependency('catkin')]
        # iterate over a copy, since entries are removed from catkin_deps
        # inside the loop (removing while iterating would skip elements)
        for dep in list(catkin_deps):
if dep.lower() == 'catkin':
catkin_deps.remove(dep)
continue
if dep.lower() == 'genmsg':
sys.stderr.write('WARNING: Packages with messages or services should not depend on genmsg, but on message_generation and message_runtime\n')
buildtool_depends.append(Dependency('genmsg'))
continue
if dep.lower() == 'message_generation':
                if 'message_runtime' not in catkin_deps:
sys.stderr.write('WARNING: Packages with messages or services should depend on both message_generation and message_runtime\n')
build_depends.append(Dependency('message_generation'))
continue
if dep.lower() == 'message_runtime':
                if 'message_generation' not in catkin_deps:
sys.stderr.write('WARNING: Packages with messages or services should depend on both message_generation and message_runtime\n')
run_depends.append(Dependency('message_runtime'))
continue
pkg_catkin_deps.append(Dependency(dep))
for dep in pkg_catkin_deps:
build_depends.append(dep)
run_depends.append(dep)
if boost_comps:
if not system_deps:
system_deps = ['boost']
            elif 'boost' not in system_deps:
system_deps.append('boost')
for dep in system_deps or []:
if not dep.lower().startswith('python-'):
build_depends.append(Dependency(dep))
run_depends.append(Dependency(dep))
package_temp = PackageTemplate(
name=package_name,
version=version or '0.0.0',
description=description or 'The %s package' % package_name,
buildtool_depends=buildtool_depends,
build_depends=build_depends,
run_depends=run_depends,
catkin_deps=catkin_deps,
system_deps=system_deps,
boost_comps=boost_comps,
licenses=licenses,
authors=authors,
maintainers=maintainers,
urls=[])
return package_temp
def read_template_file(filename, rosdistro):
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
templates = []
templates.append(os.path.join(template_dir, rosdistro, '%s.in' % filename))
templates.append(os.path.join(template_dir, '%s.in' % filename))
for template in templates:
if os.path.isfile(template):
with open(template, 'r') as fhand:
template_contents = fhand.read()
return template_contents
raise IOError(
"Could not read template for ROS distro "
"'{}' at '{}': ".format(rosdistro, ', '.join(templates)) +
"no such file or directory"
)
def _safe_write_files(newfiles, target_dir):
"""
writes file contents to target_dir/filepath for all entries of newfiles.
Aborts early if files exist in places for new files or directories
:param newfiles: a dict {filepath: contents}
:param target_dir: a string
"""
# first check no filename conflict exists
for filename in newfiles:
target_file = os.path.join(target_dir, filename)
if os.path.exists(target_file):
raise ValueError('File exists: %s' % target_file)
dirname = os.path.dirname(target_file)
        while dirname != target_dir:
if os.path.isfile(dirname):
raise ValueError('Cannot create directory, file exists: %s' %
dirname)
dirname = os.path.dirname(dirname)
for filename, content in newfiles.items():
target_file = os.path.join(target_dir, filename)
dirname = os.path.dirname(target_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
# print(target_file, content)
with open(target_file, 'ab') as fhand:
fhand.write(content.encode())
print('Created file %s' % os.path.relpath(target_file, os.path.dirname(target_dir)))
def create_package_files(target_path, package_template, rosdistro,
newfiles=None, meta=False):
"""
creates several files from templates to start a new package.
:param target_path: parent folder where to create the package
:param package_template: contains the required information
:param rosdistro: name of the distro to look up respective template
    :param newfiles: dict {filepath: contents} for additional files to write
    :param meta: True when creating a metapackage
"""
if newfiles is None:
newfiles = {}
    # allow callers to replace the default templates when the path strings match
manifest_path = os.path.join(target_path, PACKAGE_MANIFEST_FILENAME)
if manifest_path not in newfiles:
newfiles[manifest_path] = \
create_package_xml(package_template, rosdistro, meta=meta)
cmake_path = os.path.join(target_path, 'CMakeLists.txt')
    if cmake_path not in newfiles:
newfiles[cmake_path] = create_cmakelists(package_template, rosdistro, meta=meta)
_safe_write_files(newfiles, target_path)
if 'roscpp' in package_template.catkin_deps:
fname = os.path.join(target_path, 'include', package_template.name)
os.makedirs(fname)
print('Created folder %s' % os.path.relpath(fname, os.path.dirname(target_path)))
if 'roscpp' in package_template.catkin_deps or \
'rospy' in package_template.catkin_deps:
fname = os.path.join(target_path, 'src')
os.makedirs(fname)
print('Created folder %s' % os.path.relpath(fname, os.path.dirname(target_path)))
class CatkinTemplate(string.Template):
"""subclass to use @ instead of $ as markers"""
delimiter = '@'
escape = '@'
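# Quick illustration of the '@' delimiter (follows directly from the class
# above): CatkinTemplate('project(@name)').substitute({'name': 'demo'})
# returns 'project(demo)'.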
def create_cmakelists(package_template, rosdistro, meta=False):
"""
:param package_template: contains the required information
:returns: file contents as string
"""
if meta:
template_path = get_metapackage_cmake_template_path()
temp_dict = {'name': package_template.name,
'metapackage_arguments': ''
}
return configure_file(template_path, temp_dict)
else:
cmakelists_txt_template = read_template_file('CMakeLists.txt', rosdistro)
ctemp = CatkinTemplate(cmakelists_txt_template)
        if not package_template.catkin_deps:
components = ''
else:
components = ' COMPONENTS\n %s\n' % '\n '.join(package_template.catkin_deps)
boost_find_package = \
('' if not package_template.boost_comps
else ('find_package(Boost REQUIRED COMPONENTS %s)\n' %
' '.join(package_template.boost_comps)))
system_find_package = ''
for sysdep in package_template.system_deps:
if sysdep == 'boost':
continue
if sysdep.startswith('python-'):
system_find_package += '# '
system_find_package += 'find_package(%s REQUIRED)\n' % sysdep
# provide dummy values
catkin_depends = (' '.join(package_template.catkin_deps)
if package_template.catkin_deps
else 'other_catkin_pkg')
system_depends = (' '.join(package_template.system_deps)
if package_template.system_deps
else 'system_lib')
message_pkgs = [pkg for pkg in package_template.catkin_deps if pkg.endswith('_msgs')]
if message_pkgs:
message_depends = '# %s' % '# '.join(message_pkgs)
else:
message_depends = '# std_msgs # Or other packages containing msgs'
temp_dict = {'name': package_template.name,
'components': components,
'include_directories': _create_include_macro(package_template),
'boost_find': boost_find_package,
'systems_find': system_find_package,
'catkin_depends': catkin_depends,
'system_depends': system_depends,
'target_libraries': _create_targetlib_args(package_template),
'message_dependencies': message_depends
}
return ctemp.substitute(temp_dict)
def _create_targetlib_args(package_template):
result = '# ${catkin_LIBRARIES}\n'
if package_template.boost_comps:
result += '# ${Boost_LIBRARIES}\n'
if package_template.system_deps:
result += (''.join(['# ${%s_LIBRARIES}\n' %
sdep for sdep in package_template.system_deps]))
return result
def _create_include_macro(package_template):
result = '# include_directories(include)'
includes = []
if package_template.catkin_deps:
includes.append('${catkin_INCLUDE_DIRS}')
if package_template.boost_comps:
includes.append('${Boost_INCLUDE_DIRS}')
if package_template.system_deps:
deplist = []
for sysdep in package_template.system_deps:
if not sysdep.startswith('python-'):
deplist.append(sysdep)
includes.append('${%s_INCLUDE_DIRS}' % sysdep)
if deplist:
result += '\n# TODO: Check names of system library include directories (%s)' % ', '.join(deplist)
if includes:
result += '\ninclude_directories(\n %s\n)' % '\n '.join(includes)
return result
def _create_depend_tag(dep_type,
name,
version_eq=None,
version_lt=None,
version_lte=None,
version_gt=None,
version_gte=None):
"""
Helper to create xml snippet for package.xml
"""
version_string = []
for key, var in {'version_eq': version_eq,
'version_lt': version_lt,
'version_lte': version_lte,
'version_gt': version_gt,
'version_gte': version_gte}.items():
if var is not None:
version_string.append(' %s="%s"' % (key, var))
result = ' <%s%s>%s</%s>\n' % (dep_type,
''.join(version_string),
name,
dep_type)
return result
def create_package_xml(package_template, rosdistro, meta=False):
"""
:param package_template: contains the required information
:returns: file contents as string
"""
package_xml_template = \
read_template_file(PACKAGE_MANIFEST_FILENAME, rosdistro)
ctemp = CatkinTemplate(package_xml_template)
temp_dict = {}
for key in package_template.__slots__:
temp_dict[key] = getattr(package_template, key)
if package_template.version_abi:
temp_dict['version_abi'] = ' abi="%s"' % package_template.version_abi
else:
temp_dict['version_abi'] = ''
if not package_template.description:
temp_dict['description'] = 'The %s package ...' % package_template.name
licenses = []
for plicense in package_template.licenses:
licenses.append(' <license>%s</license>\n' % plicense)
temp_dict['licenses'] = ''.join(licenses)
def get_person_tag(tagname, person):
email_string = (
"" if person.email is None else 'email="%s"' % person.email
)
return ' <%s %s>%s</%s>\n' % (tagname, email_string,
person.name, tagname)
maintainers = []
for maintainer in package_template.maintainers:
maintainers.append(get_person_tag('maintainer', maintainer))
temp_dict['maintainers'] = ''.join(maintainers)
urls = []
for url in package_template.urls:
type_string = ("" if url.type is None
else 'type="%s"' % url.type)
urls.append(' <url %s >%s</url>\n' % (type_string, url.url))
temp_dict['urls'] = ''.join(urls)
authors = []
for author in package_template.authors:
authors.append(get_person_tag('author', author))
temp_dict['authors'] = ''.join(authors)
dependencies = []
dep_map = {
'build_depend': package_template.build_depends,
'buildtool_depend': package_template.buildtool_depends,
'run_depend': package_template.run_depends,
'test_depend': package_template.test_depends,
'conflict': package_template.conflicts,
'replace': package_template.replaces
}
for dep_type in ['buildtool_depend', 'build_depend', 'run_depend',
'test_depend', 'conflict', 'replace']:
for dep in sorted(dep_map[dep_type], key=lambda x: x.name):
if 'depend' in dep_type:
dep_tag = _create_depend_tag(
dep_type,
dep.name,
dep.version_eq,
dep.version_lt,
dep.version_lte,
dep.version_gt,
dep.version_gte
)
dependencies.append(dep_tag)
else:
dependencies.append(_create_depend_tag(dep_type,
dep.name))
temp_dict['dependencies'] = ''.join(dependencies)
exports = []
if package_template.exports is not None:
for export in package_template.exports:
if export.content is not None:
print('WARNING: Create package does not know how to '
'serialize exports with content: '
'%s, %s, ' % (export.tagname, export.attributes) +
'%s' % (export.content),
file=sys.stderr)
else:
attribs = [' %s="%s"' % (k, v) for (k, v) in export.attributes.items()]
line = ' <%s%s/>\n' % (export.tagname, ''.join(attribs))
exports.append(line)
if meta:
exports.append(' <metapackage/>')
temp_dict['exports'] = ''.join(exports)
temp_dict['components'] = package_template.catkin_deps
return ctemp.substitute(temp_dict)
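# Hedged usage sketch: building a new package skeleton with the helpers
# above. All values are illustrative; 'groovy' is assumed to be one of the
# available template distro directories under templates/.
if __name__ == '__main__':
    template = PackageTemplate._create_package_template(
        package_name='my_robot_driver',
        description='Driver node for my robot',
        licenses=['BSD'],
        maintainer_names=['Jane Doe'],
        catkin_deps=['roscpp', 'std_msgs'])
    create_package_files(target_path='my_robot_driver',
                         package_template=template,
                         rosdistro='groovy')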
|
# -*- coding: utf-8 -*-
# Law-to-Code -- Extract formulas & parameters from laws
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 OpenFisca Team
# https://github.com/openfisca/LawToCode
#
# This file is part of Law-to-Code.
#
# Law-to-Code is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Law-to-Code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Controllers for sessions"""
import collections
import logging
import webob
from .. import contexts, conv, model, paginations, templates, urls, wsgihelpers
log = logging.getLogger(__name__)
@wsgihelpers.wsgify
def admin_delete(req):
ctx = contexts.Ctx(req)
session = ctx.node
if not model.is_admin(ctx):
return wsgihelpers.forbidden(ctx,
explanation = ctx._("Deletion forbidden"),
message = ctx._("You can not delete a session."),
title = ctx._('Operation denied'),
)
if req.method == 'POST':
session.delete(ctx, safe = True)
return wsgihelpers.redirect(ctx, location = model.Session.get_admin_class_url(ctx))
return templates.render(ctx, '/sessions/admin-delete.mako', session = session)
@wsgihelpers.wsgify
def admin_index(req):
ctx = contexts.Ctx(req)
model.is_admin(ctx, check = True)
assert req.method == 'GET'
page_number, error = conv.pipe(
conv.input_to_int,
conv.test_greater_or_equal(1),
conv.default(1),
)(req.params.get('page'), state = ctx)
if error is not None:
return wsgihelpers.not_found(ctx, explanation = ctx._('Page number error: {}').format(error))
cursor = model.Session.find(as_class = collections.OrderedDict)
pager = paginations.Pager(item_count = cursor.count(), page_number = page_number)
sessions = cursor.skip(pager.first_item_index or 0).limit(pager.page_size)
return templates.render(ctx, '/sessions/admin-index.mako', sessions = sessions, pager = pager)
@wsgihelpers.wsgify
def admin_view(req):
ctx = contexts.Ctx(req)
session = ctx.node
model.is_admin(ctx, check = True)
return templates.render(ctx, '/sessions/admin-view.mako', session = session)
def route_admin(environ, start_response):
req = webob.Request(environ)
ctx = contexts.Ctx(req)
session, error = conv.pipe(
conv.input_to_uuid,
conv.not_none,
model.Session.uuid_to_instance,
)(req.urlvars.get('token'), state = ctx)
if error is not None:
return wsgihelpers.not_found(ctx, explanation = ctx._('Session Error: {}').format(error))(
environ, start_response)
ctx.node = session
router = urls.make_router(
('GET', '^/?$', admin_view),
(('GET', 'POST'), '^/delete/?$', admin_delete),
)
return router(environ, start_response)
def route_admin_class(environ, start_response):
router = urls.make_router(
('GET', '^/?$', admin_index),
(None, '^/(?P<token>[^/]+)(?=/|$)', route_admin),
)
return router(environ, start_response)
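# Hedged sketch of how these controllers might be mounted on an application
# router; the '/admin/sessions' prefix is an assumption, not taken from the
# project's actual routing table.
def make_sessions_router():
    return urls.make_router(
        (None, '^/admin/sessions(?=/|$)', route_admin_class),
        )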
|
class ReflectionBlockIntegratorStills:
"""A class to perform the integration."""
def __init__(self, params, experiments, reference, extractor=None):
"""Initialise the integrator."""
from dials.algorithms import shoebox
# Ensure we have 1 experiment at the moment
assert len(experiments) == 1
assert extractor is not None
# Save the parameters
self.params = params
self.experiments = experiments
self.extractor = extractor
# Create the shoebox masker
n_sigma = params.integration.shoebox.n_sigma
assert n_sigma > 0
self._mask_profiles = shoebox.MaskerEmpirical(
experiments[0], reference=reference
)
def integrate(self):
"""Integrate all the reflections."""
from dials.algorithms.shoebox import MaskCode
from dials.array_family import flex
result = flex.reflection_table()
for indices, reflections in self.extractor:
self._mask_profiles(reflections, None)
reflections.integrate(self.experiments[0])
bg_code = MaskCode.Valid | MaskCode.BackgroundUsed
fg_code = MaskCode.Valid | MaskCode.Foreground
n_bg = reflections["shoebox"].count_mask_values(bg_code)
n_fg = reflections["shoebox"].count_mask_values(fg_code)
reflections["n_background"] = n_bg
reflections["n_foreground"] = n_fg
del reflections["shoebox"]
del reflections["rs_shoebox"]
result.extend(reflections)
assert len(result) > 0
result.sort("miller_index")
return result
class IntegratorStills:
"""Integrate reflections"""
def __init__(self, params, exlist, reference=None, predicted=None, shoeboxes=None):
"""Initialise the script."""
assert reference is not None
# Load the extractor based on the input
if shoeboxes is not None:
extractor = self._load_extractor(shoeboxes, params, exlist)
else:
if predicted is None:
predicted = self._predict_reflections(params, exlist)
# predicted = self._filter_reflections(params, exlist, predicted) # FIXME
predicted = self._match_with_reference(predicted, reference)
import math
from annlib_ext import AnnAdaptor
from dials.array_family import flex
matcheddata = predicted.select(
predicted.get_flags(predicted.flags.reference_spot)
)
A = AnnAdaptor(matcheddata["xyzcal.mm"].as_double(), 3, 10)
A.query(predicted["xyzcal.mm"].as_double())
bboxes = flex.int6()
for i, ref in enumerate(predicted):
nn_pred = [matcheddata[A.nn[i * 10 + j]] for j in range(10)]
nn_ref = [
reference[reference["miller_index"].first_index(r["miller_index"])]
for r in nn_pred
]
max_x = max([r["bbox"][1] - r["bbox"][0] for r in nn_ref])
max_y = max([r["bbox"][3] - r["bbox"][2] for r in nn_ref])
panel = exlist[ref["id"]].detector[ref["panel"]]
imgsize_x, imgsize_y = panel.get_image_size()
x1 = int(math.floor(ref["xyzcal.px"][0] - (max_x / 2)))
x2 = int(math.ceil(ref["xyzcal.px"][0] + (max_x / 2)))
y1 = int(math.floor(ref["xyzcal.px"][1] - (max_y / 2)))
y2 = int(math.ceil(ref["xyzcal.px"][1] + (max_y / 2)))
if x1 < 0:
x1 = 0
if y1 < 0:
y1 = 0
if x2 > imgsize_x:
x2 = imgsize_x
if y2 > imgsize_y:
y2 = imgsize_y
bboxes.append((x1, x2, y1, y2, 0, 1))
predicted["bbox"] = bboxes
extractor = self._create_extractor(params, exlist, predicted)
# Initialise the integrator
self._integrator = ReflectionBlockIntegratorStills(
params, exlist, reference, extractor
)
def integrate(self):
"""Integrate the reflections."""
return self._integrator.integrate()
def _match_with_reference(self, predicted, reference):
"""Match predictions with reference spots."""
from dials.algorithms.spot_finding.spot_matcher import SpotMatcher
from dials.util.command_line import Command
Command.start("Matching reference spots with predicted reflections")
match = SpotMatcher(max_separation=1)
rind, pind = match(reference, predicted)
h1 = predicted.select(pind)["miller_index"]
h2 = reference.select(rind)["miller_index"]
mask = h1 == h2
predicted.set_flags(pind.select(mask), predicted.flags.reference_spot)
Command.end(
"Matched %d reference spots with predicted reflections" % mask.count(True)
)
return predicted
def _load_extractor(self, filename, params, exlist):
"""Load the shoebox extractor."""
from dials.model.serialize.reflection_block import ReflectionBlockExtractor
assert len(exlist) == 1
imageset = exlist[0].imageset
return ReflectionBlockExtractor(
filename, params.integration.shoebox.block_size, imageset
)
def _create_extractor(self, params, exlist, predicted):
"""Create the extractor."""
from dials.model.serialize.reflection_block import ReflectionBlockExtractor
assert len(exlist) == 1
imageset = exlist[0].imageset
return ReflectionBlockExtractor(
"shoebox.dat", params.integration.shoebox.block_size, imageset, predicted
)
def _predict_reflections(self, params, experiments):
"""Predict all the reflections."""
from dials.array_family import flex
result = flex.reflection_table()
for i, experiment in enumerate(experiments):
predicted = flex.reflection_table.from_predictions(experiment)
predicted["id"] = flex.int(len(predicted), i)
result.extend(predicted)
return result
def _filter_reflections(self, params, experiments, reflections):
"""Filter the reflections to integrate."""
from dials.algorithms import filtering
from dials.array_family import flex
from dials.util.command_line import Command
# Set all reflections which overlap bad pixels to zero
Command.start("Filtering reflections by detector mask")
if experiments[0].scan is None:
array_range = 1
else:
array_range = experiments[0].scan.get_array_range()
mask = filtering.by_detector_mask(
reflections["bbox"],
experiments[0].imageset.get_raw_data(0)[0] >= 0,
array_range,
)
        # flex bool arrays need elementwise negation ("not mask" is invalid here)
        reflections.del_selected(~mask)
Command.end(f"Filtered {len(reflections)} reflections by detector mask")
# Filter the reflections by zeta
min_zeta = params.integration.filter.by_zeta
if min_zeta > 0:
Command.start(f"Filtering reflections by zeta >= {min_zeta:f}")
zeta = reflections.compute_zeta(experiments[0])
reflections.del_selected(flex.abs(zeta) < min_zeta)
n = len(reflections)
Command.end("Filtered %d reflections by zeta >= %f" % (n, min_zeta))
return reflections
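# Hedged usage sketch: driving the stills integrator defined above. The
# `params`, `exlist` and `reference` objects stand in for values produced by
# the DIALS parameter parser and experiment/reflection loaders; the function
# name is illustrative.
def run_integration(params, exlist, reference):
    integrator = IntegratorStills(params, exlist, reference=reference)
    return integrator.integrate()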
|
# django-salesforce
#
# by Phil Christensen
# (c) 2012-2013 Freelancers Union (http://www.freelancersunion.org)
# See LICENSE.md for details
#
"""
Salesforce object query and queryset customizations.
"""
# TODO hynekcer: class CursorWrapper and function handle_api_exceptions should
# be moved to salesforce.backend.driver at the next big refactoring
# (Even some low-level internals of salesforce.auth should be moved to
# salesforce.backend.driver.Connection)
import logging, types, datetime, decimal
from django.conf import settings
from django.core.serializers import python
from django.core.exceptions import ImproperlyConfigured
from django.db import connections
from django.db.models import query, Count
from django.db.models.sql import Query, RawQuery, constants, subqueries
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.query_utils import deferred_class_factory
from django.utils.six import PY3
from itertools import islice
import requests
import pytz
from salesforce import auth, models, DJANGO_16_PLUS, DJANGO_17_PLUS, DJANGO_18_PLUS
from salesforce import DJANGO_184_PLUS
from salesforce.backend.compiler import SQLCompiler
from salesforce.fields import NOT_UPDATEABLE, NOT_CREATEABLE, SF_PK
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
import json
except ImportError:
import simplejson as json
log = logging.getLogger(__name__)
API_STUB = '/services/data/v34.0'
# Values of seconds have 3 decimal places in SF, but they are rounded to
# whole seconds for most fields.
SALESFORCE_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f+0000'
DJANGO_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f-00:00'
request_count = 0
def quoted_string_literal(s, d):
"""
SOQL requires single quotes to be escaped.
http://www.salesforce.com/us/developer/docs/soql_sosl/Content/sforce_api_calls_soql_select_quotedstringescapes.htm
"""
try:
return "'%s'" % (s.replace("\\", "\\\\").replace("'", "\\'"),)
except TypeError as e:
raise NotImplementedError("Cannot quote %r objects: %r" % (type(s), s))
def process_args(args):
"""
Perform necessary quoting on the arg list.
"""
def _escape(item, conv):
if(isinstance(item, models.SalesforceModel)):
return conv.get(models.SalesforceModel, conv[str])(item, conv)
if(isinstance(item, decimal.Decimal)):
return conv.get(decimal.Decimal, conv[str])(item, conv)
return conv.get(type(item), conv[str])(item, conv)
return tuple([_escape(x, sql_conversions) for x in args])
def process_json_args(args):
"""
Perform necessary JSON quoting on the arg list.
"""
def _escape(item, conv):
if(isinstance(item, models.SalesforceModel)):
return conv.get(models.SalesforceModel, conv[str])(item, conv)
if(isinstance(item, decimal.Decimal)):
return conv.get(decimal.Decimal, conv[str])(item, conv)
return conv.get(type(item), conv[str])(item, conv)
return tuple([_escape(x, json_conversions) for x in args])
def handle_api_exceptions(url, f, *args, **kwargs):
"""Call REST API and handle exceptions
Params:
f: requests.get or requests.post...
_cursor: sharing the debug information in cursor
"""
global request_count
from salesforce.backend import base
# The 'verify' option is about verifying SSL certificates
kwargs_in = {'timeout': getattr(settings, 'SALESFORCE_QUERY_TIMEOUT', 3),
'verify': True}
kwargs_in.update(kwargs)
_cursor = kwargs_in.pop('_cursor', None)
log.debug('Request API URL: %s' % url)
request_count += 1
try:
response = f(url, *args, **kwargs_in)
# TODO some timeouts can be rarely raised as "SSLError: The read operation timed out"
except requests.exceptions.Timeout:
raise base.SalesforceError("Timeout, URL=%s" % url)
if response.status_code == 401:
# Unauthorized (expired or invalid session ID or OAuth)
data = response.json()[0]
if(data['errorCode'] == 'INVALID_SESSION_ID'):
            # refresh the expired session and retry the request once
            token = f.__self__.auth.reauthenticate()
if('headers' in kwargs):
kwargs['headers'].update(dict(Authorization='OAuth %s' % token))
try:
response = f(url, *args, **kwargs_in)
except requests.exceptions.Timeout:
raise base.SalesforceError("Timeout, URL=%s" % url)
if response.status_code in (200, 201, 204):
return response
    # TODO Remove this verbose setting after tuning of specific messages.
    # For now, verbose output is the more useful default.
# http://www.salesforce.com/us/developer/docs/api_rest/Content/errorcodes.htm
verbose = not getattr(getattr(_cursor, 'query', None), 'debug_silent', False)
# Errors are reported in the body
data = response.json()[0]
if response.status_code == 404: # ResourceNotFound
if (f.__func__.__name__ == 'delete') and data['errorCode'] in (
'ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'):
# It is a delete command and the object is in trash bin or
# completely deleted or it only could be a valid Id for this type
# then is ignored similarly to delete by a classic database query:
# DELETE FROM xy WHERE id = 'something_deleted_yet'
return None
else:
            # otherwise this Id can never have been valid
raise base.SalesforceError("Couldn't connect to API (404): %s, URL=%s"
% (response.text, url), data, response, verbose)
if(data['errorCode'] == 'INVALID_FIELD'):
raise base.SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'MALFORMED_QUERY'):
raise base.SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'INVALID_FIELD_FOR_INSERT_UPDATE'):
raise base.SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'METHOD_NOT_ALLOWED'):
raise base.SalesforceError('%s: %s' % (url, data['message']), data, response, verbose)
# some kind of failed query
else:
raise base.SalesforceError('%s' % data, data, response, verbose)
def prep_for_deserialize(model, record, using, init_list=None):
"""
Convert a record from SFDC (decoded JSON) to dict(model string, pk, fields)
    It fixes fields of some types. If names of required fields `init_list` are
    specified, then only these fields are processed.
"""
from salesforce.backend import base
# TODO the parameter 'using' is not currently important.
attribs = record.pop('attributes')
mod = model.__module__.split('.')
if(mod[-1] == 'models'):
app_label = mod[-2]
elif(hasattr(model._meta, 'app_label')):
app_label = getattr(model._meta, 'app_label')
else:
        raise ImproperlyConfigured("Can't discover the app_label for %s, "
                "you must specify it via model meta options." % model.__name__)
if len(record.keys()) == 1 and model._meta.db_table in record:
while len(record) == 1:
record = list(record.values())[0]
fields = dict()
for x in model._meta.fields:
if not x.primary_key and (not init_list or x.name in init_list):
if x.column.endswith('.Type'):
# Type of generic foreign key
simple_column, _ = x.column.split('.')
fields[x.name] = record[simple_column]['Type']
else:
# Normal fields
field_val = record[x.column]
#db_type = x.db_type(connection=connections[using])
if(x.__class__.__name__ == 'DateTimeField' and field_val is not None):
d = datetime.datetime.strptime(field_val, SALESFORCE_DATETIME_FORMAT)
import pytz
d = d.replace(tzinfo=pytz.utc)
if settings.USE_TZ:
fields[x.name] = d.strftime(DJANGO_DATETIME_FORMAT)
else:
tz = pytz.timezone(settings.TIME_ZONE)
d = tz.normalize(d.astimezone(tz))
fields[x.name] = d.strftime(DJANGO_DATETIME_FORMAT[:-6])
else:
fields[x.name] = field_val
if init_list and set(init_list).difference(fields).difference([SF_PK]):
        raise base.DatabaseError("Some expected fields were not found")
return dict(
model = '.'.join([app_label, model.__name__]),
pk = record.pop('Id'),
fields = fields,
)
def extract_values(query):
"""
Extract values from insert or update query.
"""
d = dict()
fields = query.model._meta.fields
for index in range(len(fields)):
field = fields[index]
if (field.get_internal_type() == 'AutoField' or
isinstance(query, subqueries.UpdateQuery) and (getattr(field, 'sf_read_only', 0) & NOT_UPDATEABLE) != 0 or
isinstance(query, subqueries.InsertQuery) and (getattr(field, 'sf_read_only', 0) & NOT_CREATEABLE) != 0):
continue
if(isinstance(query, subqueries.UpdateQuery)):
value_or_empty = [value for qfield, model, value in query.values if qfield.name == field.name]
if value_or_empty:
[value] = value_or_empty
else:
assert len(query.values) < len(fields), \
"Match name can miss only with an 'update_fields' argument."
continue
else: # insert
# TODO bulk insert
assert len(query.objs) == 1, "bulk_create is not supported by Salesforce REST API"
value = getattr(query.objs[0], field.attname)
# The 'DEFAULT' is a backward compatibility name.
if isinstance(field, (models.ForeignKey, models.BooleanField)) and value in ('DEFAULT', 'DEFAULTED_ON_CREATE'):
continue
if isinstance(value, models.DefaultedOnCreate):
continue
[arg] = process_json_args([value])
d[field.column] = arg
return d
class SalesforceRawQuerySet(query.RawQuerySet):
def __len__(self):
if self.query.cursor is None:
# force the query
self.query.get_columns()
return self.query.cursor.rowcount
class SalesforceQuerySet(query.QuerySet):
"""
Use a custom SQL compiler to generate SOQL-compliant queries.
"""
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
remote web service.
"""
try:
sql, params = SQLCompiler(self.query, connections[self.db], None).as_sql()
except EmptyResultSet:
            return  # ends the generator (PEP 479-safe equivalent of StopIteration)
cursor = CursorWrapper(connections[self.db], self.query)
cursor.execute(sql, params)
pfd = prep_for_deserialize
only_load = self.query.get_loaded_field_names()
load_fields = []
# If only/defer clauses have been specified,
# build the list of fields that are to be loaded.
if not only_load:
model_cls = self.model
init_list = None
else:
if DJANGO_16_PLUS:
fields = self.model._meta.concrete_fields
fields_with_model = self.model._meta.get_concrete_fields_with_model()
else:
fields = self.model._meta.fields
fields_with_model = self.model._meta.get_fields_with_model()
for field, model in fields_with_model:
if model is None:
model = self.model
try:
selected_name = field.attname if DJANGO_18_PLUS else field.name
if selected_name in only_load[model]:
# Add a field that has been explicitly included
load_fields.append(field.name)
except KeyError:
# Model wasn't explicitly listed in the only_load table
# Therefore, we need to load all fields from this model
load_fields.append(field.name)
init_list = []
skip = set()
for field in fields:
if field.name not in load_fields:
skip.add(field.attname)
else:
init_list.append(field.name)
model_cls = deferred_class_factory(self.model, skip)
field_names = self.query.get_loaded_field_names()
for res in python.Deserializer(pfd(model_cls, r, self.db, init_list) for r in cursor.results):
# Store the source database of the object
res.object._state.db = self.db
# This object came from the database; it's not being added.
res.object._state.adding = False
yield res.object
def query_all(self):
"""
Allows querying for also deleted or merged records.
Lead.objects.query_all().filter(IsDeleted=True,...)
https://www.salesforce.com/us/developer/docs/api_rest/Content/resources_queryall.htm
"""
obj = self._clone(klass=SalesforceQuerySet)
obj.query.set_query_all()
return obj
class SalesforceRawQuery(RawQuery):
def clone(self, using):
return SalesforceRawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.table_name_converter
if self.cursor.rowcount > 0:
return [converter(col) for col in self.cursor.first_row.keys() if col != 'attributes']
# TODO hy: A more general fix is desirable with rewriting more code.
# This is changed due to Django 1.8.4+ https://github.com/django/django/pull/5036
# related to https://code.djangoproject.com/ticket/12768
return ['Id'] if DJANGO_184_PLUS else [SF_PK]
def _execute_query(self):
self.cursor = CursorWrapper(connections[self.using], self)
self.cursor.execute(self.sql, self.params)
def __repr__(self):
return "<SalesforceRawQuery: %s; %r>" % (self.sql, tuple(self.params))
class SalesforceQuery(Query):
"""
Override aggregates.
"""
# Warn against name collision: The name 'aggregates' is the name of
# a new property introduced by Django 1.7 to the parent class
# 'django.db.models.sql.query.Query'.
    # 'aggregates_module' is overridden here, to be visible in the base class.
from salesforce.backend import aggregates as aggregates_module
def __init__(self, *args, **kwargs):
super(SalesforceQuery, self).__init__(*args, **kwargs)
self.is_query_all = False
self.first_chunk_len = None
self.max_depth = 1
def clone(self, klass=None, memo=None, **kwargs):
query = Query.clone(self, klass, memo, **kwargs)
query.is_query_all = self.is_query_all
return query
def has_results(self, using):
q = self.clone()
compiler = q.get_compiler(using=using)
return bool(compiler.execute_sql(constants.SINGLE))
def set_query_all(self):
self.is_query_all = True
if DJANGO_18_PLUS:
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('pk'), alias='x_sf_count', is_summary=True)
number = obj.get_aggregation(using, ['x_sf_count'])['x_sf_count']
if number is None:
number = 0
return number
class CursorWrapper(object):
"""
A wrapper that emulates the behavior of a database cursor.
This is the class that is actually responsible for making connections
    to the SF REST API.
"""
def __init__(self, db, query=None):
"""
Connect to the Salesforce API.
"""
self.db = db
self.query = query
self.session = db.sf_session
        # After execute(), an empty result set is consistently represented as iter([])
self.results = None
self.rowcount = None
self.first_row = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
@property
def oauth(self):
return self.session.auth.authenticate()
def execute(self, q, args=()):
"""
Send a query to the Salesforce API.
"""
from salesforce.backend import base
self.rowcount = None
if isinstance(self.query, SalesforceQuery) or self.query is None:
response = self.execute_select(q, args)
elif isinstance(self.query, SalesforceRawQuery):
response = self.execute_select(q, args)
elif isinstance(self.query, subqueries.InsertQuery):
response = self.execute_insert(self.query)
elif isinstance(self.query, subqueries.UpdateQuery):
response = self.execute_update(self.query)
elif isinstance(self.query, subqueries.DeleteQuery):
response = self.execute_delete(self.query)
else:
raise base.DatabaseError("Unsupported query: type %s: %s" % (type(self.query), self.query))
# the encoding is detected automatically, e.g. from headers
if(response and response.text):
            # parse_float is set to decimal.Decimal to avoid precision errors:
            # the JSON number is converted directly to a Decimal object for a
            # model's DecimalField, instead of going through a float first
data = response.json(parse_float=decimal.Decimal)
# a SELECT query
if('totalSize' in data):
self.rowcount = data['totalSize']
# a successful INSERT query, return after getting PK
elif('success' in data and 'id' in data):
self.lastrowid = data['id']
return
# something we don't recognize
else:
raise base.DatabaseError(data)
if q.upper().startswith('SELECT COUNT() FROM'):
# COUNT() queries in SOQL are a special case, as they don't actually return rows
self.results = iter([[self.rowcount]])
else:
if self.query:
self.query.first_chunk_len = len(data['records'])
self.first_row = data['records'][0] if data['records'] else None
self.results = self.query_results(data)
else:
self.results = iter([])
def execute_select(self, q, args):
processed_sql = str(q) % process_args(args)
cmd = 'query' if not getattr(self.query, 'is_query_all', False) else 'queryAll'
url = u'{base}{api}/{cmd}?{query_str}'.format(
base=self.session.auth.instance_url, api=API_STUB, cmd=cmd,
query_str=urlencode(dict(q=processed_sql)),
)
log.debug(processed_sql)
return handle_api_exceptions(url, self.session.get, _cursor=self)
def query_more(self, nextRecordsUrl):
url = u'%s%s' % (self.session.auth.instance_url, nextRecordsUrl)
return handle_api_exceptions(url, self.session.get, _cursor=self)
def execute_insert(self, query):
table = query.model._meta.db_table
url = self.session.auth.instance_url + API_STUB + ('/sobjects/%s/' % table)
headers = {'Content-Type': 'application/json'}
post_data = extract_values(query)
log.debug('INSERT %s%s' % (table, post_data))
return handle_api_exceptions(url, self.session.post, headers=headers, data=json.dumps(post_data), _cursor=self)
def execute_update(self, query):
table = query.model._meta.db_table
# this will break in multi-row updates
if DJANGO_17_PLUS:
pk = query.where.children[0].rhs
elif DJANGO_16_PLUS:
pk = query.where.children[0][3]
else:
pk = query.where.children[0].children[0][-1]
assert pk
url = self.session.auth.instance_url + API_STUB + ('/sobjects/%s/%s' % (table, pk))
headers = {'Content-Type': 'application/json'}
post_data = extract_values(query)
log.debug('UPDATE %s(%s)%s' % (table, pk, post_data))
ret = handle_api_exceptions(url, self.session.patch, headers=headers, data=json.dumps(post_data), _cursor=self)
self.rowcount = 1
return ret
def execute_delete(self, query):
table = query.model._meta.db_table
        # the root where-node's children may themselves have children
def recurse_for_pk(children):
for node in children:
if hasattr(node, 'rhs'):
pk = node.rhs[0] # for Django 1.7+
else:
try:
pk = node[-1][0]
except TypeError:
pk = recurse_for_pk(node.children)
return pk
pk = recurse_for_pk(self.query.where.children)
assert pk
url = self.session.auth.instance_url + API_STUB + ('/sobjects/%s/%s' % (table, pk))
log.debug('DELETE %s(%s)' % (table, pk))
return handle_api_exceptions(url, self.session.delete, _cursor=self)
def query_results(self, results):
while True:
for rec in results['records']:
if rec['attributes']['type'] == 'AggregateResult' and hasattr(self.query, 'aggregate_select'):
                    assert len(rec) - 1 == len(list(self.query.aggregate_select.items()))
# The 'attributes' info is unexpected for Django within fields.
rec = [rec[k] for k, _ in self.query.aggregate_select.items()]
yield rec
if results['done']:
break
            # see "Retrieving the Remaining SOQL Query Results":
# http://www.salesforce.com/us/developer/docs/api_rest/Content/dome_query.htm#retrieve_remaining_results_title
response = self.query_more(results['nextRecordsUrl'])
results = response.json(parse_float=decimal.Decimal)
def __iter__(self):
return iter(self.results)
def fetchone(self):
"""
Fetch a single result from a previously executed query.
"""
try:
return next(self.results)
except StopIteration:
return None
def fetchmany(self, size=None):
"""
Fetch multiple results from a previously executed query.
"""
if size is None:
size = 200
return list(islice(self.results, size))
def fetchall(self):
"""
Fetch all results from a previously executed query.
"""
return list(self.results)
def close(self): # for Django 1.7+
pass
string_literal = quoted_string_literal
def date_literal(d, c):
if not d.tzinfo:
import time
tz = pytz.timezone(settings.TIME_ZONE)
d = tz.localize(d, is_dst=time.daylight)
# Format of `%z` is "+HHMM"
tzname = datetime.datetime.strftime(d, "%z")
return datetime.datetime.strftime(d, "%Y-%m-%dT%H:%M:%S.000") + tzname
def sobj_id(obj, conv):
return obj.pk
# supported types
sql_conversions = {
int: lambda s,d: str(s),
float: lambda o,d: '%.15g' % o,
type(None): lambda s,d: 'NULL',
str: lambda o,d: string_literal(o, d), # default
bool: lambda s,d: str(s).lower(),
datetime.date: lambda d,c: datetime.date.strftime(d, "%Y-%m-%d"),
datetime.datetime: lambda d,c: date_literal(d, c),
decimal.Decimal: lambda s,d: float(s),
models.SalesforceModel: sobj_id,
}
if not PY3:
sql_conversions[long] = lambda s,d: str(s)
sql_conversions[unicode] = lambda s,d: string_literal(s.encode('utf8'), d)
# supported types
json_conversions = {
int: lambda s,d: str(s),
float: lambda o,d: '%.15g' % o,
type(None): lambda s,d: None,
str: lambda o,d: o, # default
bool: lambda s,d: str(s).lower(),
datetime.date: lambda d,c: datetime.date.strftime(d, "%Y-%m-%d"),
datetime.datetime: date_literal,
datetime.time: lambda d,c: datetime.time.strftime(d, "%H:%M:%S.%f"),
decimal.Decimal: lambda s,d: float(s),
models.SalesforceModel: sobj_id,
}
if not PY3:
json_conversions[long] = lambda s,d: str(s)
json_conversions[unicode] = lambda s,d: s.encode('utf8')
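# Hedged illustration of the conversion tables: process_args() quotes each
# Python value for direct interpolation into a SOQL string.
if __name__ == '__main__':
    print(process_args(("O'Neill", 5, None, True)))
    # expected under sql_conversions: ("'O\\'Neill'", '5', 'NULL', 'true')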
|
# Copyright 2019 Alastair Pharo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy
import os
import scipy
from numpy.testing import assert_allclose
from infsocsol.helpers import matrix
@pytest.fixture(scope="module", params=[
# engine states time_step start steps steady steady_accuracy optim_accuracy
( 'matlab', 10, 1, (100.0, 0.5), 100, True, 0.01, 0.009 ),
( 'matlab', 20, 0.5, (600.0, 0.6), 200, True, 0.01, 0.015 ),
( 'matlab', 40, 0.25, (60.0, 0.1), 300, True, 0.01, 0.018 ),
( 'matlab', 10, 1, (600.0, 1.0), 200, False, 0.001, None ),
( 'octave', 10, 1, (100.0, 0.5), 100, True, 0.001, 0.009 ),
( 'octave', 20, 0.5, (600.0, 0.6), 200, True, 0.001, 0.015 )
])
def fisheries_scenario(request):
return request.param
def test_fisheries_det_basic(engines, fisheries_scenario):
_engine, states, time_step, _start, steps, steady, steady_accuracy, optim_accuracy = fisheries_scenario
engine = engines[_engine]
start = matrix(engine, _start)
engine.cd(os.path.join(os.path.dirname(__file__), "fisheries_det_basic"))
engine.solve(float(states), float(time_step), nargout=0)
final = numpy.array(engine.sim_final(start, steps))
    # Determined by setting ds/dt = 0, which solves to 1 = x/L + (q/r) e
steady_one = numpy.dot(final, [1/600, 5/4])
if steady:
assert_allclose(steady_one, 1, atol=steady_accuracy)
# This is the most profitable steady state -- x = L/2 + c/2pq
profit_max_steady = numpy.array([[302.5, 0.39667]])
assert_allclose(final, profit_max_steady, rtol=optim_accuracy)
else:
assert steady_one > 1 + steady_accuracy
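# Run with e.g.:  pytest test_fisheries_det_basic.py
# (requires the MATLAB/Octave engines provided by the `engines` fixture,
# presumably defined in the project's conftest)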
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from os.path import join
from setuptools import setup, find_packages
import pyyelp
# To get python setup.py test to work on python 2.7
try:
import multiprocessing
import logging
except ImportError:
pass
setup(
name=pyyelp.__name__,
version=pyyelp.__version__,
author=pyyelp.__author__,
author_email=pyyelp.__email__,
url='https://github.com/motte/python-yelp',
download_url = 'https://github.com/motte/python-yelp/tarball/{0}'.format(pyyelp.__version__),
description='Python wrapper for the Yelp v2 api',
long_description=open('README.md').read(),
license='ISC',
packages = [pyyelp.__name__],
keywords = ['yelp', 'wrapper', 'api'],
    install_requires=list(map(str.strip, open(join('requirements', 'base.txt')))),
include_package_data=True,
classifiers=(
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: ISC License (ISCL)',
'Operating System :: OS Independent',
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
),
)
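# Typical usage (standard setuptools workflow, not specific to this project):
#   python setup.py sdist      # build a source distribution
#   python setup.py install    # install the package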
|
## @file
# Collect all defined strings in multiple uni files.
#
# Copyright (c) 2014 - 2019, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
"""
Collect all defined strings in multiple uni files
"""
from __future__ import print_function
##
# Import Modules
#
import os, codecs, re
import distutils.util
from Logger import ToolError
from Logger import Log as EdkLogger
from Logger import StringTable as ST
from Library.StringUtils import GetLineNo
from Library.Misc import PathClass
from Library.Misc import GetCharIndexOutStr
from Library import DataType as DT
from Library.ParserValidate import CheckUTF16FileHeader
##
# Static definitions
#
UNICODE_WIDE_CHAR = u'\\wide'
UNICODE_NARROW_CHAR = u'\\narrow'
UNICODE_NON_BREAKING_CHAR = u'\\nbr'
UNICODE_UNICODE_CR = '\r'
UNICODE_UNICODE_LF = '\n'
NARROW_CHAR = u'\uFFF0'
WIDE_CHAR = u'\uFFF1'
NON_BREAKING_CHAR = u'\uFFF2'
CR = u'\u000D'
LF = u'\u000A'
NULL = u'\u0000'
TAB = u'\t'
BACK_SPLASH = u'\\'
gLANG_CONV_TABLE = {'eng':'en', 'fra':'fr', \
'aar':'aa', 'abk':'ab', 'ave':'ae', 'afr':'af', 'aka':'ak', 'amh':'am', \
'arg':'an', 'ara':'ar', 'asm':'as', 'ava':'av', 'aym':'ay', 'aze':'az', \
'bak':'ba', 'bel':'be', 'bul':'bg', 'bih':'bh', 'bis':'bi', 'bam':'bm', \
'ben':'bn', 'bod':'bo', 'bre':'br', 'bos':'bs', 'cat':'ca', 'che':'ce', \
'cha':'ch', 'cos':'co', 'cre':'cr', 'ces':'cs', 'chu':'cu', 'chv':'cv', \
'cym':'cy', 'dan':'da', 'deu':'de', 'div':'dv', 'dzo':'dz', 'ewe':'ee', \
'ell':'el', 'epo':'eo', 'spa':'es', 'est':'et', 'eus':'eu', 'fas':'fa', \
'ful':'ff', 'fin':'fi', 'fij':'fj', 'fao':'fo', 'fry':'fy', 'gle':'ga', \
'gla':'gd', 'glg':'gl', 'grn':'gn', 'guj':'gu', 'glv':'gv', 'hau':'ha', \
'heb':'he', 'hin':'hi', 'hmo':'ho', 'hrv':'hr', 'hat':'ht', 'hun':'hu', \
'hye':'hy', 'her':'hz', 'ina':'ia', 'ind':'id', 'ile':'ie', 'ibo':'ig', \
'iii':'ii', 'ipk':'ik', 'ido':'io', 'isl':'is', 'ita':'it', 'iku':'iu', \
'jpn':'ja', 'jav':'jv', 'kat':'ka', 'kon':'kg', 'kik':'ki', 'kua':'kj', \
'kaz':'kk', 'kal':'kl', 'khm':'km', 'kan':'kn', 'kor':'ko', 'kau':'kr', \
'kas':'ks', 'kur':'ku', 'kom':'kv', 'cor':'kw', 'kir':'ky', 'lat':'la', \
'ltz':'lb', 'lug':'lg', 'lim':'li', 'lin':'ln', 'lao':'lo', 'lit':'lt', \
'lub':'lu', 'lav':'lv', 'mlg':'mg', 'mah':'mh', 'mri':'mi', 'mkd':'mk', \
'mal':'ml', 'mon':'mn', 'mar':'mr', 'msa':'ms', 'mlt':'mt', 'mya':'my', \
'nau':'na', 'nob':'nb', 'nde':'nd', 'nep':'ne', 'ndo':'ng', 'nld':'nl', \
'nno':'nn', 'nor':'no', 'nbl':'nr', 'nav':'nv', 'nya':'ny', 'oci':'oc', \
'oji':'oj', 'orm':'om', 'ori':'or', 'oss':'os', 'pan':'pa', 'pli':'pi', \
'pol':'pl', 'pus':'ps', 'por':'pt', 'que':'qu', 'roh':'rm', 'run':'rn', \
'ron':'ro', 'rus':'ru', 'kin':'rw', 'san':'sa', 'srd':'sc', 'snd':'sd', \
'sme':'se', 'sag':'sg', 'sin':'si', 'slk':'sk', 'slv':'sl', 'smo':'sm', \
'sna':'sn', 'som':'so', 'sqi':'sq', 'srp':'sr', 'ssw':'ss', 'sot':'st', \
'sun':'su', 'swe':'sv', 'swa':'sw', 'tam':'ta', 'tel':'te', 'tgk':'tg', \
'tha':'th', 'tir':'ti', 'tuk':'tk', 'tgl':'tl', 'tsn':'tn', 'ton':'to', \
'tur':'tr', 'tso':'ts', 'tat':'tt', 'twi':'tw', 'tah':'ty', 'uig':'ug', \
'ukr':'uk', 'urd':'ur', 'uzb':'uz', 'ven':'ve', 'vie':'vi', 'vol':'vo', \
'wln':'wa', 'wol':'wo', 'xho':'xh', 'yid':'yi', 'yor':'yo', 'zha':'za', \
'zho':'zh', 'zul':'zu'}
## Convert a python unicode string to a normal string
#
# Convert a python unicode string to a normal string
# UniToStr(u'I am a string') is 'I am a string'
#
# @param Uni: The python unicode string
#
# @retval: The formatted normal string
#
def UniToStr(Uni):
return repr(Uni)[2:-1]
## Convert a unicode string to a Hex list
#
# Convert a unicode string to a Hex list
# UniToHexList('ABC') is ['0x41', '0x00', '0x42', '0x00', '0x43', '0x00']
#
# @param Uni: The python unicode string
#
# @retval List: The formatted hex list
#
def UniToHexList(Uni):
List = []
for Item in Uni:
Temp = '%04X' % ord(Item)
List.append('0x' + Temp[2:4])
List.append('0x' + Temp[0:2])
return List
## Convert special unicode characters
#
# Convert special characters to (c), (r) and (tm).
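# e.g. ConvertSpecialUnicodes(u'\u00A9 2014 \u2122') is '(c) 2014 (tm)'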
#
# @param Uni: The python unicode string
#
# @retval NewUni: The converted unicode string
#
def ConvertSpecialUnicodes(Uni):
OldUni = NewUni = Uni
NewUni = NewUni.replace(u'\u00A9', '(c)')
NewUni = NewUni.replace(u'\u00AE', '(r)')
NewUni = NewUni.replace(u'\u2122', '(tm)')
if OldUni == NewUni:
NewUni = OldUni
return NewUni
## GetLanguageCode1766
#
# Check the language code read from .UNI file and convert RFC 4646 codes to RFC 1766 codes
# RFC 1766 language codes supported in compatibility mode
# RFC 4646 language codes supported in native mode
#
# @param LangName: Language codes read from .UNI file
#
# @retval LangName: Valid language code in RFC 1766 format or None
#
def GetLanguageCode1766(LangName, File=None):
length = len(LangName)
if length == 2:
if LangName.isalpha():
for Key in gLANG_CONV_TABLE.keys():
if gLANG_CONV_TABLE.get(Key) == LangName.lower():
return Key
elif length == 3:
if LangName.isalpha() and gLANG_CONV_TABLE.get(LangName.lower()):
return LangName
else:
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 1766 language code : %s" % LangName,
File)
elif length == 5:
if LangName[0:2].isalpha() and LangName[2] == '-':
for Key in gLANG_CONV_TABLE.keys():
if gLANG_CONV_TABLE.get(Key) == LangName[0:2].lower():
return Key
elif length >= 6:
if LangName[0:2].isalpha() and LangName[2] == '-':
for Key in gLANG_CONV_TABLE.keys():
if gLANG_CONV_TABLE.get(Key) == LangName[0:2].lower():
return Key
if LangName[0:3].isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None and LangName[3] == '-':
for Key in gLANG_CONV_TABLE.keys():
if Key == LangName[0:3].lower():
return Key
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 4646 language code : %s" % LangName,
File)
## GetLanguageCode
#
# Check the language code read from .UNI file and convert RFC 1766 codes to RFC 4646 codes if appropriate
# RFC 1766 language codes supported in compatibility mode
# RFC 4646 language codes supported in native mode
#
# @param LangName: Language codes read from .UNI file
#
# @retval LangName: Valid language code in RFC 4646 format or None
#
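# e.g. GetLanguageCode('eng', True, File) is 'en' in compatible mode; GetLanguageCode('en-US', False, File) is 'en-US'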
def GetLanguageCode(LangName, IsCompatibleMode, File):
length = len(LangName)
if IsCompatibleMode:
if length == 3 and LangName.isalpha():
TempLangName = gLANG_CONV_TABLE.get(LangName.lower())
if TempLangName is not None:
return TempLangName
return LangName
else:
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 1766 language code : %s" % LangName,
File)
if (LangName[0] == 'X' or LangName[0] == 'x') and LangName[1] == '-':
return LangName
if length == 2:
if LangName.isalpha():
return LangName
elif length == 3:
if LangName.isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None:
return LangName
elif length == 5:
if LangName[0:2].isalpha() and LangName[2] == '-':
return LangName
elif length >= 6:
if LangName[0:2].isalpha() and LangName[2] == '-':
return LangName
if LangName[0:3].isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None and LangName[3] == '-':
return LangName
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 4646 language code : %s" % LangName,
File)
## FormatUniEntry
#
# Format the entry in the Uni file.
#
# @param StrTokenName StrTokenName.
# @param TokenValueList A list need to be processed.
# @param ContainerFile ContainerFile.
#
# @return formatted entry
def FormatUniEntry(StrTokenName, TokenValueList, ContainerFile):
SubContent = ''
PreFormatLength = 40
if len(StrTokenName) > PreFormatLength:
PreFormatLength = len(StrTokenName) + 1
for (Lang, Value) in TokenValueList:
if not Value or Lang == DT.TAB_LANGUAGE_EN_X:
continue
if Lang == '':
Lang = DT.TAB_LANGUAGE_EN_US
if Lang == 'eng':
Lang = DT.TAB_LANGUAGE_EN_US
elif len(Lang.split('-')[0]) == 3:
Lang = GetLanguageCode(Lang.split('-')[0], True, ContainerFile)
else:
Lang = GetLanguageCode(Lang, False, ContainerFile)
ValueList = Value.split('\n')
SubValueContent = ''
for SubValue in ValueList:
if SubValue.strip():
SubValueContent += \
' ' * (PreFormatLength + len('#language en-US ')) + '\"%s\\n\"' % SubValue.strip() + '\r\n'
SubValueContent = SubValueContent[(PreFormatLength + len('#language en-US ')):SubValueContent.rfind('\\n')] \
+ '\"' + '\r\n'
SubContent += ' '*PreFormatLength + '#language %-5s ' % Lang + SubValueContent
if SubContent:
SubContent = StrTokenName + ' '*(PreFormatLength - len(StrTokenName)) + SubContent[PreFormatLength:]
return SubContent
## StringDefClassObject
#
# A structure for language definition
#
class StringDefClassObject(object):
def __init__(self, Name = None, Value = None, Referenced = False, Token = None, UseOtherLangDef = ''):
self.StringName = ''
self.StringNameByteList = []
self.StringValue = ''
self.StringValueByteList = ''
self.Token = 0
self.Referenced = Referenced
self.UseOtherLangDef = UseOtherLangDef
self.Length = 0
if Name is not None:
self.StringName = Name
self.StringNameByteList = UniToHexList(Name)
if Value is not None:
self.StringValue = Value
self.StringValueByteList = UniToHexList(self.StringValue)
self.Length = len(self.StringValueByteList)
if Token is not None:
self.Token = Token
def __str__(self):
return repr(self.StringName) + ' ' + \
repr(self.Token) + ' ' + \
repr(self.Referenced) + ' ' + \
repr(self.StringValue) + ' ' + \
repr(self.UseOtherLangDef)
def UpdateValue(self, Value = None):
if Value is not None:
if self.StringValue:
self.StringValue = self.StringValue + '\r\n' + Value
else:
self.StringValue = Value
self.StringValueByteList = UniToHexList(self.StringValue)
self.Length = len(self.StringValueByteList)
## UniFileClassObject
#
# A structure for .uni file definition
#
class UniFileClassObject(object):
def __init__(self, FileList = None, IsCompatibleMode = False, IncludePathList = None):
self.FileList = FileList
self.File = None
self.IncFileList = FileList
self.UniFileHeader = ''
self.Token = 2
self.LanguageDef = [] #[ [u'LanguageIdentifier', u'PrintableName'], ... ]
self.OrderedStringList = {} #{ u'LanguageIdentifier' : [StringDefClassObject] }
self.OrderedStringDict = {} #{ u'LanguageIdentifier' : {StringName:(IndexInList)} }
self.OrderedStringListByToken = {} #{ u'LanguageIdentifier' : {Token: StringDefClassObject} }
self.IsCompatibleMode = IsCompatibleMode
if not IncludePathList:
self.IncludePathList = []
else:
self.IncludePathList = IncludePathList
if len(self.FileList) > 0:
self.LoadUniFiles(FileList)
#
# Get Language definition
#
def GetLangDef(self, File, Line):
Lang = distutils.util.split_quoted((Line.split(u"//")[0]))
if len(Lang) != 3:
            try:
                FileIn = codecs.open(File.Path, mode='rb', encoding='utf_8').readlines()
            except UnicodeError:
                try:
                    FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16').readlines()
                except UnicodeError:
                    FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16_le').readlines()
            except Exception as Xstr:
                EdkLogger.Error("Unicode File Parser",
                                ToolError.FILE_OPEN_FAILURE,
                                "File read failure: %s" % str(Xstr),
                                ExtraData=File)
LineNo = GetLineNo(FileIn, Line, False)
EdkLogger.Error("Unicode File Parser",
ToolError.PARSER_ERROR,
"Wrong language definition",
ExtraData="""%s\n\t*Correct format is like '#langdef en-US "English"'""" % Line,
File = File, Line = LineNo)
else:
LangName = GetLanguageCode(Lang[1], self.IsCompatibleMode, self.File)
LangPrintName = Lang[2]
IsLangInDef = False
for Item in self.LanguageDef:
if Item[0] == LangName:
IsLangInDef = True
break
if not IsLangInDef:
self.LanguageDef.append([LangName, LangPrintName])
#
# Add language string
#
self.AddStringToList(u'$LANGUAGE_NAME', LangName, LangName, 0, True, Index=0)
self.AddStringToList(u'$PRINTABLE_LANGUAGE_NAME', LangName, LangPrintName, 1, True, Index=1)
if not IsLangInDef:
#
# The found STRING tokens will be added into new language string list
# so that the unique STRING identifier is reserved for all languages in the package list.
#
FirstLangName = self.LanguageDef[0][0]
if LangName != FirstLangName:
for Index in range (2, len (self.OrderedStringList[FirstLangName])):
Item = self.OrderedStringList[FirstLangName][Index]
if Item.UseOtherLangDef != '':
OtherLang = Item.UseOtherLangDef
else:
OtherLang = FirstLangName
self.OrderedStringList[LangName].append (StringDefClassObject(Item.StringName,
'',
Item.Referenced,
Item.Token,
OtherLang))
self.OrderedStringDict[LangName][Item.StringName] = len(self.OrderedStringList[LangName]) - 1
return True
#
# Get String name and value
#
def GetStringObject(self, Item):
Language = ''
Value = ''
Name = Item.split()[1]
        # Check that the string name is in upper case
if Name != '':
MatchString = re.match('[A-Z0-9_]+', Name, re.UNICODE)
if MatchString is None or MatchString.end(0) != len(Name):
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
'The string token name %s in UNI file %s must be upper case character.' %(Name, self.File))
LanguageList = Item.split(u'#language ')
for IndexI in range(len(LanguageList)):
if IndexI == 0:
continue
else:
Language = LanguageList[IndexI].split()[0]
#.replace(u'\r\n', u'')
Value = \
LanguageList[IndexI][LanguageList[IndexI].find(u'\"') + len(u'\"') : LanguageList[IndexI].rfind(u'\"')]
Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
self.AddStringToList(Name, Language, Value)
#
# Get include file list and load them
#
def GetIncludeFile(self, Item, Dir = None):
if Dir:
pass
FileName = Item[Item.find(u'!include ') + len(u'!include ') :Item.find(u' ', len(u'!include '))][1:-1]
self.LoadUniFile(FileName)
#
# Pre-process before parse .uni file
#
def PreProcess(self, File, IsIncludeFile=False):
if not os.path.exists(File.Path) or not os.path.isfile(File.Path):
EdkLogger.Error("Unicode File Parser",
ToolError.FILE_NOT_FOUND,
ExtraData=File.Path)
#
# Check file header of the Uni file
#
# if not CheckUTF16FileHeader(File.Path):
# EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
# ExtraData='The file %s is either invalid UTF-16LE or it is missing the BOM.' % File.Path)
        try:
            FileIn = codecs.open(File.Path, mode='rb', encoding='utf_8').readlines()
        except UnicodeError:
            try:
                FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16').readlines()
            except UnicodeError:
                FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16_le').readlines()
        except Exception:
            EdkLogger.Error("Unicode File Parser", ToolError.FILE_OPEN_FAILURE, ExtraData=File.Path)
#
# get the file header
#
Lines = []
HeaderStart = False
HeaderEnd = False
if not self.UniFileHeader:
FirstGenHeader = True
else:
FirstGenHeader = False
for Line in FileIn:
Line = Line.strip()
if Line == u'':
continue
if Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and (Line.find(DT.TAB_HEADER_COMMENT) > -1) \
and not HeaderEnd and not HeaderStart:
HeaderStart = True
if not Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and HeaderStart and not HeaderEnd:
HeaderEnd = True
if Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and HeaderStart and not HeaderEnd and FirstGenHeader:
self.UniFileHeader += Line + '\r\n'
continue
#
# Use unique identifier
#
FindFlag = -1
LineCount = 0
MultiLineFeedExits = False
#
# 0: initial value
# 1: single String entry exist
# 2: line feed exist under the some single String entry
#
StringEntryExistsFlag = 0
for Line in FileIn:
Line = FileIn[LineCount]
LineCount += 1
Line = Line.strip()
#
# Ignore comment line and empty line
#
if Line == u'' or Line.startswith(u'//'):
#
# Change the single line String entry flag status
#
if StringEntryExistsFlag == 1:
StringEntryExistsFlag = 2
#
# If the '#string' line and the '#language' line are not in the same line,
# there should be only one line feed character between them
#
if MultiLineFeedExits:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
continue
MultiLineFeedExits = False
#
# Process comment embedded in string define lines
#
FindFlag = Line.find(u'//')
if FindFlag != -1 and Line.find(u'//') < Line.find(u'"'):
Line = Line.replace(Line[FindFlag:], u' ')
if FileIn[LineCount].strip().startswith('#language'):
Line = Line + FileIn[LineCount]
FileIn[LineCount-1] = Line
FileIn[LineCount] = '\r\n'
LineCount -= 1
for Index in range (LineCount + 1, len (FileIn) - 1):
if (Index == len(FileIn) -1):
FileIn[Index] = '\r\n'
else:
FileIn[Index] = FileIn[Index + 1]
continue
CommIndex = GetCharIndexOutStr(u'/', Line)
if CommIndex > -1:
if (len(Line) - 1) > CommIndex:
if Line[CommIndex+1] == u'/':
Line = Line[:CommIndex].strip()
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
Line = Line.replace(UNICODE_WIDE_CHAR, WIDE_CHAR)
Line = Line.replace(UNICODE_NARROW_CHAR, NARROW_CHAR)
Line = Line.replace(UNICODE_NON_BREAKING_CHAR, NON_BREAKING_CHAR)
Line = Line.replace(u'\\\\', u'\u0006')
Line = Line.replace(u'\\r\\n', CR + LF)
Line = Line.replace(u'\\n', CR + LF)
Line = Line.replace(u'\\r', CR)
Line = Line.replace(u'\\t', u'\t')
Line = Line.replace(u'''\"''', u'''"''')
Line = Line.replace(u'\t', u' ')
Line = Line.replace(u'\u0006', u'\\')
#
# Check if single line has correct '"'
#
if Line.startswith(u'#string') and Line.find(u'#language') > -1 and Line.find('"') > Line.find(u'#language'):
if not Line.endswith('"'):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s misses '"' at the end of it in file %s'''
% (LineCount, File.Path))
#
# Between Name entry and Language entry can not contain line feed
#
if Line.startswith(u'#string') and Line.find(u'#language') == -1:
MultiLineFeedExits = True
if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.find(u'"') < 0:
MultiLineFeedExits = True
#
# Between Language entry and String entry can not contain line feed
#
if Line.startswith(u'#language') and len(Line.split()) == 2:
MultiLineFeedExits = True
#
# Check the situation that there only has one '"' for the language entry
#
if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.count(u'"') == 1:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s misses '"' at the end of it in file %s'''
% (LineCount, File.Path))
#
# Check the situation that there has more than 2 '"' for the language entry
#
if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.replace(u'\\"', '').count(u'"') > 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s has more than 2 '"' for language entry in file %s'''
% (LineCount, File.Path))
#
# Between two String entry, can not contain line feed
#
if Line.startswith(u'"'):
if StringEntryExistsFlag == 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message=ST.ERR_UNIPARSE_LINEFEED_UP_EXIST % Line, ExtraData=File.Path)
StringEntryExistsFlag = 1
if not Line.endswith('"'):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s misses '"' at the end of it in file %s'''
% (LineCount, File.Path))
#
# Check the situation that there has more than 2 '"' for the language entry
#
if Line.strip() and Line.replace(u'\\"', '').count(u'"') > 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s has more than 2 '"' for language entry in file %s'''
% (LineCount, File.Path))
elif Line.startswith(u'#language'):
if StringEntryExistsFlag == 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_MISS_STRING_ENTRY % Line, ExtraData=File.Path)
StringEntryExistsFlag = 0
else:
StringEntryExistsFlag = 0
Lines.append(Line)
#
# Convert string def format as below
#
# #string MY_STRING_1
# #language eng
# "My first English string line 1"
# "My first English string line 2"
# #string MY_STRING_1
# #language spa
# "Mi segunda secuencia 1"
# "Mi segunda secuencia 2"
#
if not IsIncludeFile and not Lines:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_NO_SECTION_EXIST, \
ExtraData=File.Path)
NewLines = []
StrName = u''
ExistStrNameList = []
for Line in Lines:
if StrName and not StrName.split()[1].startswith(DT.TAB_STR_TOKENCNAME + DT.TAB_UNDERLINE_SPLIT):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRNAME_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if StrName and len(StrName.split()[1].split(DT.TAB_UNDERLINE_SPLIT)) == 4:
StringTokenList = StrName.split()[1].split(DT.TAB_UNDERLINE_SPLIT)
if (StringTokenList[3].upper() in [DT.TAB_STR_TOKENPROMPT, DT.TAB_STR_TOKENHELP] and \
StringTokenList[3] not in [DT.TAB_STR_TOKENPROMPT, DT.TAB_STR_TOKENHELP]) or \
(StringTokenList[2].upper() == DT.TAB_STR_TOKENERR and StringTokenList[2] != DT.TAB_STR_TOKENERR):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRTOKEN_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if Line.count(u'#language') > 1:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_SEP_LANGENTRY_LINE % Line, \
ExtraData=File.Path)
if Line.startswith(u'//'):
continue
elif Line.startswith(u'#langdef'):
if len(Line.split()) == 2:
NewLines.append(Line)
continue
elif len(Line.split()) > 2 and Line.find(u'"') > 0:
NewLines.append(Line[:Line.find(u'"')].strip())
NewLines.append(Line[Line.find(u'"'):])
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
elif Line.startswith(u'#string'):
if len(Line.split()) == 2:
StrName = Line
if StrName:
if StrName.split()[1] not in ExistStrNameList:
ExistStrNameList.append(StrName.split()[1].strip())
elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
ExtraData=File.Path)
continue
elif len(Line.split()) == 4 and Line.find(u'#language') > 0:
if Line[Line.find(u'#language')-1] != ' ' or \
Line[Line.find(u'#language')+len(u'#language')] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if Line.find(u'"') > 0:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
StrName = Line.split()[0] + u' ' + Line.split()[1]
if StrName:
if StrName.split()[1] not in ExistStrNameList:
ExistStrNameList.append(StrName.split()[1].strip())
elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
ExtraData=File.Path)
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append((Line[:Line.find(u'#language')]).strip())
else:
NewLines.append((Line[:Line.find(u'#language')]).strip())
NewLines.append((Line[Line.find(u'#language'):]).strip())
elif len(Line.split()) > 4 and Line.find(u'#language') > 0 and Line.find(u'"') > 0:
if Line[Line.find(u'#language')-1] != u' ' or \
Line[Line.find(u'#language')+len(u'#language')] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if Line[Line.find(u'"')-1] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
StrName = Line.split()[0] + u' ' + Line.split()[1]
if StrName:
if StrName.split()[1] not in ExistStrNameList:
ExistStrNameList.append(StrName.split()[1].strip())
elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
ExtraData=File.Path)
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append((Line[:Line.find(u'#language')]).strip())
else:
NewLines.append((Line[:Line.find(u'#language')]).strip())
NewLines.append((Line[Line.find(u'#language'):Line.find(u'"')]).strip())
NewLines.append((Line[Line.find(u'"'):]).strip())
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
elif Line.startswith(u'#language'):
if len(Line.split()) == 2:
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append(StrName)
else:
NewLines.append(StrName)
NewLines.append(Line)
elif len(Line.split()) > 2 and Line.find(u'"') > 0:
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append(StrName)
else:
NewLines.append(StrName)
NewLines.append((Line[:Line.find(u'"')]).strip())
NewLines.append((Line[Line.find(u'"'):]).strip())
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
elif Line.startswith(u'"'):
if u'#string' in Line or u'#language' in Line:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
NewLines.append(Line)
else:
print(Line)
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if StrName and not StrName.split()[1].startswith(u'STR_'):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRNAME_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if StrName and not NewLines:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNI_MISS_LANGENTRY % StrName, \
ExtraData=File.Path)
#
# Check Abstract, Description, BinaryAbstract and BinaryDescription order,
# should be Abstract, Description, BinaryAbstract, BinaryDescription
AbstractPosition = -1
DescriptionPosition = -1
BinaryAbstractPosition = -1
BinaryDescriptionPosition = -1
for StrName in ExistStrNameList:
if DT.TAB_HEADER_ABSTRACT.upper() in StrName:
if 'BINARY' in StrName:
BinaryAbstractPosition = ExistStrNameList.index(StrName)
else:
AbstractPosition = ExistStrNameList.index(StrName)
if DT.TAB_HEADER_DESCRIPTION.upper() in StrName:
if 'BINARY' in StrName:
BinaryDescriptionPosition = ExistStrNameList.index(StrName)
else:
DescriptionPosition = ExistStrNameList.index(StrName)
OrderList = sorted([AbstractPosition, DescriptionPosition])
BinaryOrderList = sorted([BinaryAbstractPosition, BinaryDescriptionPosition])
Min = OrderList[0]
Max = OrderList[1]
BinaryMin = BinaryOrderList[0]
BinaryMax = BinaryOrderList[1]
if BinaryDescriptionPosition > -1:
if not(BinaryDescriptionPosition == BinaryMax and BinaryAbstractPosition == BinaryMin and \
BinaryMax > Max):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
elif BinaryAbstractPosition > -1:
if not(BinaryAbstractPosition > Max):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
if DescriptionPosition > -1:
if not(DescriptionPosition == Max and AbstractPosition == Min and \
DescriptionPosition > AbstractPosition):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
if not self.UniFileHeader:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message = ST.ERR_NO_SOURCE_HEADER,
ExtraData=File.Path)
return NewLines
#
# Load a .uni file
#
def LoadUniFile(self, File = None):
if File is None:
EdkLogger.Error("Unicode File Parser",
ToolError.PARSER_ERROR,
Message='No unicode file is given',
ExtraData=File.Path)
self.File = File
#
# Process special char in file
#
Lines = self.PreProcess(File)
#
# Get Unicode Information
#
for IndexI in range(len(Lines)):
Line = Lines[IndexI]
if (IndexI + 1) < len(Lines):
SecondLine = Lines[IndexI + 1]
if (IndexI + 2) < len(Lines):
ThirdLine = Lines[IndexI + 2]
#
# Get Language def information
#
if Line.find(u'#langdef ') >= 0:
self.GetLangDef(File, Line + u' ' + SecondLine)
continue
Name = ''
Language = ''
Value = ''
CombineToken = False
#
# Get string def information format as below
#
# #string MY_STRING_1
# #language eng
# "My first English string line 1"
# "My first English string line 2"
# #string MY_STRING_1
# #language spa
# "Mi segunda secuencia 1"
# "Mi segunda secuencia 2"
#
if Line.find(u'#string ') >= 0 and Line.find(u'#language ') < 0 and \
SecondLine.find(u'#string ') < 0 and SecondLine.find(u'#language ') >= 0 and \
ThirdLine.find(u'#string ') < 0 and ThirdLine.find(u'#language ') < 0:
if Line.find('"') > 0 or SecondLine.find('"') > 0:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message=ST.ERR_UNIPARSE_DBLQUOTE_UNMATCHED,
ExtraData=File.Path)
Name = Line[Line.find(u'#string ') + len(u'#string ') : ].strip(' ')
Language = SecondLine[SecondLine.find(u'#language ') + len(u'#language ') : ].strip(' ')
for IndexJ in range(IndexI + 2, len(Lines)):
if Lines[IndexJ].find(u'#string ') < 0 and Lines[IndexJ].find(u'#language ') < 0 and \
Lines[IndexJ].strip().startswith(u'"') and Lines[IndexJ].strip().endswith(u'"'):
if Lines[IndexJ][-2] == ' ':
CombineToken = True
if CombineToken:
if Lines[IndexJ].strip()[1:-1].strip():
Value = Value + Lines[IndexJ].strip()[1:-1].rstrip() + ' '
else:
Value = Value + Lines[IndexJ].strip()[1:-1]
CombineToken = False
else:
Value = Value + Lines[IndexJ].strip()[1:-1] + '\r\n'
else:
IndexI = IndexJ
break
if Value.endswith('\r\n'):
Value = Value[: Value.rfind('\r\n')]
Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
self.AddStringToList(Name, Language, Value)
continue
#
# Load multiple .uni files
#
def LoadUniFiles(self, FileList):
if len(FileList) > 0:
for File in FileList:
FilePath = File.Path.strip()
if FilePath.endswith('.uni') or FilePath.endswith('.UNI') or FilePath.endswith('.Uni'):
self.LoadUniFile(File)
#
# Add a string to list
#
def AddStringToList(self, Name, Language, Value, Token = 0, Referenced = False, UseOtherLangDef = '', Index = -1):
for LangNameItem in self.LanguageDef:
if Language == LangNameItem[0]:
break
if Language not in self.OrderedStringList:
self.OrderedStringList[Language] = []
self.OrderedStringDict[Language] = {}
IsAdded = True
if Name in self.OrderedStringDict[Language]:
IsAdded = False
if Value is not None:
ItemIndexInList = self.OrderedStringDict[Language][Name]
Item = self.OrderedStringList[Language][ItemIndexInList]
Item.UpdateValue(Value)
Item.UseOtherLangDef = ''
if IsAdded:
Token = len(self.OrderedStringList[Language])
if Index == -1:
self.OrderedStringList[Language].append(StringDefClassObject(Name,
Value,
Referenced,
Token,
UseOtherLangDef))
self.OrderedStringDict[Language][Name] = Token
for LangName in self.LanguageDef:
#
# New STRING token will be added into all language string lists.
# so that the unique STRING identifier is reserved for all languages in the package list.
#
if LangName[0] != Language:
if UseOtherLangDef != '':
OtherLangDef = UseOtherLangDef
else:
OtherLangDef = Language
self.OrderedStringList[LangName[0]].append(StringDefClassObject(Name,
'',
Referenced,
Token,
OtherLangDef))
self.OrderedStringDict[LangName[0]][Name] = len(self.OrderedStringList[LangName[0]]) - 1
else:
self.OrderedStringList[Language].insert(Index, StringDefClassObject(Name,
Value,
Referenced,
Token,
UseOtherLangDef))
self.OrderedStringDict[Language][Name] = Index
#
# Set the string as referenced
#
def SetStringReferenced(self, Name):
#
        # String tokens are added in the same order in all language string lists.
        # So, only update the status of the string token in the first language string list.
#
Lang = self.LanguageDef[0][0]
if Name in self.OrderedStringDict[Lang]:
ItemIndexInList = self.OrderedStringDict[Lang][Name]
Item = self.OrderedStringList[Lang][ItemIndexInList]
Item.Referenced = True
#
# Search the string in language definition by Name
#
def FindStringValue(self, Name, Lang):
if Name in self.OrderedStringDict[Lang]:
ItemIndexInList = self.OrderedStringDict[Lang][Name]
return self.OrderedStringList[Lang][ItemIndexInList]
return None
#
# Search the string in language definition by Token
#
def FindByToken(self, Token, Lang):
for Item in self.OrderedStringList[Lang]:
if Item.Token == Token:
return Item
return None
#
# Re-order strings and re-generate tokens
#
def ReToken(self):
if len(self.LanguageDef) == 0:
return None
#
        # Retoken all language strings according to the status of the string tokens in the first language string list.
#
FirstLangName = self.LanguageDef[0][0]
        # Convert the OrderedStringList to OrderedStringListByToken to facilitate future searches by token
for LangNameItem in self.LanguageDef:
self.OrderedStringListByToken[LangNameItem[0]] = {}
#
        # Use small tokens for all referenced string tokens.
#
RefToken = 0
for Index in range (0, len (self.OrderedStringList[FirstLangName])):
FirstLangItem = self.OrderedStringList[FirstLangName][Index]
if FirstLangItem.Referenced == True:
for LangNameItem in self.LanguageDef:
LangName = LangNameItem[0]
OtherLangItem = self.OrderedStringList[LangName][Index]
OtherLangItem.Referenced = True
OtherLangItem.Token = RefToken
self.OrderedStringListByToken[LangName][OtherLangItem.Token] = OtherLangItem
RefToken = RefToken + 1
#
        # Use big tokens for all unreferenced string tokens.
#
UnRefToken = 0
for Index in range (0, len (self.OrderedStringList[FirstLangName])):
FirstLangItem = self.OrderedStringList[FirstLangName][Index]
if FirstLangItem.Referenced == False:
for LangNameItem in self.LanguageDef:
LangName = LangNameItem[0]
OtherLangItem = self.OrderedStringList[LangName][Index]
OtherLangItem.Token = RefToken + UnRefToken
self.OrderedStringListByToken[LangName][OtherLangItem.Token] = OtherLangItem
UnRefToken = UnRefToken + 1
#
# Show the instance itself
#
def ShowMe(self):
print(self.LanguageDef)
#print self.OrderedStringList
for Item in self.OrderedStringList:
print(Item)
for Member in self.OrderedStringList[Item]:
print(str(Member))
#
# Read content from '!include' UNI file
#
    def ReadIncludeUNIfile(self, FilePath):
        if self.File:
            pass
        if not os.path.exists(FilePath) or not os.path.isfile(FilePath):
            EdkLogger.Error("Unicode File Parser",
                            ToolError.FILE_NOT_FOUND,
                            ExtraData=FilePath)
        try:
            FileIn = codecs.open(FilePath, mode='rb', encoding='utf_8').readlines()
        except UnicodeError:
            try:
                FileIn = codecs.open(FilePath, mode='rb', encoding='utf_16').readlines()
            except UnicodeError:
                FileIn = codecs.open(FilePath, mode='rb', encoding='utf_16_le').readlines()
        except Exception:
            EdkLogger.Error("Unicode File Parser", ToolError.FILE_OPEN_FAILURE, ExtraData=FilePath)
        return FileIn
|
Can Psychologists Predict Whether Donald Trump’s Popularity With Followers Will Endure or Fade?
Perhaps the least contestable thing you can say about the unpredictable Donald Trump is that he is a polarizing politician. Clearly a lot of people love him, while it seems almost as many detest the President.
Yet no one appears to have posed the key dilemma that will surely determine the success of his reign. Will his ardent followers continue to love him for an extended period into his Presidency? Will they forgive the inevitable set-backs and disappointments? Can Donald Trump display the necessary leadership skills to retain the country’s confidence when governing isn’t going his way?
There is a recent scientific psychological study that suggests an intriguing answer to these questions.
The research, entitled “The Leader Ship Is Sinking: A Temporal Investigation of Narcissistic Leadership”, concluded that after enjoying a honeymoon period, the appeal and attractiveness of the narcissistic leader rapidly wanes. In the study, university students were randomly assigned to leaderless groups, enabling participants to develop and display leadership.
This study might predict that Donald Trump’s supporters could become rather rapidly disenchanted with the very trailblazer they so idolize at the moment.
The investigation, published in the prestigious academic psychology journal, the Journal of Personality, in 2016, was inspired by the so-called ‘chocolate cake’ model of narcissistic leadership.
This model was first introduced by Keith Campbell, a professor of psychology at the University of Georgia, who was alluding to the fact that the first mouthful of chocolate cake is usually tasty and therefore, extremely pleasurable.
But over time, as you consume more of the confectionery, the very richness of its taste renders you increasingly nauseous. Professor Keith Campbell contended that being led by a narcissist was like eating too much chocolate cake: narcissists are initially perceived as effective leaders, but this positive perception seems to decrease over time.
Originally developed to explain love affairs with narcissists (such ego-merchants are great on a first date, but tend to get tiresome further into a relationship), the ‘chocolate cake’ model also applies to leaders and their followers.
Is the psychological research on narcissistic leaders predicting that once the honeymoon is over, Donald Trump’s followers are going to live to regret their commitment following the heady ‘first date’ of the election campaign?
The authors of the latest study, from Bangor University, the University of Stirling and the University of Derby in the UK, were partly inspired by the finding that while narcissists make a better first impression than those less in love with themselves, with increasing acquaintance their heightened arrogance begins to drag. So narcissists are almost inevitably found less entertaining the more you get to know them. Particularly unappealing is their tendency to swagger and overestimate their talents.
Psychologists Chin Wei Ong, Ross Roberts, Calum Arthur, Tim Woodman and Sally Akehurst, the authors of the latest study into narcissistic leadership, point out that one possible explanation for why narcissistic leaders seem great ‘on a first date’ but end up being rated poorly in the long run is that leadership ‘emergence’ and leadership ‘effectiveness’ are two different things, frequently confused with each other by an electorate.
Leadership emergence is achieved by attaining high status in a group of strangers, while once you are identified as a leader, effectiveness is judged by one’s actual performance in the post.
Although there is no doubt Donald Trump is masterful at emerging as a front-runner, this first impression doesn’t in any way predict his future effectiveness as a leader. This suggests that his followers may shortly experience a rude awakening.
Another theory is that narcissists’ decision-making strategies focus on short-term gains (which makes sense when trying to get attention at the beginning of a popularity contest), but once in power, this strategy comes at the expense of long-term benefits.
The authors of this latest study found that the decline in rating of leadership in those higher in narcissism is associated with a waning in the degree to which they display transformational leadership.
The researchers explain that transformational leadership is an approach that involves establishing relationships with followers through emotional and inspirational interactions, so that supporters become motivated to perform beyond their expectations.
However, given narcissists’ continual striving for self-enhancement and personal glory to the extent of exploiting others for personal gain, their transformational leadership possibilities fade over time.
A truly great transformational leader grabs your attention – but does so in a bid to get you to perform better. By everyone in the team doing better, the squad wins and rises to the top. In the end, it’s not about them – it’s about you.
Perhaps the most emblematic example is the famous quote from John F Kennedy delivered at his inauguration on January 20, 1961: ‘My fellow Americans, ask not what your country can do for you, ask what you can do for your country’. |
import numpy as np
import pandas as pd
import xarray as xr
import Grid
import pf_dynamic_cart
import os
import sys
from timeit import default_timer as timer
# import pf_static_cart
if __name__ == "__main__":
start = timer()
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (21, 21, 21)
(dx, dy, dz) = (0.375, 0.375, 0.375)
xgrid = Grid.Grid('CARTESIAN_3D')
xgrid.initArray('x', -Lx, Lx, dx); xgrid.initArray('y', -Ly, Ly, dy); xgrid.initArray('z', -Lz, Lz, dz)
(Nx, Ny, Nz) = (len(xgrid.getArray('x')), len(xgrid.getArray('y')), len(xgrid.getArray('z')))
# print((Nx * Ny * Nz)**(1 / 3) * (0.1 / 21))
# print(3 * np.log10(2560 / 10))
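    # np.fft.fftfreq returns frequencies in cycles per sample, so scale by 2 * np.pi / d to get angular wavenumbers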
    kxfft = np.fft.fftfreq(Nx) * 2 * np.pi / dx; kyfft = np.fft.fftfreq(Ny) * 2 * np.pi / dy; kzfft = np.fft.fftfreq(Nz) * 2 * np.pi / dz
kgrid = Grid.Grid('CARTESIAN_3D')
kgrid.initArray_premade('kx', np.fft.fftshift(kxfft)); kgrid.initArray_premade('ky', np.fft.fftshift(kyfft)); kgrid.initArray_premade('kz', np.fft.fftshift(kzfft))
kx = kgrid.getArray('kx')
tMax = 1000
dt = 10
# tMax = 100
# dt = 0.2
tgrid = np.arange(0, tMax + dt, dt)
gParams = [xgrid, kgrid, tgrid]
# NGridPoints = (2 * Lx / dx) * (2 * Ly / dy) * (2 * Lz / dz)
NGridPoints = xgrid.size()
kx = kgrid.getArray('kx'); ky = kgrid.getArray('ky'); kz = kgrid.getArray('kz')
k_max = np.sqrt(np.max(kx)**2 + np.max(ky)**2 + np.max(kz)**2)
print('datagen_qdynamics_cart_massRat')
print('Total time steps: {0}'.format(tgrid.size))
print('UV cutoff: {0}'.format(k_max))
print('NGridPoints: {0}'.format(NGridPoints))
# Basic parameters
# Toggle parameters
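    # Recognized values (per the branches below): Location: home/work/cluster; Dynamics: real/imaginary; Coupling: twophonon/frohlich; Grid: cartesian/spherical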
toggleDict = {'Location': 'cluster', 'Dynamics': 'imaginary', 'Coupling': 'twophonon', 'Grid': 'cartesian'}
# ---- SET PARAMS ----
mB = 1
n0 = 1
gBB = (4 * np.pi / mB) * 0.05
Params_List = []
# mI_Vals = np.array([1, 2, 5, 10])
# aIBi_Vals = np.array([-10.0, -5.0, -2.0])
# # P_Vals = np.array([0.1, 0.4, 0.8, 0.9, 1.0, 1.1, 1.2, 1.4, 1.6, 2.0, 2.4, 2.7, 3.0, 4.0, 5.0])
# P_Vals = np.array([3.2, 3.4, 3.6, 3.8, 3.9, 4.1, 4.2, 4.4, 4.6, 4.8, 5.2, 5.4, 5.6, 5.8, 6.0])
# for mI in mI_Vals:
# for aIBi in aIBi_Vals:
# for P in P_Vals:
# sParams = [mI, mB, n0, gBB]
# cParams = [P, aIBi]
# if toggleDict['Location'] == 'home':
# datapath = '/home/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
# elif toggleDict['Location'] == 'work':
# datapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
# elif toggleDict['Location'] == 'cluster':
# datapath = '/n/regal/demler_lab/kis/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
# if toggleDict['Dynamics'] == 'real':
# innerdatapath = datapath + '/redyn'
# elif toggleDict['Dynamics'] == 'imaginary':
# innerdatapath = datapath + '/imdyn'
# if toggleDict['Grid'] == 'cartesian':
# innerdatapath = innerdatapath + '_cart'
# elif toggleDict['Grid'] == 'spherical':
# innerdatapath = innerdatapath + '_spherical'
# if toggleDict['Coupling'] == 'frohlich':
# innerdatapath = innerdatapath + '_froh'
# elif toggleDict['Coupling'] == 'twophonon':
# innerdatapath = innerdatapath
# Params_List.append([sParams, cParams, innerdatapath])
# redo (mI, P, aIBi)
redo_Vals = [(2, 4.4, -5.0), (2, 4.6, -5.0), (2, 4.8, -5.0),
(1, 3.2, -2.0), (1, 3.4, -2.0), (1, 3.6, -2.0),
(1, 3.8, -2.0), (1, 3.9, -2.0), (1, 4.1, -5.0),
(1, 4.1, -2.0), (1, 4.2, -5.0), (1, 4.2, -2.0),
(1, 4.4, -5.0), (1, 4.6, -2.0), (1, 5.6, -10.0),
(1, 5.8, -10.0), (1, 6.0, -10.0)]
for tup in redo_Vals:
(mI, P, aIBi) = tup
sParams = [mI, mB, n0, gBB]
cParams = [P, aIBi]
if toggleDict['Location'] == 'home':
datapath = '/home/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
elif toggleDict['Location'] == 'work':
datapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
elif toggleDict['Location'] == 'cluster':
datapath = '/n/regal/demler_lab/kis/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
if toggleDict['Dynamics'] == 'real':
innerdatapath = datapath + '/redyn'
elif toggleDict['Dynamics'] == 'imaginary':
innerdatapath = datapath + '/imdyn'
if toggleDict['Grid'] == 'cartesian':
innerdatapath = innerdatapath + '_cart'
elif toggleDict['Grid'] == 'spherical':
innerdatapath = innerdatapath + '_spherical'
if toggleDict['Coupling'] == 'frohlich':
innerdatapath = innerdatapath + '_froh'
elif toggleDict['Coupling'] == 'twophonon':
innerdatapath = innerdatapath
Params_List.append([sParams, cParams, innerdatapath])
# # ---- COMPUTE DATA ON COMPUTER ----
# runstart = timer()
# for ind, Params in enumerate(Params_List):
# loopstart = timer()
# [sParams, cParams, innerdatapath] = Params_List[ind]
# [mI, mB, n0, gBB] = sParams
# [P, aIBi] = cParams
# dyncart_ds = pf_dynamic_cart.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
# dyncart_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
# loopend = timer()
# print('Index: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(ind, P, aIBi, loopend - loopstart))
# end = timer()
# print('Total Time: {:.2f}'.format(end - runstart))
# ---- COMPUTE DATA ON CLUSTER ----
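    # Assumes execution as a SLURM job array (illustrative submission: sbatch --array=0-16 <jobscript>),
    # with each array task computing one element of Params_List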
runstart = timer()
taskCount = int(os.getenv('SLURM_ARRAY_TASK_COUNT'))
taskID = int(os.getenv('SLURM_ARRAY_TASK_ID'))
    if taskCount > len(Params_List):
print('ERROR: TASK COUNT MISMATCH')
P = float('nan')
aIBi = float('nan')
sys.exit()
else:
[sParams, cParams, innerdatapath] = Params_List[taskID]
[mI, mB, n0, gBB] = sParams
[P, aIBi] = cParams
dyncart_ds = pf_dynamic_cart.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
dyncart_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
end = timer()
print('Task ID: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(taskID, P, aIBi, end - runstart))
|
10 Positive Affirmations for Happiness - #besomebody | Advice.
A guide for how to work with positive affirmations, even when you feel the farthest away from what they say.
Positive affirmations have changed my life. More accurately, positive affirmations have saved my life. For most of my life, I have been depressed and felt a sense of hopelessness about everything that had to do with my future. Filled with a deep sense of shame, I was addicted to my misery and defended my negative way of thinking and feeling as if I were protecting someone near and dear to me. I believed I deserved not to be happy because of the life I was given, and the world was to blame. I was a victim.
When I was introduced to positive affirmations through the Louise Hay book You Can Heal Your Life, I was so full of resistance, I had to stop reading it in the middle and take a three-month break.
Here was this woman telling me I could change my life by thinking positively, and that everything I didn’t have in my life (enough money, a relationship, a job that I loved, friends, and abundance) was because of MY thinking. I was very pissed.
After calming down, I returned to the book and slowly started working with positive affirmations every day. Bit by bit, they helped change me into a positive and thriving person. Now, when I hear or say something negative, it doesn’t feel good. It still hurts, but I don’t want to be and feel hurt anymore, so I quickly change it to something positive.
Here is the biggest secret I learned about positive affirmations: For them to really work, help you and change you, you need to dive into your negativity and into your shadows and learn all about your demons, your pain and the places inside of you that may be scary. The only way to positive change is through some very uncomfortable places inside you.
Here are 10 steps that show how to work with positive affirmations, even when you feel the farthest away from what they say. Work with this list to turn a painful moment around with a positive affirmation. I did, and it works.
Give words to your pain by noticing your most painful thought at the moment (No one loves me; I have nothing; Everyone has a better life than me; I’m so fat; I’m stupid; I’m alone).
Create a positive affirmation that is the complete opposite of your most painful thought at this moment.
Adjust your affirmation by saying it in a few different ways.
Choose the right affirmation by feeling which affirmation makes you feel the most emotional, which affirmation makes your heart skip a beat.
This is the affirmation for you, for today!
Say the affirmation to yourself as many times as you can. Let your heart start believing it (your mind may take a lot more work). Write it on a card, on your mirror, on your iPhone, or be even braver and say your affirmation to yourself in front of the mirror.
Respond to yourself as you would respond to a family member or a friend who is hurting; you deserve the same kindness.
Repeat steps 1-8 every day.
Sit back and watch your life change.
Here are my 10 personal favorite positive affirmations for happiness. They helped me bring a lot of positivity and happiness into my life. May they do the same to you.
My life is a joyous balance between work and play.
There is love all around me, and I am worth loving.
Today I look for the good in my life, and that is what I find.
Today I enjoy every minute of whatever I am doing.
Wonderful new doors are opening for me all the time.
It is now safe for me to forgive all of my childhood traumas. I am free.
Today I remind myself that all I need to do is keep my thinking in line with my goals. The Universe will do the rest.
Beautiful! Thanks for sharing these tips. I look forward to reading more of your posts.
Thanks Nia! So great to meet you! |
__author__ = 'adley'
import requests
import json
import random
from wscacicneo.utils.utils import Utils
from pyramid.httpexceptions import HTTPFound
from wscacicneo.model import config_reports
from liblightbase.lbsearch.search import NullDocument
from pyramid.session import check_csrf_token
from wscacicneo.search.orgao import SearchOrgao
class Graficos():
def __init__(self, request):
"""
Método construtor
:param request: Requisição
"""
self.request = request
self.usuario_autenticado = Utils.retorna_usuario_autenticado(
self.request.session.get('userid'))
def graficos_orgao(self):
if 'attr' in self.request.matchdict.keys():
attr = self.request.matchdict['attr']
else:
attr = 'softwarelist'
orgao = self.request.matchdict['nm_orgao']
data = dict()
if orgao != 'todos-orgaos':
if attr not in ['softwarelist','todos']:
return self.graficos(orgao = orgao)
elif attr == 'todos':
for attrib in ['win32_physicalmemory', 'win32_bios', 'win32_diskdrive', 'operatingsystem', 'win32_processor']:
data[attrib] = self.graficos(attr=attrib)['data']
data['softwarelist'] = self.graficos_software(view_type='detailed')['data']
else:
return self.graficos_software(orgao)
else:
search = SearchOrgao()
orgaos = [org.nome for org in search.list_by_name()]
for org in orgaos:
if attr != 'softwarelist':
data[org] = self.graficos(orgao = org)['data']
else:
data[org] = self.graficos_software(org)['data']
title_chart = ''
        # Set the chart title based on "attr"
if attr == "win32_processor":
title_chart = "Gráfico de Processadores"
elif attr == "win32_diskdrive":
title_chart = "Gráfico de HD"
elif attr == "win32_bios":
title_chart = "Gráfico de BIOS"
elif attr == "win32_physicalmemory":
title_chart = "Gráfico de Memória"
elif attr == "operatingsystem":
title_chart = "Gráfico de Sistemas Operacionais"
elif attr == "softwarelist":
title_chart = "Gráfico de Softwares"
elif attr != 'todos':
title_chart = "Gráfico de "+attr
return {"data": data,
"usuario_autenticado": self.usuario_autenticado,
"title_chart": title_chart,
"orgao_nm": orgao,
"attr": attr
}
def graficos(self, orgao=None, attr=None):
        # Set the chart title based on "attr"
if attr is None:
attr = self.request.matchdict['attr']
if attr == "win32_processor":
title_chart = "Gráfico de Processadores"
elif attr == "win32_diskdrive":
title_chart = "Gráfico de HD"
elif attr == "win32_bios":
title_chart = "Gráfico de BIOS"
elif attr == "win32_physicalmemory":
title_chart = "Gráfico de Memória"
elif attr == "operatingsystem":
title_chart = "Gráfico de Sistemas Operacionais"
else:
title_chart = "Gráfico de "+attr
if orgao is None:
orgao_nm = self.request.matchdict['nm_orgao']
else:
orgao_nm = orgao
nm_orgao = Utils.format_name(orgao_nm)
reports_config = config_reports.ConfReports(nm_orgao)
get_base = reports_config.get_attribute(attr)
results = get_base.results
data = []
list_of_numbers = []
data.append(['Item', 'Quantidade'])
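        # "data" is built as a chart-style table of [label, count] rows, e.g. [['Item', 'Quantidade'], ['Intel i5', 12], ...] (values illustrative)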
# color_list = ["#8B0000", "#191970", "#2F4F4F", "#006400", "#808000",
# "#696969", "#B8860B", "#FF8C00", "#2E8B57", "#228B22"]
# chosen_color = 0
for elm in results:
if isinstance(elm, NullDocument):
continue
parent = getattr(elm, attr)
item = getattr(parent, attr + '_item')
amount = getattr(parent, attr + '_amount')
data.append([item, int(amount)])
list_of_numbers.append([int(amount)])
            # Old code for the Charts JS library
# data.append({"label": item, "data": int(amount), "color": color_list[chosen_color]})
# chosen_color += 1
# if chosen_color >= len(color_list):
# chosen_color = 0
# if attr == "software":
# max_num = Utils.getMaxOfList(list_of_numbers)
return {"data": data,
"usuario_autenticado": self.usuario_autenticado,
"title_chart": title_chart,
"orgao_nm": orgao_nm,
"attr": attr
}
def graficos_software(self, orgao=None, view_type = None):
attr = 'softwarelist'
title_chart = "Gráfico de Softwares"
if view_type is None:
view_type = self.request.matchdict['view_type']
if orgao is None:
orgao_nm = self.request.matchdict['nm_orgao']
else:
orgao_nm = orgao
nm_orgao = Utils.format_name(orgao_nm)
reports_config = config_reports.ConfReports(nm_orgao)
get_base = reports_config.get_attribute(attr)
results = get_base.results
data = []
list_of_numbers = []
data.append(['Item', 'Quantidade'])
# color_list = ["#8B0000", "#191970", "#2F4F4F", "#006400", "#808000",
# "#696969", "#B8860B", "#FF8C00", "#2E8B57", "#228B22"]
# chosen_color = 0
for elm in results:
if isinstance(elm, NullDocument):
continue
parent = getattr(elm, attr)
item = getattr(parent, attr + '_item')
amount = getattr(parent, attr + '_amount')
data.append([item, int(amount)])
list_of_numbers.append([int(amount)])
            # Old code for the Charts JS library
# data.append({"label": item, "data": int(amount), "color": color_list[chosen_color]})
# chosen_color += 1
# if chosen_color >= len(color_list):
# chosen_color = 0
if view_type == 'simple':
data_dict = dict()
data.pop(0)
for a in data:
                data_dict[a[0]] = a[1]
data_dict = Utils.group_data(data_dict)
            data = list()
data.append(['Item', 'Quantidade'])
for a in data_dict.keys():
data.append([a, int(data_dict[a])])
#if attr == "software":
#max_num = Utils.getMaxOfList(list_of_numbers)
return {"data": data,
"usuario_autenticado": self.usuario_autenticado,
"title_chart": title_chart,
"orgao_nm": orgao_nm,
"attr": attr
} |
Comes in 7 colours - Twinkle, Apple Green, Citrus Pop, Tinted Prune, Jazzy Green, Sticky Pink and Splashy Yellow.
Generous matte finish. Hangs from white polyester cord, with an adjustable sliding knot closure. |
class Preprocessor(object):
__tokenizer = None
__steps = []
def __init__(self, tokenizer, steps):
self.__tokenizer = tokenizer
self.__steps = steps
def processText(self, comment):
tokens = self.__tokenizer.tokenize(comment)
tokenTuple = [(token, position) for position, token in enumerate(tokens)]
###### which way is faster?
for step in self.__steps:
tokenTuple = step.processAll(tokenTuple)
######
"""
pTokenTuple = []
for token, position in tokenTuple:
pT = (token, position)
for step in self.__steps:
pT = step.process(pT)
if pT: pTokenTuple.append(pT)
tokenTuple = pTokenTuple
"""
###### which way is faster?
## doesn't make a huge difference:
"""
ncalls tottime percall cumtime percall filename:lineno(function)
first one:
81098 0.342 0.000 114.429 0.001 /vagrant/cse/lang/Preprocessor.py:12(processText)
81098 0.057 0.000 65.668 0.001 /vagrant/cse/lang/NltkStemmer.py:22(processAll)
compared to second one:
81098 2.708 0.000 119.330 0.001 /vagrant/cse/lang/Preprocessor.py:12(processText)
2955756 1.275 0.000 67.069 0.000 /vagrant/cse/lang/NltkStemmer.py:26(process)
--> only about 5 seconds difference in cumulative execution time for 81098 calls
"""
return tokenTuple
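
# Illustrative usage (NltkStemmer is mentioned in the profiling notes above;
# the tokenizer and filter class names are assumptions):
#   pre = Preprocessor(NltkTokenizer(), [NltkStemmer(), StopwordFilter()])
#   tokenTuple = pre.processText("some comment text")  # -> [(token, position), ...]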
|
Heavy duty #42 netting, 1-3/4" x 1-3/4" mesh. Suitable for high impact. High density polyethylene, knotted mesh. Hung square. Easier to see through, easier to install. Rope border on two sides, hand stitched throughout. Black dye and UV inhibitors are internal. No dyes or chemicals to wash off or harm your pets. Resists fading and lasts longer. |
#!/usr/bin/env python
from __future__ import with_statement
import os
from setuptools import setup
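# Prefer README.rst when present (commonly generated for PyPI), otherwise fall back to README.md.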
readme = 'README.md'
if os.path.exists('README.rst'):
readme = 'README.rst'
with open(readme) as f:
long_description = f.read()
setup(
name='stacktracer',
version='0.1.2',
author='messense',
author_email='messense@icloud.com',
url='https://github.com/messense/stacktracer',
keywords='stack, tracer, multi-threaded, threading',
description='Stack tracer for multi-threaded applications',
long_description=long_description,
py_modules=['stacktracer'],
install_requires=[
'pygments',
],
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
A hired killer whose past has been destroyed by his complicated line of work, he sees his existence shaken when his ex-wife comes back into his life, putting his life on the line as he confronts the ghosts of past betrayals and many hardships.
Body Count is in the area!!! Director: Marco T. Alves Duration: 10'59" |
#
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
"""
The :mod:`gruvi.dbus` module implements a D-BUS client and server.
The implementation uses parts of the `txdbus
<https://github.com/cocagne/txdbus>`_ project. A cut down copy of txdbus,
containing only those parts needed by Gruvi, is available as ``gruvi.txdbus``.
You need this if you are providing a message handler (see below).
Both a client and a server/bus-side implementation are provided. The bus-side
implementation is very bare bones and apart from the "Hello" message it does
not implement any of the "org.freedesktop.DBus" interface. It also does not
implement any message routing. The server side is provided mostly for testing
purposes (but it could serve as the basis for a real D-BUS server).
The client side of a D-BUS connection is implemented by :class:`DbusClient` and
the server/bus-side by :class:`DbusServer`. Both implement a procedural
interface. Messages can be sent using e.g. :meth:`DbusClient.send_message` or
:meth:`DbusClient.call_method`. An object-oriented interface that represents
D-BUS objects as Python objects, like the one txdbus provides, is currently not
available. The procedural interface can be used as a basis for your own
object-oriented interface though.
To receive notifications or to respond to method calls, you need to provide a
*message handler* to the client or the server constructor. The signature of the
message handler is: ``message_handler(message, protocol)``. Here, the *message*
argument is an instance of ``gruvi.txdbus.DbusMessage``, and the
*protocol* will be the :class:`DbusProtocol` instance for the current
connection.
Message handlers run in their own fiber, which allows them to call into
switchpoints. There is one fiber for every connection.
Usage example::
client = gruvi.DbusClient()
client.connect('session')
result = client.call_method('org.freedesktop.DBus', '/org/freedesktop/DBus',
'org.freedesktop.DBus', 'ListNames')
for name in result[0]:
print('Name: {}'.format(name))
"""
from __future__ import absolute_import, print_function
import os
import struct
import binascii
import codecs
import functools
import six
import pyuv
from . import compat
from .hub import switchpoint, switch_back
from .util import delegate_method
from .sync import Event
from .transports import TransportError
from .protocols import ProtocolError, MessageProtocol
from .stream import Stream
from .endpoints import Client, Server
from .address import saddr
from .vendor import txdbus
__all__ = ['DbusError', 'DbusMethodCallError', 'DbusProtocol', 'DbusClient', 'DbusServer']
class DbusError(ProtocolError):
"""Exception that is raised in case of D-BUS protocol errors."""
class DbusMethodCallError(DbusError):
"""Exception that is raised when a error reply is received for a D-BUS
method call."""
def __init__(self, method, reply):
message = 'error calling {!r} method ({})'.format(method, reply.error_name)
super(DbusMethodCallError, self).__init__(message)
self._error = reply.error_name
self._args = tuple(reply.body) if reply.body else ()
@property
def error(self):
return self._error
@property
def args(self):
return self._args
def parse_dbus_address(address):
"""Parse a D-BUS address string into a list of addresses."""
if address == 'session':
address = os.environ.get('DBUS_SESSION_BUS_ADDRESS')
if not address:
raise ValueError('$DBUS_SESSION_BUS_ADDRESS not set')
elif address == 'system':
address = os.environ.get('DBUS_SYSTEM_BUS_ADDRESS',
'unix:path=/var/run/dbus/system_bus_socket')
addresses = []
for addr in address.split(';'):
p1 = addr.find(':')
if p1 == -1:
raise ValueError('illegal address string: {}'.format(addr))
kind = addr[:p1]
args = dict((kv.split('=') for kv in addr[p1+1:].split(',')))
if kind == 'unix':
if 'path' in args:
addr = args['path']
elif 'abstract' in args:
addr = '\0' + args['abstract']
else:
raise ValueError('require "path" or "abstract" for unix')
elif kind == 'tcp':
if 'host' not in args or 'port' not in args:
raise ValueError('require "host" and "port" for tcp')
addr = (args['host'], int(args['port']))
else:
raise ValueError('unknown transport: {}'.format(kind))
addresses.append(addr)
return addresses
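# Example (illustrative): a two-address string mixing unix and tcp transports:
#   parse_dbus_address('unix:path=/tmp/bus;tcp:host=127.0.0.1,port=4000')
# returns ['/tmp/bus', ('127.0.0.1', 4000)].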
class TxdbusAuthenticator(object):
"""A adapter to use the txdbus client and server authenticators with our
transports and protocols."""
# For testing, cookie_dir is set to a temporary path. Otherwise, txdbus
# uses ~/.dbus-keyrings as specified in the spec.
cookie_dir = None
def __init__(self, transport, server_side, server_guid=None):
self._transport = transport
self._server_side = server_side
if self._server_side:
self._authenticator = txdbus.BusAuthenticator(server_guid)
self._authenticator.authenticators['DBUS_COOKIE_SHA1'].keyring_dir = self.cookie_dir
else:
self._authenticator = txdbus.ClientAuthenticator()
self._authenticator.cookie_dir = self.cookie_dir
self._authenticator.beginAuthentication(self)
def sendAuthMessage(self, message):
# Called by the txdbus authenticators
message = message.encode('ascii') + b'\r\n'
self._transport.write(message)
@property
def _unix_creds(self):
# Used by txdbus.BusExternalAuthenticator
return self._transport.get_extra_info('unix_creds')
def handleAuthMessage(self, line):
# Called by our protocol
self._authenticator.handleAuthMessage(line)
def authenticationSucceeded(self):
"""Return whether the authentication succeeded."""
return self._authenticator.authenticationSucceeded()
def getMechanismName(self):
"""Return the authentication mechanism name."""
if self._server_side:
mech = self._authenticator.current_mech
return mech.getMechanismName() if mech else None
else:
return getattr(self._authenticator, 'authMech', None)
def getUserName(self):
"""Return the authenticated user name (server side)."""
if not self._server_side:
return
mech = self._authenticator.current_mech
return mech.getUserName() if mech else None
def getGUID(self):
"""Return the GUID of the authenticated server."""
return self._authenticator.getGUID()
def parse_dbus_header(header):
"""Parse a D-BUS header. Return the message size."""
if six.indexbytes(header, 0) == ord('l'):
endian = '<'
elif six.indexbytes(header, 0) == ord('B'):
endian = '>'
else:
raise ValueError('illegal endianness')
if not 1 <= six.indexbytes(header, 1) <= 4:
        raise ValueError('illegal message type')
if struct.unpack(endian + 'I', header[8:12])[0] == 0:
raise ValueError('illegal serial number')
harrlen = struct.unpack(endian + 'I', header[12:16])[0]
padlen = (8 - harrlen) % 8
bodylen = struct.unpack(endian + 'I', header[4:8])[0]
return 16 + harrlen + padlen + bodylen
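# Example (illustrative): a minimal little-endian header (endianness 'l',
# message type 1, flags 0, protocol version 1, body length 0, serial 1,
# header-array length 0) parses to a total message size of 16 bytes:
#   parse_dbus_header(struct.pack('<cBBBIII', b'l', 1, 0, 1, 0, 1, 0)) == 16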
def new_server_guid():
"""Return a new GUID for a server."""
return binascii.hexlify(os.urandom(16)).decode('ascii')
class DbusProtocol(MessageProtocol):
"""D-BUS Protocol."""
    # According to the D-BUS spec the max message size is 128MB. However, since
    # we want to limit memory usage, we are much more conservative here.
max_message_size = 128*1024
# Maximum size for an authentication line
max_line_size = 1000
_next_unique_name = 0
S_CREDS_BYTE, S_AUTHENTICATE, S_MESSAGE_HEADER, S_MESSAGE = range(4)
def __init__(self, message_handler=None, server_side=False, server_guid=None, timeout=None):
super(DbusProtocol, self).__init__(message_handler, timeout=timeout)
self._server_side = server_side
self._name_acquired = Event()
self._buffer = bytearray()
self._method_calls = {}
self._authenticator = None
if self._server_side:
self._server_guid = server_guid or new_server_guid()
self._unique_name = ':{}'.format(self._next_unique_name)
type(self)._next_unique_name += 1
else:
self._server_guid = None
self._unique_name = None
self._state = None
@property
def server_guid(self):
return self._server_guid
def connection_made(self, transport):
# Protocol callback
super(DbusProtocol, self).connection_made(transport)
# The client initiates by sending a '\0' byte, as per the D-BUS spec.
if self._server_side:
self._state = self.S_CREDS_BYTE
else:
self._state = self.S_AUTHENTICATE
self._transport.write(b'\0')
self._writer = Stream(transport, 'w')
self._authenticator = TxdbusAuthenticator(transport, self._server_side, self._server_guid)
self._message_size = 0
def connection_lost(self, exc):
# Protocol callback
super(DbusProtocol, self).connection_lost(exc)
if self._error is None:
self._error = TransportError('connection lost')
for notify in self._method_calls.values():
if isinstance(notify, switch_back):
notify.throw(self._error)
self._method_calls.clear()
self._name_acquired.set()
self._authenticator = None # break cycle
def on_creds_byte(self, byte):
if byte != 0:
self._error = DbusError('first byte needs to be zero')
return False
self._state = self.S_AUTHENTICATE
return True
def on_partial_auth_line(self, line):
if len(line) > self.max_line_size:
self._error = DbusError('auth line too long ({} bytes)'.format(len(line)))
return False
return True
def on_auth_line(self, line):
if not self.on_partial_auth_line(line):
return False
if line[-2:] != b'\r\n':
self._error = DbusError('auth line does not end with \\r\\n')
return False
try:
line = codecs.decode(line[:-2], 'ascii') # codecs.decode allows memoryview
except UnicodeDecodeError as e:
            self._error = DbusError('auth line contains non-ASCII chars')
return False
try:
self._authenticator.handleAuthMessage(line)
except txdbus.DBusAuthenticationFailed as e:
self._error = DbusError('authentication failed: {!s}'.format(e))
return False
if self._authenticator.authenticationSucceeded():
if not self._server_side:
message = txdbus.MethodCallMessage('/org/freedesktop/DBus', 'Hello',
'org.freedesktop.DBus', 'org.freedesktop.DBus')
self._transport.write(message.rawMessage)
self._method_calls[message.serial] = self.on_hello_response
self._state = self.S_MESSAGE_HEADER
self._server_guid = self._authenticator.getGUID()
return True
def on_hello_response(self, message):
self._unique_name = message.body[0]
self._name_acquired.set()
def on_message_header(self, header):
try:
size = parse_dbus_header(header)
except ValueError:
self._error = DbusError('invalid message header')
return False
if size > self.max_message_size:
self._error = DbusError('message too large ({} bytes)'.format(size))
return False
self._message_size = size
self._state = self.S_MESSAGE
return True
def on_message(self, message):
try:
parsed = txdbus.parseMessage(message)
except (txdbus.MarshallingError, struct.error) as e:
self._error = DbusError('parseMessage() error: {!s}'.format(e))
return False
if self._server_side and not self._name_acquired.is_set():
if isinstance(parsed, txdbus.MethodCallMessage) \
and parsed.member == 'Hello' \
and parsed.path == '/org/freedesktop/DBus' \
and parsed.interface == 'org.freedesktop.DBus' \
and parsed.destination == 'org.freedesktop.DBus':
response = txdbus.MethodReturnMessage(parsed.serial, signature='s',
body=[self._unique_name])
self._name_acquired.set()
self._transport.write(response.rawMessage)
else:
self._error = DbusError('Hello method not called')
return False
elif isinstance(parsed, (txdbus.MethodReturnMessage, txdbus.ErrorMessage)) \
and getattr(parsed, 'reply_serial', 0) in self._method_calls:
notify = self._method_calls.pop(parsed.reply_serial)
notify(parsed)
elif self._dispatcher:
self._queue.put_nowait(parsed)
else:
mtype = type(parsed).__name__[:-7].lower()
info = ' {!r}'.format(getattr(parsed, 'member', getattr(parsed, 'error_name', '')))
self._log.warning('no handler, ignoring inbound {}{}', mtype, info)
self._state = self.S_MESSAGE_HEADER
return True
def prepend_buffer(self, buf):
if self._buffer:
self._buffer.extend(buf)
buf = self._buffer
self._buffer = bytearray()
return memoryview(buf)
def data_received(self, data):
view = memoryview(data)
offset = 0
while offset != len(data):
if self._state == self.S_CREDS_BYTE:
credsbyte = six.indexbytes(view, offset)
offset += 1
if not self.on_creds_byte(credsbyte):
break
if self._state == self.S_AUTHENTICATE:
pos = data.find(b'\n', offset)
if pos == -1:
self._buffer.extend(view[offset:])
self.on_partial_auth_line(self._buffer)
break
line = self.prepend_buffer(view[offset:pos+1])
offset = pos+1
if not self.on_auth_line(line):
break
if self._state == self.S_MESSAGE_HEADER:
needbytes = 16 - len(self._buffer)
if len(data) - offset < needbytes:
self._buffer.extend(view[offset:])
break
header = self.prepend_buffer(view[offset:offset+needbytes])
if not self.on_message_header(header):
break
offset += len(header)
self._buffer.extend(header)
if self._state == self.S_MESSAGE:
needbytes = self._message_size - len(self._buffer)
if len(data) - offset < needbytes:
self._buffer.extend(view[offset:])
break
message = self.prepend_buffer(view[offset:offset+needbytes])
offset += needbytes
if not self.on_message(message):
break
self._maybe_pause_transport()
if self._error:
self._transport.close()
return
@switchpoint
def get_unique_name(self):
"""Return the unique name of the D-BUS connection."""
self._name_acquired.wait()
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise DbusError('not connected')
return self._unique_name
@switchpoint
def send_message(self, message):
"""Send a D-BUS message.
        The *message* argument must be a ``gruvi.txdbus.DbusMessage`` instance.
"""
if not isinstance(message, txdbus.DbusMessage):
            raise TypeError('message: expecting DbusMessage instance (got {!r})'
                            .format(type(message).__name__))
self._name_acquired.wait()
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise DbusError('not connected')
self._writer.write(message.rawMessage)
@switchpoint
def call_method(self, service, path, interface, method, signature=None,
args=None, no_reply=False, auto_start=False, timeout=-1):
"""Call a D-BUS method and wait for its reply.
This method calls the D-BUS method with name *method* that resides on
the object at bus address *service*, at path *path*, on interface
*interface*.
The *signature* and *args* are optional arguments that can be used to
add parameters to the method call. The signature is a D-BUS signature
string, while *args* must be a sequence of python types that can be
converted into the types specified by the signature. See the `D-BUS
specification
<http://dbus.freedesktop.org/doc/dbus-specification.html>`_ for a
reference on signature strings.
The flags *no_reply* and *auto_start* control the NO_REPLY_EXPECTED and
NO_AUTO_START flags on the D-BUS message.
The return value is the result of the D-BUS method call. This will be a
possibly empty sequence of values.
"""
message = txdbus.MethodCallMessage(path, method, interface=interface,
destination=service, signature=signature, body=args,
expectReply=not no_reply, autoStart=auto_start)
serial = message.serial
if timeout == -1:
timeout = self._timeout
try:
with switch_back(timeout) as switcher:
self._method_calls[serial] = switcher
self.send_message(message)
args, _ = self._hub.switch()
finally:
self._method_calls.pop(serial, None)
response = args[0]
assert response.reply_serial == serial
if isinstance(response, txdbus.ErrorMessage):
raise DbusMethodCallError(method, response)
args = tuple(response.body) if response.body else ()
return args
class DbusClient(Client):
"""A D-BUS client."""
def __init__(self, message_handler=None, timeout=30):
"""
The *message_handler* argument specifies an optional message handler.
The optional *timeout* argument specifies a default timeout for
protocol operations in seconds.
"""
protocol_factory = functools.partial(DbusProtocol, message_handler)
super(DbusClient, self).__init__(protocol_factory, timeout)
@switchpoint
def connect(self, address='session'):
"""Connect to *address* and wait until the connection is established.
The *address* argument must be a D-BUS server address, in the format
described in the D-BUS specification. It may also be one of the special
addresses ``'session'`` or ``'system'``, to connect to the D-BUS
session and system bus, respectively.
"""
if isinstance(address, six.string_types):
addresses = parse_dbus_address(address)
else:
addresses = [address]
for addr in addresses:
try:
super(DbusClient, self).connect(addr)
except pyuv.error.UVError:
continue
break
else:
raise DbusError('could not connect to any address')
# Wait for authentication to complete
self.get_unique_name()
protocol = Client.protocol
delegate_method(protocol, DbusProtocol.get_unique_name)
delegate_method(protocol, DbusProtocol.send_message)
delegate_method(protocol, DbusProtocol.call_method)
class DbusServer(Server):
"""A D-BUS server."""
def __init__(self, message_handler, timeout=30):
"""
The *message_handler* argument specifies the message handler.
The optional *timeout* argument specifies a default timeout for
protocol operations in seconds.
"""
protocol_factory = functools.partial(DbusProtocol, message_handler,
server_side=True)
super(DbusServer, self).__init__(protocol_factory, timeout)
@switchpoint
def listen(self, address='session'):
"""Start listening on *address* for new connection.
The *address* argument must be a D-BUS server address, in the format
described in the D-BUS specification. It may also be one of the special
        addresses ``'session'`` or ``'system'``, to listen on the D-BUS
        session or system bus address, respectively.
"""
if isinstance(address, six.string_types):
addresses = parse_dbus_address(address)
else:
addresses = [address]
for addr in addresses:
try:
super(DbusServer, self).listen(addr)
except pyuv.error.UVError:
self._log.error('skipping address {}', saddr(addr))
|
We are a national staffing company with new opportunities at some of our offices around the country.
The staffing industry generated $144 billion in revenue last year and is growing due to the national shortage of labor.
We have great professional opportunities for Account Executives and Office Managers to help grow our company.
We offer competitive salary and benefits for our workers.
We follow all EEOC laws and policies regarding our hiring process.
If you have a competitive personality and are looking for a career that will leverage your drive, please contact us.
Professionals only - good communication skills, respectful, dependable and flexible (we work in the people business). |
from sys import argv
from sys import stdout
from sys import stderr
import logging
from argparse import ArgumentParser
parser = ArgumentParser("Parallel Cross Power Spectrum Calculator",
description=
"""Calculating cross matter power spectrum from two RunPB input files.
Output is written to stdout, in Mpc/h units.
PowerSpectrum is the true one, without (2 pi) ** 3 factor. (differ from Gadget/NGenIC internal)
""",
epilog=
"""
This script is written by Yu Feng, as part of `nbodykit'.
The author would like thank Marcel Schmittfull for the explanation on cic, shotnoise, and k==0 plane errors.
"""
)
parser.add_argument("filename1",
help='basename of the input, only runpb format is supported in this script')
parser.add_argument("filename2",
help='basename of the input, only runpb format is supported in this script')
parser.add_argument("BoxSize", type=float,
help='BoxSize in Mpc/h')
parser.add_argument("Nmesh", type=int,
help='size of calculation mesh, recommend 2 * Ngrid')
parser.add_argument("output", help='write power to this file')
parser.add_argument("--binshift", type=float, default=0.0,
help='Shift the bin center by this fraction of the bin width. Default is 0.0. Marcel uses 0.5. this shall rarely be changed.' )
parser.add_argument("--bunchsize", type=int, default=1024*1024*4,
help='Number of particles to read per rank. A larger number usually means faster IO, but less memory for the FFT mesh')
parser.add_argument("--remove-cic", default='anisotropic', choices=["anisotropic","isotropic", "none"],
help='deconvolve cic, anisotropic is the proper way, see http://www.personal.psu.edu/duj13/dissertation/djeong_diss.pdf')
ns = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
import numpy
import nbodykit
from nbodykit.files import TPMSnapshotFile, read
from nbodykit.measurepower import measurepower
from pypm.particlemesh import ParticleMesh
from pypm.transfer import TransferFunction
from mpi4py import MPI
def paint_darkmatter(pm, filename, fileformat):
pm.real[:] = 0
Ntot = 0
for round, P in enumerate(read(pm.comm, filename, TPMSnapshotFile,
columns=['Position'], bunchsize=ns.bunchsize)):
P['Position'] *= ns.BoxSize
layout = pm.decompose(P['Position'])
tpos = layout.exchange(P['Position'])
#print tpos.shape
pm.paint(tpos)
npaint = pm.comm.allreduce(len(tpos), op=MPI.SUM)
nread = pm.comm.allreduce(len(P['Position']), op=MPI.SUM)
if pm.comm.rank == 0:
logging.info('round %d, npaint %d, nread %d' % (round, npaint, nread))
Ntot = Ntot + nread
return Ntot
def main():
if MPI.COMM_WORLD.rank == 0:
print 'importing done'
pm = ParticleMesh(ns.BoxSize, ns.Nmesh, dtype='f4')
Ntot = paint_darkmatter(pm, ns.filename1, TPMSnapshotFile)
if MPI.COMM_WORLD.rank == 0:
print 'painting done'
pm.r2c()
if MPI.COMM_WORLD.rank == 0:
print 'r2c done'
complex = pm.complex.copy()
numpy.conjugate(complex, out=complex)
Ntot = paint_darkmatter(pm, ns.filename2, TPMSnapshotFile)
if MPI.COMM_WORLD.rank == 0:
print 'painting 2 done'
pm.r2c()
if MPI.COMM_WORLD.rank == 0:
print 'r2c 2 done'
complex *= pm.complex
complex **= 0.5
if MPI.COMM_WORLD.rank == 0:
print 'cross done'
k, p = measurepower(pm, complex, ns.binshift, ns.remove_cic, 0)
if MPI.COMM_WORLD.rank == 0:
print 'measure'
if pm.comm.rank == 0:
if ns.output != '-':
myout = open(ns.output, 'w')
else:
myout = stdout
numpy.savetxt(myout, zip(k, p), '%0.7g')
myout.flush()
main()
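# Example invocation (hypothetical snapshot basenames and mesh size), typically
# run under MPI since the script uses mpi4py:
#   mpirun -n 8 python cross_power.py snap1_1.0000 snap2_1.0000 1380.0 2048 power.txt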
|
LET YOUR MIND RUN by Deena Kastor with Michelle Hamilton wins outstanding memoir at the 2019 American Society of Journalists & Authors annual awards!
Ingrid Fetell Lee, author of JOYFUL, tours New York’s Color Factory with its designer, Erin Jang, and shares thoughts on what color means to society today.
InkWell Management is excited to announce that Paramount Television and Anonymous Content have acquired the TV rights to Susan Orlean’s best-seller The Library Book.
A HAWK IN THE WOODS by Carrie Laben is now available--ask for it at your favorite bookstore! |
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb
from twisted.internet import reactor
def main():
rootobj_def = pb.getObjectAt("localhost", 8800, 30)
rootobj_def.addCallbacks(got_rootobj)
obj2_def = getSomeObjectAt("localhost", 8800, 30, "two")
obj2_def.addCallbacks(got_obj2)
obj3_def = getSomeObjectAt("localhost", 8800, 30, "three")
obj3_def.addCallbacks(got_obj3)
reactor.run()
def got_rootobj(rootobj):
print "got root object:", rootobj
print "telling root object to do foo(A)"
rootobj.callRemote("foo", "A")
def got_obj2(obj2):
print "got second object:", obj2
print "telling second object to do foo(B)"
obj2.callRemote("foo", "B")
def got_obj3(obj3):
print "got third object:", obj3
print "telling third object to do foo(C)"
obj3.callRemote("foo", "C")
class my_ObjectRetrieval(pb._ObjectRetrieval):
def __init__(self, broker, d, objname):
pb._ObjectRetrieval.__init__(self, broker, d)
self.objname = objname
def connectionMade(self):
assert not self.term, "How did this get called?"
x = self.broker.remoteForName(self.objname)
del self.broker
self.term = 1
self.deferred.callback(x)
def getSomeObjectAt(host, port, timeout=None, objname="root"):
from twisted.internet import defer
from twisted.spread.pb import Broker, BrokerClientFactory
d = defer.Deferred()
b = Broker(1)
bf = BrokerClientFactory(b)
my_ObjectRetrieval(b, d, objname)
if host == "unix":
# every time you use this, God kills a kitten
reactor.connectUNIX(port, bf, timeout)
else:
reactor.connectTCP(host, port, bf, timeout)
return d
main()
|
BARCELONA, Spain -- The Dream Team-Croatia gold-medal basketball game became one of the year's best international hustling events.
The parking lots outside the arena were full last night of ticket searchers and ticket scalpers.
Undercover policemen made arrests, and the "honest" scalpers were talking about the low morals of the "crooked" ones.
Ticket prices ranged from as high as $750 a pair three hours before the game to as low as $125 for one just minutes before tip-off.
Face value: $95 a ticket.
Highest offer heard: $1,000 a pair three days before the game.
Worst lie told: $1,000 plus your first-born child for a ticket.
"This is a hustler's paradise," said David Apathy, 40, a native of Los Angeles who is living in Hungary and was outside the arena looking for a ticket.
"But there is one universal language everyone understands. 'Hey mon, you got a ticket?' "
Greeks were selling to the French. The Hungarians were buying from the Spaniards. And the New Yorkers were hustling the Californians.
"Let me tell you something," said Mario Whitmire, a travel agent and professional scalper from Dallas. "The world's corporate monsters got all the tickets for this game and didn't use them. Somehow they filtered down to us, the scalpers, and we're selling them to the average guy who couldn't get his hands on one."
Whitmire says he was out scalping to cut down on traveling costs. He brought his wife and three kids to Barcelona.
"All the scalpers know each other," he said. "We're all at the big events. Look me up if you need tickets for the Super Bowl. I'll be staying at the Hyatt."
But the Super Bowl is low rent compared to this event.
"We have seen Magic, we have touched Magic, and he is magic," said Fernando Montero, 14, of Barcelona, who was outside the arena last night. "They're good. Everyone wants to see them because they are the best in the world. I love Michael Jordan, too."
John Grayson, of Michigan, said: "I went to the same high school as Magic Johnson. I'm living in England now for a short while, but I came here to watch Magic play one more time. It may be his last game."
Even young kids were outside trying to buy tickets.
Mark Ortego, 14, said he was from Carey, N.C. He also said he came to Barcelona with a group called Encounter for Christ.
He was a very good hustler.
"Fifty dollars max, that's all I've got," said Ortega. "This price has worked to get me into the gymnastics and swimming finals. I once got into a swimming event by giving the security guard an American pin."
When asked where his father was, Ortego replied: "On the other side of the stadium trying to hustle himself a ticket."
Bernard Nouvelett is a volunteer worker at the Olympic Games. He says he gets complimentary tickets, but no money to pay for housing. So, he sells his tickets.
The bottom line is making and, perhaps, saving a dollar.
"They know the average person can't afford to pay $5,000 a year in advance for Olympic tickets," said Kenny Bria, 28, a student at San Diego State who bought a ticket for $125 last night, and then celebrated by buying a large glass of beer.
"The average person takes a chance, comes here with no tickets and hustles to get inside. Hey, I've watched Carl Lewis run and John Smith wrestle. It only cost me $25. I'm broke now, but I've already seen two of the world's best athletes. Now I'm going to see the world's best basketball team."
But it wasn't all fun, games and ticket sales outside.
Barcelona's police department had young undercover agents arresting scalpers, and a number of counterfeit tickets were being sold.
"How do you like that?" said Whitmire, the professional. "They shouldn't be ripping people off." |
#!/usr/bin/env python
#coding: utf-8
import sys
import time
import rospy
import json
from std_msgs.msg import String, Empty, Float64
from mutualModelling.agent2 import Agent
# this node updates the models of agents (maintained by the robot) and publishes the robot's choice of action:
#-------------------------------------------------------------------------------------------------
pub_robot_action = rospy.Publisher('robot_action_topic', String, queue_size=1)
# create a mutual modeller agent "robot" that also model an agent "human" in cowriter:
#-------------------------------------------------------------------------------------
ROBOT_NAME = "Mimi"
HUMAN_NAME = "Child"
ALL_NAMES = [ROBOT_NAME, HUMAN_NAME]
robot_percepts = ["child_progress","reward","punish","justified_reward","justified_punish","justified_new_word","with_me"]
robot_actions = ["converges","diverges","exaggerates","looks_tablet","looks_child_head","looks_out","looks_experimentator","looks_selection_tablet","points_tablet"]
robot_rewards = [["justified_reward",1.,1.],["justified_punish",1.,1],["with_me",1.,1.],["with_me",-1.,-1.],["child_progress",1.,1.],["justified_new_word",1.,1.]]
#robot_instincts = [[HUMAN_NAME+"_looks_robot_head",1.,"looks_child_head"],[HUMAN_NAME+"_looks_robot_head",1.,"looks_tablet"], [HUMAN_NAME+"_looks_tablet",1.,"looks_child_head"],[HUMAN_NAME+"_looks_tablet",1.,"looks_tablet"], [HUMAN_NAME+"_looks_noise",1.,"looks_child_head"],[HUMAN_NAME+"_looks_noise",1.,"mimics"], [HUMAN_NAME+"_looks_selection_tablet",1.,"looks_selection_tablet"], [HUMAN_NAME+"_looks_experimentator",1.,"looks_experimentator"]]
#robot = Agent(ROBOT_NAME,ALL_NAMES,robot_percepts,robot_actions,robot_rewards,robot_instincts)
robot = Agent(ROBOT_NAME,ALL_NAMES,robot_percepts,robot_actions,robot_rewards)
# the point of attention of the human is used to define what action of the robot is observed by the child:
#---------------------------------------------------------------------------------------------------------
objects = {"experimentator","selection_tablet","tablet","robot_head","out"}
human_attention = ""
# what the human can perceive about robot actions given his point of attention:
visible_for_human_from = {"tablet":["converges","diverges"], "robot_head":["looks_tablet","looks_child_head","looks_out","points_tablet","looks_experimentator"]}
# what the robot is expected to perceive about human action given robot's attention:
# (the robot is not expected (by the child) to differentiate justified/unjustified behavior of the child)
visible_for_robot_from = {"tablet":["punishes","rewards","sends_demo"],"selection_tablet":["sends_new_word"], "child_head":["looks_tablet","looks_robot_head","looks_out","looks_experimentator"]}
# when an agent do/observe something the mutual models (by the robot) are updated:
#---------------------------------------------------------------------------------
models_percepts = {}
models_actions = {}
human_target = "_"
robot_target = "_"
last_info = ""
def onChangeRobotTarget(msg):
global robot_target
robot_target = str(msg.data)
def onChangeHumanTarget(msg):
global human_target
human_target = str(msg.data)
def onChangeHumanWMN(msg):
global last_info
delta_wmn = msg.data
if last_info!=str(delta_wmn):
models_percepts.setdefault(ROBOT_NAME,[]).append(("with_me",delta_wmn))
makeDecision()
last_info = str(delta_wmn)
def onRobotAction(msg):
global models_actions
global models_percepts
global last_info
action = str(msg.data)
if last_info!=action:
if human_target in visible_for_human_from:
if action in visible_for_human_from[human_target]:
models_actions[HUMAN_NAME+':'+ROBOT_NAME] = action
models_percepts.setdefault(HUMAN_NAME,[]).append((ROBOT_NAME+"_"+action,1.))
rospy.loginfo(ROBOT_NAME+"_"+action)
rospy.loginfo(".........................................")
makeDecision()
last_info=action
def onHumanAction(msg):
global models_actions
global models_percepts
global last_info
action = str(msg.data)
if last_info!=action:
models_actions[HUMAN_NAME] = action
models_percepts.setdefault(ROBOT_NAME,[]).append((HUMAN_NAME+'_'+action,1.))
rospy.loginfo(HUMAN_NAME+'_'+action)
rospy.loginfo("////////////////////////////////////////")
if robot_target in visible_for_robot_from:
if action in visible_for_robot_from[robot_target]:
models_percepts.setdefault(HUMAN_NAME+':'+ROBOT_NAME,[]).append((HUMAN_NAME+"_"+action,1.))
makeDecision()
last_info=action
def makeDecision():
global robot
global models_actions
global models_percepts
new_robot_action = None
if models_actions:
new_robot_action = robot.update_models(None,models_percepts,models_actions)
rospy.loginfo(models_percepts)
rospy.loginfo(models_actions)
#rospy.loginfo(test)
rospy.loginfo("----------------------------------------")
if new_robot_action:
msg = String()
msg.data = new_robot_action
pub_robot_action.publish(msg)
models_percepts = {}
models_actions = {}
rospy.sleep(1.0)
# TODO:
"""
def onRobotObs(msg):
def onHumanObs(msg):
"""
if __name__=='__main__':
rospy.init_node("cowriter_mutual_modelling")
    # register each subscriber once; re-creating the subscribers every second
    # in a loop would pile up duplicate callbacks, and rospy.spin() keeps the
    # node alive
    rospy.Subscriber('robot_action_topic', String, onRobotAction)
    rospy.Subscriber('human_action_topic', String, onHumanAction)
    rospy.Subscriber('robot_target_topic', String, onChangeRobotTarget)
    rospy.Subscriber('human_target_topic', String, onChangeHumanTarget)
    rospy.Subscriber('human_wmn_topic', Float64, onChangeHumanWMN)
    #rospy.Subscriber('robot_obs_topic', String, onRobotObs)
    #rospy.Subscriber('human_obs_topic', String, onHumanObs)
    rospy.spin()
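# Example (hypothetical): drive the node from a shell for testing, e.g.
#   rostopic pub human_action_topic std_msgs/String "data: 'looks_tablet'" -1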
|
REstore is a slow-design shop built on digital craftsmanship.
Each luminaire is made according to these concepts, from recycled or sustainable materials or simply cut and assembled digitally then finished by craftsmen. |
from markdown import markdown
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
#
# Filters
#
@register.filter()
def oneline(value):
"""
Replace each line break with a single space
"""
return value.replace('\n', ' ')
@register.filter()
def getlist(value, arg):
"""
Return all values of a QueryDict key
"""
return value.getlist(arg)
@register.filter(is_safe=True)
def gfm(value):
"""
Render text as GitHub-Flavored Markdown
"""
html = markdown(value, extensions=['mdx_gfm'])
return mark_safe(html)
@register.filter()
def startswith(value, arg):
"""
Test whether a string starts with the given argument
"""
return str(value).startswith(arg)
@register.filter()
def user_can_add(model, user):
    # Django permission names use a dot separator: <app_label>.<action>_<model>
    perm_name = '{}.add_{}'.format(model._meta.app_label, model.__class__.__name__.lower())
return user.has_perm(perm_name)
@register.filter()
def user_can_change(model, user):
    perm_name = '{}.change_{}'.format(model._meta.app_label, model.__class__.__name__.lower())
return user.has_perm(perm_name)
@register.filter()
def user_can_delete(model, user):
    perm_name = '{}.delete_{}'.format(model._meta.app_label, model.__class__.__name__.lower())
return user.has_perm(perm_name)
#
# Tags
#
@register.simple_tag()
def querystring_toggle(request, multi=True, page_key='page', **kwargs):
"""
Add or remove a parameter in the HTTP GET query string
"""
new_querydict = request.GET.copy()
# Remove page number from querystring
try:
new_querydict.pop(page_key)
except KeyError:
pass
# Add/toggle parameters
for k, v in kwargs.items():
values = new_querydict.getlist(k)
if k in new_querydict and v in values:
values.remove(v)
new_querydict.setlist(k, values)
elif not multi:
new_querydict[k] = v
else:
new_querydict.update({k: v})
querystring = new_querydict.urlencode()
if querystring:
return '?' + querystring
else:
return ''
@register.inclusion_tag('utilities/templatetags/utilization_graph.html')
def utilization_graph(utilization, warning_threshold=75, danger_threshold=90):
"""
Display a horizontal bar graph indicating a percentage of utilization.
"""
return {
'utilization': utilization,
'warning_threshold': warning_threshold,
'danger_threshold': danger_threshold,
}
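# Example (hypothetical template usage; the name given to {% load %} depends on
# how this tag library file is registered in the app):
#
#   {% load helpers %}
#   {{ object.comments|gfm }}
#   <a href="{% querystring_toggle request status='active' %}">Active</a>
#   {% utilization_graph rack_utilization %}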
|
Adonis Symbiote, a PBX/VOIP server (code named “Hermes”), is designed to bring low-cost VOIP to every business. Fully compatible with all forms of SIP, DAHDI, and IAX2 devices, Symbiote Hermes allows for out-of-the-box VOIP setup as well as ATA connection to convert your old PSTN equipment (such as a fax machine) to VOIP. With voicemail, a user control panel, conference calling, inward access, IVR (interactive voice response), and an operator panel as just some of the standard features, Symbiote Hermes offers a complete solution to your company's VOIP needs.
Voicemail, IVR auto attendant, FAX, Video call support, Inbound/Outbound call route control, up to 1000 extensions, 80 concurrent calls, call recording, DDNS, NAT traversal, TLS security (optional), QOS, Firewall built in, operator mode, User control panel, connect to any SIP or IAX compatible client, ATA adapter etc.
Optional: ATA adapter required for PSTN access - see our ATA products.
Larger products can be designed for larger firms and call centers.
We recommend adding the PBX to a port forward DMZ for remote workers.
Larger servers available for greater capacity needs, NO per user licensing.
Paid codecs are also available, as are speex, opus, siren, silk, adpcm, g723, g726, and others. |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""Variational autoencoder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
from absl import flags
import tensorflow as tf
from lib import data, layers, train, utils, classifiers
FLAGS = flags.FLAGS
class VAE(train.AE):
def model(self, latent, depth, scales, beta):
x = tf.placeholder(tf.float32,
[None, self.height, self.width, self.colors], 'x')
l = tf.placeholder(tf.float32, [None, self.nclass], 'label')
h = tf.placeholder(
tf.float32,
[None, self.height >> scales, self.width >> scales, latent], 'h')
def encoder(x):
return layers.encoder(x, scales, depth, latent, 'ae_enc')
def decoder(h):
return layers.decoder(h, scales, depth, self.colors, 'ae_dec')
encode = encoder(x)
with tf.variable_scope('ae_latent'):
encode_shape = tf.shape(encode)
encode_flat = tf.layers.flatten(encode)
latent_dim = encode_flat.get_shape()[-1]
q_mu = tf.layers.dense(encode_flat, latent_dim)
log_q_sigma_sq = tf.layers.dense(encode_flat, latent_dim)
q_sigma = tf.sqrt(tf.exp(log_q_sigma_sq))
q_z = tf.distributions.Normal(loc=q_mu, scale=q_sigma)
q_z_sample = q_z.sample()
q_z_sample_reshaped = tf.reshape(q_z_sample, encode_shape)
p_x_given_z_logits = decoder(q_z_sample_reshaped)
p_x_given_z = tf.distributions.Bernoulli(logits=p_x_given_z_logits)
ae = 2*tf.nn.sigmoid(p_x_given_z_logits) - 1
decode = 2*tf.nn.sigmoid(decoder(h)) - 1
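        # The next lines compute the closed-form KL divergence between the
        # diagonal Gaussian posterior q(z|x) = N(mu, sigma^2) and the standard
        # normal prior: KL = 0.5 * sum(sigma^2 + mu^2 - 1 - log(sigma^2)),
        # averaged over the batch.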
loss_kl = 0.5*tf.reduce_sum(
-log_q_sigma_sq - 1 + tf.exp(log_q_sigma_sq) + q_mu**2)
loss_kl = loss_kl/tf.to_float(tf.shape(x)[0])
x_bernoulli = 0.5*(x + 1)
loss_ll = tf.reduce_sum(p_x_given_z.log_prob(x_bernoulli))
loss_ll = loss_ll/tf.to_float(tf.shape(x)[0])
elbo = loss_ll - beta*loss_kl
utils.HookReport.log_tensor(loss_kl, 'loss_kl')
utils.HookReport.log_tensor(loss_ll, 'loss_ll')
utils.HookReport.log_tensor(elbo, 'elbo')
xops = classifiers.single_layer_classifier(
tf.stop_gradient(encode), l, self.nclass)
xloss = tf.reduce_mean(xops.loss)
utils.HookReport.log_tensor(xloss, 'classify_latent')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
ae_vars = tf.global_variables('ae_')
xl_vars = tf.global_variables('single_layer_classifier')
with tf.control_dependencies(update_ops):
train_ae = tf.train.AdamOptimizer(FLAGS.lr).minimize(
-elbo, var_list=ae_vars)
train_xl = tf.train.AdamOptimizer(FLAGS.lr).minimize(
xloss, tf.train.get_global_step(), var_list=xl_vars)
ops = train.AEOps(x, h, l, q_z_sample_reshaped, decode, ae,
tf.group(train_ae, train_xl),
classify_latent=xops.output)
n_interpolations = 16
n_images_per_interpolation = 16
def gen_images():
return self.make_sample_grid_and_save(
ops, interpolation=n_interpolations,
height=n_images_per_interpolation)
recon, inter, slerp, samples = tf.py_func(
gen_images, [], [tf.float32]*4)
tf.summary.image('reconstruction', tf.expand_dims(recon, 0))
tf.summary.image('interpolation', tf.expand_dims(inter, 0))
tf.summary.image('slerp', tf.expand_dims(slerp, 0))
tf.summary.image('samples', tf.expand_dims(samples, 0))
return ops
def main(argv):
del argv # Unused.
batch = FLAGS.batch
dataset = data.get_dataset(FLAGS.dataset, dict(batch_size=batch))
scales = int(round(math.log(dataset.width // FLAGS.latent_width, 2)))
model = VAE(
dataset,
FLAGS.train_dir,
latent=FLAGS.latent,
depth=FLAGS.depth,
scales=scales,
beta=FLAGS.beta)
model.train()
if __name__ == '__main__':
    flags.DEFINE_integer('depth', 64, 'Depth of the first convolution.')
flags.DEFINE_integer(
'latent', 16,
'Latent space depth, the total latent size is the depth multiplied by '
'latent_width ** 2.')
flags.DEFINE_integer('latent_width', 4, 'Width of the latent space.')
flags.DEFINE_float('beta', 1.0, 'ELBO KL term scale.')
app.run(main)
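# Example run (hypothetical flag values; the `dataset`, `train_dir`, `batch`
# and `lr` flags are defined by the imported lib.data/lib.train modules):
#   python vae.py --dataset=mnist32 --train_dir=/tmp/vae --latent=16 --beta=1.0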
|
One Percocet is stronger than two T3s. Is it safe?
What is a youth? Impetuous fire. What is a maid? Ice and desire. The world wags on. A rose will bloom. It then will fade. So does a youth. So does the fairest. The soundtrack for the film Romeo and Juliet was composed and conducted by Nino Rota. It was originally released as a vinyl record, containing nine entries, most notably the song "What Is a Youth", composed by Nino Rota, written by. What is a youth? Impetuous fire. What is a maid? Ice and desire. The world wags on. A rose will bloom, It then will fade. So does a youth. So does the fairest.
Original lyrics of What Is A Youth song by Nino Rota. Explore 2 meanings and explanations or write yours. Find more of Nino Rota lyrics. Watch official video. So does the youth. So does the fairest maid. Comes the time when one sweet smile. has its season for a while. Then love's in love with me. What Is a Youth by Nino Rota feat. Glen Weston - discover this song's samples, covers and remixes on WhoSampled.
F#m What is a maid? Em Ice and desire. Bm The world wags on F#m A rose will bloom, G it then will fade Em F#m So does the youth. Bm F#m. |
#!/usr/bin/env python
# Copyright (c) 2012, Nick Harvey
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import Matrix, random, copy, sys
class ga:
def __init__(self):
self.population = []
self.coeff_size = 0
def seedGA(self, size, inSize, hiddenSize, outSize):
'Creates a new set of matrices for the neural network'
self.size = size
self.inSize = inSize
self.hiddenSize = hiddenSize
self.outSize = outSize
self.coeff_size = (inSize * hiddenSize) + hiddenSize + (hiddenSize * outSize) + outSize
for i in range(size):
matrices = {}
matrices["first_weights"] = self.getWeights(inSize, hiddenSize)
matrices["first_thresholds"] = self.getThresholds(hiddenSize)
matrices["second_weights"] = self.getWeights(hiddenSize, outSize)
matrices["second_thresholds"] = self.getThresholds(outSize)
self.population.append(matrices)
def getInputSize(self):
return self.inSize
def getHiddenSize(self):
return self.hiddenSize
def getOutputSize(self):
return self.outSize
def setGA2(self, population):
self.population = population
self.size = len(self.population)
def setGA(self, population):
self.population = population
self.size = len(self.population)
temp = population[0]
self.inSize = temp["first_weights"].getWidth()
self.hiddenSize = temp["first_thresholds"].getHeight()
self.outSize = temp["second_weights"].getHeight()
self.coeff_size = (self.inSize * self.hiddenSize) + self.hiddenSize + (self.hiddenSize * self.outSize) + self.outSize
def getWeights(self, width, height):
m = Matrix.Matrix()
m.setWidth(width)
m.setHeight(height)
m.randomFill(0, 1)
return m
def getThresholds(self, height):
m = Matrix.Matrix()
m.setHeight(height)
m.randomFill(0, 2)
return m
def getPopulation(self):
return self.population
def mateParents(self, firstParentId, secondParentId, intest=False):
        '''Takes the ids of two chromosomes, picks a crossover point and then
        takes the first part of chromosome one, before the crossover location,
        and joins it with the second part of chromosome two, after the crossover
        location. It then does the opposite with the first part of chromosome two
        and the second part of chromosome one.'''
parent1 = self.population[firstParentId]
parent2 = self.population[secondParentId]
selector = int(random.random() * self.coeff_size)
children = [parent1.copy(), parent2.copy()]
crossover_index = 0
if intest:
print("Mating parents:", selector)
# print("selector:", selector, self.coeff_size)
if selector < (self.inSize * self.hiddenSize):
selector_row = int(selector/self.inSize)
selector_col = selector % self.inSize
crossover_loc = "first_weights"
crossover_index = 0
# print("Crossing first weights", selector, selector_row, selector_col)
else:
# child["first_weights"] = parent1["first_weights"].copy()
selector = selector - (self.inSize * self.hiddenSize)
if selector < self.hiddenSize:
selector_row = selector
selector_col = 0
crossover_loc = "first_thresholds"
crossover_index = 1
# print("Crossing first thresholds", selector, selector_row, selector_col)
else:
# child["first_thresholds"] = parent1["first_thresholds"].copy()
selector = selector - (self.hiddenSize)
if selector < (self.hiddenSize * self.outSize):
selector_row = int(selector/self.hiddenSize)
selector_col = selector % self.hiddenSize
crossover_loc = "second_weights"
crossover_index = 2
# print("Crossing second weights", selector, selector_row, selector_col)
else:
# child["second_weights"] = parent1["second_weights"].copy()
selector = selector - (self.hiddenSize * self.outSize)
selector_row = selector
selector_col = 0
crossover_loc = "second_thresholds"
crossover_index = 3
# print("Crossing second thresholds", selector, selector_row, selector_col)
if intest:
print("Crossover location:", crossover_loc)
children[0][crossover_loc] = self.crossOver(parent1[crossover_loc], parent2[crossover_loc], selector_row, selector_col)
children[1][crossover_loc] = self.crossOver(parent2[crossover_loc], parent1[crossover_loc], selector_row, selector_col)
## then merge the remaining matrices into the new chromozone
if crossover_index < 1:
children[0]['first_thresholds'] = copy.deepcopy(parent2['first_thresholds'])
children[1]['first_thresholds'] = copy.deepcopy(parent1['first_thresholds'])
if crossover_index < 2:
children[0]['second_weights'] = copy.deepcopy(parent2['second_weights'])
children[1]['second_weights'] = copy.deepcopy(parent1['second_weights'])
if crossover_index < 3:
children[0]['second_thresholds'] = copy.deepcopy(parent2['second_thresholds'])
children[1]['second_thresholds'] = copy.deepcopy(parent1['second_thresholds'])
return children
    def crossOver(self, parent_one, parent_two, row, col, intest=False):
        """Builds a child matrix: elements before the crossover point (row, col),
        in row-major order, come from parent_one; the rest from parent_two."""
        rows = parent_one.getHeight()
        cols = parent_one.getWidth()
        # row-major index of the crossover point; multiply the row by the number
        # of columns (not rows) so non-square matrices partition correctly
        cross = row*cols + col
        m = Matrix.Matrix().setWidth(cols).setHeight(rows).init()
        for i in range(rows):
            for j in range(cols):
                if (i*cols+j) < cross:
                    m.setElem(i, j, parent_one.getElem(i, j))
                else:
                    m.setElem(i, j, parent_two.getElem(i, j))
        return m
def mutatePopulation(self, survivors, mutationRate, crossOver):
        '''Mutates a population of chromosomes.
        There are a number of approaches to achieving this. This approach uses a primary
        random number in the range (0..3) to select which of the 4 matrices in the ANN
        to mutate. Once it has picked one, it uses a second random number in the
        range (0..number_of_elements) to choose which value to change.
        This is simplistic in comparison with another algorithm, which biases the
        probability by the size of the matrix. This approach causes mutations on the
        smaller matrices much more frequently.'''
newPopulation = []
mutations = [0 for i in range(4)]
for matrices in survivors:
# matrices = self.population[s]
if random.random() < mutationRate:
matrixToMutate = int(random.random() * 4)
print("Mutating", str(matrixToMutate))
# selector refers to the first weights matrix ([ INPUT x HIDDEN ])
if matrixToMutate == 0:
selector_col = int(random.random() * self.inSize)
selector_row = int(random.random() * self.hiddenSize)
matrices["first_weights"].setElem(selector_row, selector_col, random.random())
mutations[0] += 1
# selector refers to the first thresholds matrix
elif matrixToMutate == 1:
selector = int(random.random() * self.hiddenSize)
currVal = matrices["first_thresholds"].getElem(selector, 0)
currVal += (0.1 - random.random()*0.2)
if currVal > 2:
currVal = 2
elif currVal < 0:
currVal = 0
matrices["first_thresholds"].setElem(selector, 0, currVal)
mutations[1] += 1
# print("Mutation in first threshold", selector)
# selector refers to the second weights matrix ([ HIDDEN x OUTPUT ])
elif matrixToMutate == 2:
selector_col = int(random.random() * self.hiddenSize)
selector_row = int(random.random() * self.outSize)
matrices["second_weights"].setElem(selector_row, selector_col, random.random())
mutations[2] += 1
# print("Mutation in second matrix", selector, selector_row, selector_col)
# selector refers to the second thresholds
elif matrixToMutate == 3:
selector = int(random.random() * self.outSize)
currVal = matrices["second_thresholds"].getElem(selector, 0)
currVal += (0.1 - random.random()*0.2)
if currVal > 2:
currVal = 2
elif currVal < 0:
currVal = 0
matrices["second_thresholds"].setElem(selector, 0, currVal)
mutations[3] += 1
#print("Mutation in second threshold")
else:
print("Unknown index for the matrix to mutate:", matrixToMutate)
newPopulation.append(matrices)
self.population = newPopulation
return mutations
def mutatePopulationSingle(self, survivors, mutationRate, crossOver):
newPopulation = []
mutations = [0 for i in range(4)]
for matrices in survivors:
# matrices = self.population[s]
if random.random() < mutationRate:
selector = int(random.random() * self.coeff_size)
# selector refers to the first weights matrix
if selector < (self.inSize * self.hiddenSize):
selector_row = int(selector/self.inSize)
selector_col = selector % self.inSize
try:
matrices["first_weights"].setElem(selector_row, selector_col, random.random())
except:
print("Selector: ", selector)
print("Width: ", matrices["first_weights"].getWidth(), self.inSize, len(matrices["first_weights"].getData()[0]))
print("Height: ", matrices["first_weights"].getHeight(), self.hiddenSize, len(matrices["first_weights"].getData()))
print("Problems with setElem...")
print(selector_row, selector_col)
sys.exit()
# print("Mutation in first matrix", selector, selector_row, selector_col)
mutations[0] += 1
else:
selector = selector - (self.inSize * self.hiddenSize)
# selector refers to the first thresholds matrix
if selector < self.hiddenSize:
currVal = matrices["first_thresholds"].getElem(selector, 0)
currVal += (0.1 - random.random()*0.2)
if currVal > 2:
currVal = 2
elif currVal < 0:
currVal = 0
matrices["first_thresholds"].setElem(selector, 0, currVal)
mutations[1] += 1
# print("Mutation in first threshold", selector)
pass
else:
selector = selector - (self.hiddenSize)
# selector refers to the second weights matrix
if selector < (self.hiddenSize * self.outSize):
                            # second_weights is outSize x hiddenSize (height x
                            # width), so divide by hiddenSize to stay in range,
                            # matching the indexing in mateParents above
                            selector_row = int(selector/self.hiddenSize)
                            selector_col = selector % self.hiddenSize
matrices["second_weights"].setElem(selector_row, selector_col, random.random())
mutations[2] += 1
# print("Mutation in second matrix", selector, selector_row, selector_col)
# selector refers to the second thresholds
else:
selector = selector - (self.hiddenSize * self.outSize)
currVal = matrices["second_thresholds"].getElem(selector, 0)
currVal += (0.1 - random.random()*0.2)
if currVal > 2:
currVal = 2
elif currVal < 0:
currVal = 0
matrices["second_thresholds"].setElem(selector, 0, currVal)
mutations[3] += 1
#print("Mutation in second threshold")
newPopulation.append(matrices)
self.population = newPopulation
return mutations
def roulette(self, scores, intest=False):
nextGen = []
selector = int(random.random() * self.size)
selectors = []
beta = 0.0
mw = max(scores)
roul = roulette(scores)
for n in range(self.size):
pair = []
## no breeding, just fittest lives
if intest:
pair = [0, 1]
print("Testing crossover,", pair)
## breed the pair
nextGen.extend(self.mateParents(pair[0], pair[1], intest))
# purely for testing
if True:
for i in range(2):
pair.append(roul.getNext())
## breed the pair
nextGen.extend(self.mateParents(pair[0], pair[1]))
# just random roulette wheel picker and append to next gen..
else:
nextGen.append(copy.deepcopy(self.population[roul.getNext()]))
# print("Selected: ", selector)
selectors.append(selector)
# if we've grown the next generation to the maxpop size, then finish
if len(nextGen) >= self.size:
break
# print(selectors)
return nextGen
# return self.population
def validate(self):
"Verifies that the sizes of the internal ANN matrices match those of the settings"
valid = True
for p in self.population:
valid = valid and (p["first_weights"].getWidth() == self.inSize)
valid = valid and (p["first_weights"].getHeight() == self.hiddenSize)
valid = valid and (p["first_thresholds"].getWidth() == 1)
valid = valid and (p["first_thresholds"].getHeight() == self.hiddenSize)
return valid
class roulette:
def __init__(self, counts):
self.data = counts
self.mw = sum(self.data)
self.size = len(self.data)
self.beta = 0
self.selector = 0
def getNext(self):
self.beta += (random.random() * self.mw * 2.0)
while (self.beta > self.data[self.selector]):
self.beta -= self.data[self.selector]
self.selector = (self.selector + 1) % self.size
return self.selector
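# Example (hypothetical usage): evolve one generation of 20 networks with
# 4 inputs, 6 hidden units and 2 outputs; `evaluate` stands in for a
# user-supplied fitness function returning a non-negative score per chromosome.
#
#   g = ga()
#   g.seedGA(20, 4, 6, 2)
#   scores = [evaluate(c) for c in g.getPopulation()]
#   survivors = g.roulette(scores)
#   g.mutatePopulationSingle(survivors, 0.05, None)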
|
"Good to hear from you, Veronique, and to know how much Marion's life and writings touched your life. She made all the difference. I am sad for your loss as well, and join you in gratitude for her deep soul teachings and…"
"Sad to hear that Marion Woodman has passed away. What a great teacher she was. I read all of her books in the past two years - and I am grateful to her every day for what I could learn from her. She has changed my life for the better. Thank you…"
I'm a Jungian analyst, dance/movement psychotherapist and somatics practitioner, author, and teacher of Authentic Movement (embodied active imagination), and Marion Woodman's BodySoul Rhythms approach.
Hi Tina. Happy new year, and welcome to the online depth community! Robin Gates mentioned she had invited you, so perhaps you found the Alliance through her. I think once you have a chance to explore the site a bit, you'll find a dynamic group of like-minded people who are all fascinated by the field of depth psychology. While there are quite a number of Jungian analysts here, it's a very interdisciplinary community. Many of our members come from many walks of life--artists, writers, doctors, healing professionals, counselors, students, business people, etc--so there is much knowledge and experience here to be shared.
On that note, there are also many opportunities to engage as you begin browsing the vast content and interacting with others--all depending on what level of time investment you choose to make based on your current situation. You may want to consider joining the new online book club if you haven’t already—it’s just begun (with a new author each month!)—and you can participate at whatever level you wish.
You may just find a few Pacifica students among the members of the Alliance. Glad you found us and decided to join. I've spoken to several of the somatic students and they are enjoying the classes and esp. the experiential events. Somatics has been a focus within conversations in the various blogs, forums, and groups but not as a separate focus. I hope your presence with us might help draw the focus more sharply. I look forward to your posts. |
#!/usr/bin/env python
from telegram import TelegramObject
class User(TelegramObject):
def __init__(self,
id,
first_name,
last_name=None,
username=None):
self.id = id
self.first_name = first_name
self.last_name = last_name
self.username = username
@property
def name(self):
if self.username:
return '@%s' % self.username
if self.last_name:
return '%s %s' % (self.first_name, self.last_name)
return self.first_name
@staticmethod
def de_json(data):
return User(id=data.get('id', None),
first_name=data.get('first_name', None),
last_name=data.get('last_name', None),
username=data.get('username', None))
def to_dict(self):
data = {'id': self.id,
'first_name': self.first_name}
if self.last_name:
data['last_name'] = self.last_name
if self.username:
data['username'] = self.username
return data
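# Example (illustrative): round-trip a user through de_json()/to_dict():
#   u = User.de_json({'id': 1, 'first_name': 'Ada', 'username': 'ada'})
#   u.name       # '@ada'
#   u.to_dict()  # {'id': 1, 'first_name': 'Ada', 'username': 'ada'}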
|
The rear-loading C1100TC HD container is manufactured from injection-moulded, self-coloured high-density polyethylene, stabilised against the combined effects of UV rays and water. It is produced using environmentally-friendly, recyclable materials and there are no heavy metals in the pigments used for manufacture.
The C1100TC HD can be adapted to the different types of selective collection.
Pedal-operated lid available on request.
Integrated into the body for handling the container.
Safety locking system for the lid that prevents it from closing fully unless manually activated, to avoid accidents.
Accessory to improve product visibility.
Has a series of built-in handles on the body and lid for handling.
CONTENUR products can be supplied on order in a wide variety of colours. The high-density mass-coloured polyethylene injection system is stabilised against the combined action of water and ultraviolet (UV) rays. The high-density polyethylene enables the product to easily withstand the elements and makes it extremely stable when faced with temperature changes. Recyclable materials that do not have a negative environmental impact are used, as well as pigments that do not contain heavy metals, which means that the colours remain unaltered.
|
from polyphony import testbench
'''
+--------------------------------------------------------------------------+
| CHStone : A suite of Benchmark Programs for C-based High-Level Synthesis |
| ======================================================================== |
| |
| * Collected and Modified : Y. Hara, H. Tomiyama, S. Honda, |
| H. Takada and K. Ishii |
| Nagoya University, Japan |
| |
| * Remarks : |
| 1. This source code is reformatted to follow CHStone's style. |
| 2. Test vectors are added for CHStone. |
| 3. If "main_result" is 0 at the end of the program, the program is |
| successfully executed. |
| 4. Follow the copyright of each benchmark program. |
+--------------------------------------------------------------------------+
/*
* IDCT transformation of Chen algorithm
*
* @(#) $Id: chenidct.c,v 1.2 2003/07/18 10:19:21 honda Exp $
*/
/*************************************************************
Copyright (C) 1990, 1991, 1993 Andy C. Hung, all rights reserved.
PUBLIC DOMAIN LICENSE: Stanford University Portable Video Research
Group. If you use this software, you agree to the following: This
program package is purely experimental, and is licensed "as is".
Permission is granted to use, modify, and distribute this program
without charge for any purpose, provided this license/ disclaimer
notice appears in the copies. No warranty or maintenance is given,
either expressed or implied. In no event shall the author(s) be
liable to you or a third party for any special, incidental,
consequential, or other damages, arising out of the use or inability
to use the program for any purpose (or the loss of data), even if we
have been advised of such possibilities. Any public reference or
advertisement of this source code should refer to it as the Portable
Video Research Group (PVRG) code, and not by any author(s) (or
Stanford University) name.
*************************************************************/
/*
************************************************************
chendct.c
A simple DCT algorithm that seems to have fairly nice arithmetic
properties.
W. H. Chen, C. H. Smith and S. C. Fralick "A fast computational
algorithm for the discrete cosine transform," IEEE Trans. Commun.,
vol. COM-25, pp. 1004-1009, Sept 1977.
************************************************************
'''
# Cosine constants, scaled by 512 (2**9); MSCALE() below shifts right by 9
# to undo this scaling after each multiply.
c1d4 = 362    # cos(1*pi/4)  * 512
c1d8 = 473    # cos(1*pi/8)  * 512
c3d8 = 196    # cos(3*pi/8)  * 512
c1d16 = 502   # cos(1*pi/16) * 512
c3d16 = 426   # cos(3*pi/16) * 512
c5d16 = 284   # cos(5*pi/16) * 512
c7d16 = 100   # cos(7*pi/16) * 512
'''
/*
*
* ChenIDCT() implements the Chen inverse dct. Note that there are two
* input vectors that represent x=input, and y=output, and must be
* defined (and storage allocated) before this routine is called.
*/
'''
def ChenIDct(x:list, y:list):
def LS(r,s):
return r << s
def RS(r,s):
return r >> s # Caution with rounding...
def MSCALE(expr):
return RS(expr, 9)
# Loop over columns
for i in range(8):
b0 = LS(x[i + 0], 2)
a0 = LS(x[i + 8], 2)
b2 = LS(x[i + 16], 2)
a1 = LS(x[i + 24], 2)
b1 = LS(x[i + 32], 2)
a2 = LS(x[i + 40], 2)
b3 = LS(x[i + 48], 2)
a3 = LS(x[i + 56], 2)
# Split into even mode b0 = x0 b1 = x4 b2 = x2 b3 = x6.
# And the odd terms a0 = x1 a1 = x3 a2 = x5 a3 = x7.
c0 = MSCALE((c7d16 * a0) - (c1d16 * a3))
c1 = MSCALE((c3d16 * a2) - (c5d16 * a1))
c2 = MSCALE((c3d16 * a1) + (c5d16 * a2))
c3 = MSCALE((c1d16 * a0) + (c7d16 * a3))
# First Butterfly on even terms.
a0 = MSCALE(c1d4 * (b0 + b1))
a1 = MSCALE(c1d4 * (b0 - b1))
a2 = MSCALE((c3d8 * b2) - (c1d8 * b3))
a3 = MSCALE((c1d8 * b2) + (c3d8 * b3))
b0 = a0 + a3
b1 = a1 + a2
b2 = a1 - a2
b3 = a0 - a3
# Second Butterfly
a0 = c0 + c1
a1 = c0 - c1
a2 = c3 - c2
a3 = c3 + c2
c0 = a0
c1 = MSCALE(c1d4 * (a2 - a1))
c2 = MSCALE(c1d4 * (a2 + a1))
c3 = a3
y[i + 0] = b0 + c3
y[i + 8] = b1 + c2
y[i + 16] = b2 + c1
y[i + 24] = b3 + c0
y[i + 32] = b3 - c0
y[i + 40] = b2 - c1
y[i + 48] = b1 - c2
y[i + 56] = b0 - c3
# Loop over rows
for i in range(8):
idx = LS(i, 3)
b0 = y[idx+0]
a0 = y[idx+1]
b2 = y[idx+2]
a1 = y[idx+3]
b1 = y[idx+4]
a2 = y[idx+5]
b3 = y[idx+6]
a3 = y[idx+7]
# Split into even mode b0 = x0 b1 = x4 b2 = x2 b3 = x6.
# And the odd terms a0 = x1 a1 = x3 a2 = x5 a3 = x7.
c0 = MSCALE((c7d16 * a0) - (c1d16 * a3))
c1 = MSCALE((c3d16 * a2) - (c5d16 * a1))
c2 = MSCALE((c3d16 * a1) + (c5d16 * a2))
c3 = MSCALE((c1d16 * a0) + (c7d16 * a3))
# First Butterfly on even terms.
a0 = MSCALE(c1d4 * (b0 + b1))
a1 = MSCALE(c1d4 * (b0 - b1))
a2 = MSCALE((c3d8 * b2) - (c1d8 * b3))
a3 = MSCALE((c1d8 * b2) + (c3d8 * b3))
# Calculate last set of b's
b0 = a0 + a3
b1 = a1 + a2
b2 = a1 - a2
b3 = a0 - a3
# Second Butterfly
a0 = c0 + c1
a1 = c0 - c1
a2 = c3 - c2
a3 = c3 + c2
c0 = a0
c1 = MSCALE(c1d4 * (a2 - a1))
c2 = MSCALE(c1d4 * (a2 + a1))
c3 = a3
idx = LS(i, 3)
y[idx+0] = b0 + c3
y[idx+1] = b1 + c2
y[idx+2] = b2 + c1
y[idx+3] = b3 + c0
y[idx+4] = b3 - c0
y[idx+5] = b2 - c1
y[idx+6] = b1 - c2
y[idx+7] = b0 - c3
# Retrieve correct accuracy. We have additional factor
# of 16 that must be removed.
for i in range(64):
v = y[i]
if v < 0:
y[i] = (v - 8)>>4
else:
y[i] = (v + 8)>>4
return 0
@testbench
def test():
ins = [
154, 192, 254, 239, 180, 128, 123, 110,
123, 180, 198, 180, 154, 136, 105, 136,
123, 136, 154, 136, 136, 123, 110, 123,
123, 154, 154, 180, 167, 136, 149, 123,
123, 154, 180, 180, 166, 154, 136, 123,
123, 154, 154, 166, 149, 180, 136, 136,
123, 136, 123, 123, 136, 198, 180, 154,
136, 110, 123, 123, 136, 154, 166, 136
]
outs = [None] * 64
expected = [
1077, -250, 114, -109, 76, -27, 56, 12,
-232, 156, -106, -16, -13, -9, -25, 8,
236, -74, 62, -20, 5, -4, 31, 6,
16, 48, -68, -18, -18, -7, 1, -16,
163, -30, -7, -25, 16, 23, -9, 22,
29, -9, -4, -4, -4, 13, -13, -8,
81, -2, -12, -10, 12, 15, 5, 11,
37, 3, -4, -7, -6, 6, 7, 18
]
ChenIDct(ins, outs)
for i in range(64):
print(outs[i])
assert outs[i] == expected[i]
test()
|
flashers do nothing. Flashers have a clicking sound. This happened last year also, jumped car and would run but once I turn off everything dead again. The auto store says battery is fully charged. Any help out there?
Just because you charge a battery doesn't mean it is capable of holding the charge. You need to have the battery tested and likely replaced.
If the car requires a jump to start, then you still have a battery problem with the battery itself or the cable connections. |
# Copyright (c) 2014, Matt Layman
from datetime import datetime
import io
import json
import os
import time
from werkzeug.contrib.atom import AtomFeed
from werkzeug.contrib.atom import FeedEntry
from handroll import logger
from handroll.composers import Composer
from handroll.exceptions import AbortError
from handroll.i18n import _
class AtomComposer(Composer):
"""Compose an Atom feed from an Atom metadata file (``.atom``).
The ``AtomComposer`` parses the metadata specified in the source file and
produces an XML Atom feed. ``AtomComposer`` uses parameters that are needed
by Werkzeug's ``AtomFeed`` API. Refer to the `Werkzeug documentation
<http://werkzeug.pocoo.org/docs/contrib/atom/>`_ for all the available
options.
The dates in the feed should be in `RfC 3339
<http://www.ietf.org/rfc/rfc3339.txt>`_ format (e.g.,
``2014-06-13T11:39:30``).
Here is a sample feed:
.. literalinclude:: ../sample/atom_sample.atom
"""
def compose(self, catalog, source_file, out_dir):
root, ext = os.path.splitext(os.path.basename(source_file))
filename = root + '.xml'
output_file = os.path.join(out_dir, filename)
if self._needs_update(source_file, output_file):
logger.info(_('Generating Atom XML for {source_file} ...').format(
source_file=source_file))
feed = self._parse_feed(source_file)
with open(output_file, 'wb') as out:
out.write(feed.to_string().encode('utf-8'))
out.write(b'<!-- handrolled for excellence -->\n')
else:
logger.debug(_('Skipping {filename} ... It is up to date.').format(
filename=filename))
def _needs_update(self, source_file, out_file):
"""Check if the output file needs to be updated by looking at the
modified times of the source file and output file."""
if os.path.exists(out_file):
return os.path.getmtime(source_file) > os.path.getmtime(out_file)
else:
# The file doesn't exist so it definitely needs to be "updated."
return True
def _parse_feed(self, source_file):
try:
with io.open(source_file, 'r', encoding='utf-8') as f:
metadata = json.loads(f.read())
if metadata.get('entries') is None:
raise ValueError(_('Missing entries list.'))
entries = metadata['entries']
# AtomFeed expects FeedEntry objects for the entries keyword so
# remove it from the metadata and add it after the feed is built.
del metadata['entries']
feed = AtomFeed(**metadata)
                for entry in entries:
                    feed.add(self._make_entry(entry))
except ValueError as error:
raise AbortError(_('Invalid feed {source_file}: {error}').format(
source_file=source_file, error=str(error)))
return feed
def _make_entry(self, data):
# Convert dates into datetime instances.
if 'updated' in data:
data['updated'] = self._convert_date(data['updated'])
if 'published' in data:
data['published'] = self._convert_date(data['published'])
return FeedEntry(**data)
    def _convert_date(self, date):
        """Convert a date string into a datetime instance. Assumes the date
        string is in RFC 3339 format."""
time_s = time.strptime(date, '%Y-%m-%dT%H:%M:%S')
return datetime.fromtimestamp(time.mktime(time_s))
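# A minimal sketch of a source ``.atom`` metadata file this composer could
# consume (all values hypothetical; the accepted keys are whatever Werkzeug's
# AtomFeed and FeedEntry constructors take):
#
#     {
#         "title": "Example Site",
#         "id": "http://example.com/feed.xml",
#         "entries": [
#             {
#                 "title": "First post",
#                 "url": "http://example.com/first.html",
#                 "updated": "2014-06-13T11:39:30"
#             }
#         ]
#     }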
|
RedDoorz @ Kubu Anyar 3 - Kuta, Bali, Indonesia - Great discounted rates!
6.6 out of 10 based on 2 verified user reviews via Agoda.
RedDoorz @ Kubu Anyar 3 is conveniently located in the popular Kuta area. The hotel offers a wide range of amenities and perks to ensure you have a great time. Service-minded staff will welcome and guide you at the RedDoorz @ Kubu Anyar 3. An LCD/plasma-screen television, complimentary wireless internet access, non-smoking rooms, air conditioning, and a wake-up service can be found in selected guestrooms. Enjoy the hotel's recreational facilities, including an outdoor pool and garden. RedDoorz @ Kubu Anyar 3 combines warm hospitality with a lovely ambiance to make your stay in Bali unforgettable.
RedDoorz @ Kubu Anyar 3 - Bali classifies itself as a 3 star hotel. |
# Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E0611,R0201,E1101
from unittest import TestCase
from nose.tools import assert_equal, raises, assert_true
from wlauto.core.extension import Extension, Parameter, Param, ExtensionMeta, Module
from wlauto.utils.types import list_of_ints
from wlauto.exceptions import ConfigError
class MyMeta(ExtensionMeta):
virtual_methods = ['validate', 'virtual1', 'virtual2']
class MyBaseExtension(Extension):
__metaclass__ = MyMeta
name = 'base'
parameters = [
Parameter('base'),
]
def __init__(self, **kwargs):
super(MyBaseExtension, self).__init__(**kwargs)
self.v1 = 0
self.v2 = 0
self.v3 = ''
def virtual1(self):
self.v1 += 1
self.v3 = 'base'
def virtual2(self):
self.v2 += 1
class MyAcidExtension(MyBaseExtension):
name = 'acid'
parameters = [
Parameter('hydrochloric', kind=list_of_ints, default=[1, 2]),
'citric',
('carbonic', int),
]
def __init__(self, **kwargs):
super(MyAcidExtension, self).__init__(**kwargs)
self.vv1 = 0
self.vv2 = 0
def virtual1(self):
self.vv1 += 1
self.v3 = 'acid'
def virtual2(self):
self.vv2 += 1
class MyOtherExtension(MyBaseExtension):
name = 'other'
parameters = [
Param('mandatory', mandatory=True),
Param('optional', allowed_values=['test', 'check']),
]
class MyOtherOtherExtension(MyOtherExtension):
name = 'otherother'
parameters = [
Param('mandatory', override=True),
]
class MyOverridingExtension(MyAcidExtension):
name = 'overriding'
parameters = [
Parameter('hydrochloric', override=True, default=[3, 4]),
]
class MyThirdTeerExtension(MyOverridingExtension):
name = 'thirdteer'
class MultiValueParamExt(Extension):
name = 'multivalue'
parameters = [
Parameter('test', kind=list_of_ints, allowed_values=[42, 7, 73]),
]
class MyCoolModule(Module):
name = 'cool_module'
capabilities = ['fizzle']
def initialize(self):
self.fizzle_factor = 0 # pylint: disable=attribute-defined-outside-init
def fizzle(self):
self.fizzle_factor += 1
class MyEvenCoolerModule(Module):
name = 'even_cooler_module'
capabilities = ['fizzle']
def fizzle(self):
self.owner.self_fizzle_factor += 2
class MyModularExtension(Extension):
name = 'modular'
parameters = [
Parameter('modules', override=True, default=['cool_module']),
]
class MyOtherModularExtension(Extension):
name = 'other_modular'
parameters = [
Parameter('modules', override=True, default=[
'cool_module',
'even_cooler_module',
]),
]
def __init__(self, **kwargs):
super(MyOtherModularExtension, self).__init__(**kwargs)
self.self_fizzle_factor = 0
class FakeLoader(object):
modules = [
MyCoolModule,
MyEvenCoolerModule,
]
def get_module(self, name, owner, **kwargs): # pylint: disable=unused-argument
for module in self.modules:
if module.name == name:
return _instantiate(module, owner)
class ExtensionMetaTest(TestCase):
def test_propagation(self):
acid_params = [p.name for p in MyAcidExtension.parameters]
assert_equal(acid_params, ['modules', 'base', 'hydrochloric', 'citric', 'carbonic'])
@raises(ValueError)
def test_duplicate_param_spec(self):
class BadExtension(MyBaseExtension): # pylint: disable=W0612
parameters = [
Parameter('base'),
]
def test_param_override(self):
class OverridingExtension(MyBaseExtension): # pylint: disable=W0612
parameters = [
Parameter('base', override=True, default='cheese'),
]
assert_equal(OverridingExtension.parameters['base'].default, 'cheese')
@raises(ValueError)
def test_invalid_param_spec(self):
class BadExtension(MyBaseExtension): # pylint: disable=W0612
parameters = [
7,
]
def test_virtual_methods(self):
acid = _instantiate(MyAcidExtension)
acid.virtual1()
assert_equal(acid.v1, 1)
assert_equal(acid.vv1, 1)
assert_equal(acid.v2, 0)
assert_equal(acid.vv2, 0)
assert_equal(acid.v3, 'acid')
acid.virtual2()
acid.virtual2()
assert_equal(acid.v1, 1)
assert_equal(acid.vv1, 1)
assert_equal(acid.v2, 2)
assert_equal(acid.vv2, 2)
class ParametersTest(TestCase):
def test_setting(self):
myext = _instantiate(MyAcidExtension, hydrochloric=[5, 6], citric=5, carbonic=42)
assert_equal(myext.hydrochloric, [5, 6])
assert_equal(myext.citric, '5')
assert_equal(myext.carbonic, 42)
def test_validation_ok(self):
myext = _instantiate(MyOtherExtension, mandatory='check', optional='check')
myext.validate()
def test_default_override(self):
myext = _instantiate(MyOverridingExtension)
assert_equal(myext.hydrochloric, [3, 4])
myotherext = _instantiate(MyThirdTeerExtension)
assert_equal(myotherext.hydrochloric, [3, 4])
def test_multivalue_param(self):
myext = _instantiate(MultiValueParamExt, test=[7, 42])
myext.validate()
assert_equal(myext.test, [7, 42])
@raises(ConfigError)
def test_bad_multivalue_param(self):
myext = _instantiate(MultiValueParamExt, test=[5])
myext.validate()
@raises(ConfigError)
def test_validation_no_mandatory(self):
myext = _instantiate(MyOtherExtension, optional='check')
myext.validate()
@raises(ConfigError)
def test_validation_no_mandatory_in_derived(self):
_instantiate(MyOtherOtherExtension)
@raises(ConfigError)
def test_validation_bad_value(self):
myext = _instantiate(MyOtherExtension, mandatory=1, optional='invalid')
myext.validate()
class ModuleTest(TestCase):
def test_fizzle(self):
myext = _instantiate(MyModularExtension)
myext.load_modules(FakeLoader())
assert_true(myext.can('fizzle'))
myext.fizzle()
assert_equal(myext.fizzle_factor, 1)
def test_self_fizzle(self):
myext = _instantiate(MyOtherModularExtension)
myext.load_modules(FakeLoader())
myext.fizzle()
assert_equal(myext.self_fizzle_factor, 2)
def _instantiate(cls, *args, **kwargs):
# Needed to get around Extension's __init__ checks
return cls(*args, **kwargs)
|
“Fallon’s resignation” — Of course.
“Vice President Cheney’s peace trip” — Whether for war or peace, all travel by Cheney signals war to some folks! Also, peace trips by high US government officials are like trolleys, as they come along frequently. Not exactly a hot indicator of war.
“Israeli airstrike on Syria” — Five month old news, so not exactly a Defcon 1 alert.
“Warships off Lebanon” — US fleet movements are another standard element of the “about to bomb Iran” urban legends. US ships leave the Middle East (which means nothing); US ships arrive … which means war!
“Israeli comments: Israeli President Shimon Peres said earlier this month that Israel will not consider unilateral action to stop Iran from getting a nuclear bomb. In the past, though, Israeli officials have quite consistently said they were prepared to act alone.” — Israel saying they will bomb Iran is evidence; so are statements that they will not bomb.
“Israel’s war with Hezbollah: While this seems a bit old, Israel’s July 2006 war in Lebanon…” — Almost two years ago, and so of little relevance. Yes, this is odd to include in this list.
Other posts in this series about the Internet: does it make us smarter or dumber?
Previous Post Will we bomb Iran, now that Admiral Fallon is gone?
Next Post What will America look like after this recession?
“Warships off Lebanon” – yes because if I wanted to attack Iran, I couldn’t think of a better place to put my warships than someplace with Iran’s sole ally between them and their target.
It seems that the logical strategy would be to ‘contain’ Iran while working toward aiding and/or influencing some form of internal change. I may give too much credit, but I suspect that we’re doing an adequate job in that regard.
The rest is just balance-of-power word war with a faint redolence of brinksmanship. I’d wager that every time the words ‘invade’, ‘attack’ and ‘Iran’ show up close together, people over there get pretty nervous.
I think you are right. But the story is interesting in another way: the US government today reminds me of the old Soviet regime, where even the most subtle changes in rhetoric or a sudden change in the leadership give rise to speculation. A bit sad for American democracy, I think.
Fabius Maximus replies: Interesting observation! Some of this might result from size, as the government is so large that we must guess at what goes on inside the beast (as it grows, volume increases faster than surface area — so it grows more “opaque”). Some of this perhaps results from decay, as the government increasingly responds largely to internal factors. This is the “court politics” model of Versailles-on-the-Potomac, where policy decisions are made on the basis of competing factions (both individuals and departments) — not on their effect on the outside world.
That would actually be there to deal with Hezbollah, Iran's proxy in Lebanon. The line they keep giving about Iran having some sort of secret program is another signal of intentions against Iran.
I am not ‘pushing’ the ‘Iran about to be attacked’ scenario but, if the Israeli air strike on Syria were meant to test the standard Russian-built defenses (as I assume these are), then a few months to figure out the best way to circumvent them is not out of the question. It would be a prudent action if a strike were contemplated.
Fabius Maximus: Unfortunately this logic works just as well the other way. If we thought the Russian-built air defenses were any good, Israel's strike has blown our operational advantage. Iran has had months to upgrade and otherwise adapt to the lessons provided by Israel. That suggests that the strike was not a prelude to hitting Iran (by either Israel or the US), or if so (as you suggest) the strike should have followed more closely, before Iran too could adjust.
from django.test import TestCase
from django.core.urlresolvers import reverse, resolve
from ..views import async
class SearchUrlTest(TestCase):
def test_search(self):
actual = reverse('async:search:search',
kwargs={
'organization': 'habitat',
'project': '123',
})
expected = '/async/organizations/habitat/projects/123/search/'
assert actual == expected
resolved = resolve(
'/async/organizations/habitat/projects/123/search/')
assert resolved.func.__name__ == async.Search.__name__
assert resolved.kwargs['organization'] == 'habitat'
assert resolved.kwargs['project'] == '123'
# def test_search_export(self):
# actual = reverse('async:search:export',
# kwargs={
# 'organization': 'habitat',
# 'project': '123',
# })
# expected = '/async/organizations/habitat/projects/123/search/export/'
# assert actual == expected
# resolved = resolve(
# '/async/organizations/habitat/projects/123/search/export/')
# assert resolved.func.__name__ == async.SearchExport.__name__
# assert resolved.kwargs['organization'] == 'habitat'
# assert resolved.kwargs['project'] == '123'
|
We've seen a lot of the best scopes for the money for a lot of different types of guns.
What we would like to do now is reveal to you best scopes for the money overall.
Without taking the caliber into consideration, or the use, we are going to look at the highest quality scopes for the best value possible!
Let's get started! Once we reveal the top 10 best scopes for the money, I'll review each one as well!
These are our top rifle scopes for the money! Most are tubular style scopes, and a couple are red dot scopes. Regardless of the type, we made sure there was value in all of them for the price.
What does 'Best Scope For The Money' Even Mean?
With so many different scopes out there, some of you may think there are other scopes that should be on this list.
We didn't tread lightly here when narrowing these down. We followed some criteria when determining which scopes should make the top 10. The biggest criterion of all: making sure that there is value combined with high quality.
For most, it simply means finding the best deal. But finding the best deal is all relative, right?
For example, a FLIR Systems Thermal Night Vision Scope could climb close to $6,000! But what if you could buy that same scope for 50% off at $3,000? Now, technically, this FLIR is a great scope for the money, because usually it's $3,000 more!
But I don't think that's what most people mean. Most people, when they are trying to find the best scope for the money, are looking for the lower priced scopes that still have some high quality, right?
This isn't the same as finding a budget scope, though. Cheap scopes are just that, cheap. Budget/cheap scopes are considered to be in the $20 - $60 range. Anything in this range is risky.
The materials are very low grade and the optic likely won't hold zero long term. You'll have to keep repurchasing a scope every few years and hope it never breaks on you while you are in the middle of using it!
Now that you've seen briefly how we determined value, let's dig deeper into each rifle scope.
Nikon dominated this list, with 4 Nikon scopes in the top 5, and there's a big reason why. They make high quality scopes and they keep the prices low.
Even if you jump up into the $200-400 range, you are looking at a high quality scope that is compared to other similar scopes that are $500 and above!
With that being said, why is the Nikon P-223 3-9x40 the best scope for the money in our eyes?
Not only is it built by Nikon, who provides an outstanding warranty on all of their products, but it's the price that got me. At just under $200, it'll feel like you are getting a scope for at least double the price!
The Nikon P-223 is designed specifically for .223/5.56mm rifles, which is what most AR-15s are chambered for.
With it being specifically designed for the AR-15, you know that it will be able to handle a high amount of recoil in short amounts of time.
Nikon made the P-223 for hunters and shooters who like being able to get first-round hits at longer distances.
When zeroed at 200 yards, the BDC 600 reticle has additional aiming points at 50 and 100 yard increments up to 600 yards.
The P-223 also comes with target turrets which offers 1/4 inch click adjustments at 100 yards. Both the windage and elevation turrets can be pulled out to reset the turret.
The scope is strong and durable, and it is waterproof and fogproof thanks to its nitrogen fill and O-ring seal.
The only negative thing I found was that the reticle was slightly blurry at 100 yards and above. And it could’ve been the scope I was using. Other than that, it's a great scope for the money.
The P-223 offers a lot of technology and engineering at a very competitive price. The glass is clear and crisp and you’ll have a hard time finding an equal for your AR for less money.
Reticle slightly blurry above 100 yards.
Below are the rest of the best scopes for the money that you can find. You can find actual user reviews at sites like Amazon for each given scope. Check them all out, you won't be disappointed!
Overall, this list is a big list and some of the best scopes for the money you can find.
Remember, we aren't looking for the cheapest scopes here, we are looking for scopes that have value. Finding the best scopes for the money doesn't have to be a difficult process as long as you make sure that you are maintaining high quality without breaking the bank.
In the end, any of these rifle scopes are great choices for the money, and you can't go wrong getting anything from Nikon. |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Create a Mosaic Dataset in the workspace and add all
# *.img rasters from the input directory
#
# python dems2mosaic.py "C:\workspace\dems" "C:\workspace" "26915.prj"
import arcpy,sys,os
from config import *
from distutils.spawn import *
from arcpy import *
################ Usage check and argument assigning
if len(sys.argv) != 4:
    print "Usage: dems2mosaic.py <input directory> <workspace/output dir> <.prj file>"
    print "The input directory should have the img rasters in it"
    print "Contents in the output directory will be overwritten"
    exit(-1)
else:
    # The three documented positional arguments (the mosaic dataset name
    # below still comes from the config file).
    inpath = sys.argv[1]
    workspacedir = sys.argv[2]
    prjfile = sys.argv[3]
arcpy.env.workspace = workspacedir
gdbname = config.get('arcgis','dem_mosaic_name')
# Create a File GeoDatabase to house the Mosaic dataset
arcpy.CreateFileGDB_management(workspacedir, gdbname)
# Create Mosaic Dataset
# http://resources.arcgis.com/en/help/main/10.2/index.html#//00170000008n000000
mdname = "DEM_MOSAIC"
noband = "1"
pixtype = "32_BIT_FLOAT"
pdef = "NONE"
wavelength = ""
arcpy.CreateMosaicDataset_management(gdbname, mdname, prjfile, noband, pixtype, pdef, wavelength)
# Add rasters to Mosaic Dataset
# http://resources.arcgis.com/en/help/main/10.2/index.html#//001700000085000000
mdname = gdbname + "/" + mdname
rastype = "ERDAS IMAGINE" # http://resources.arcgis.com/en/help/main/10.2/index.html#//009t0000000v000000
updatecs = "NO_CELL_SIZES"
updatebnd = "NO_BOUNDARY"
updateovr = "UPDATE_OVERVIEWS"
maxlevel = "0" # pyramid level
maxcs = ""
maxdim = ""
spatialref = ""
inputdatafilter = "*.img"
subfolder = "NO_SUBFOLDERS"
duplicate = "EXCLUDE_DUPLICATES"
buildpy = "NO_PYRAMIDS"
calcstats = "NO_STATISTICS" # CALCULATE_STATISTICS
buildthumb = "NO_THUMBNAILS"
comments = "Add Raster Datasets"
forcesr = "NO_FORCE_SPATIAL_REFERENCE"
arcpy.AddRastersToMosaicDataset_management(
mdname, rastype, inpath, updatecs, updatebnd, updateovr,
maxlevel, maxcs, maxdim, spatialref, inputdatafilter,
subfolder, duplicate, buildpy, calcstats,
buildthumb, comments, forcesr)
|
Cabins to rent in the Bay of Plenty - covering Tauranga, Taupo and Rotorua.
Looking for multi-lingual e-commerce solutions? - Health from Nature, a company supplying natural health supplements, were looking for a site in English and Dutch with the ability to sell to English or Dutch speaking customers.
4success have provided a suitable solution to an international client, supporting their need for a multi-lingual solution and accepting payments in a foreign currency. Visit Health from Nature's site to review their solution.
MetalWorx Mobile an on site welding company approached 4success for assistance in setting a web site to promote their business. We delivered a site with carefully prepared SEO and within a week of launch MetalWorx Mobile already had enquiries which resulted in new business.
We recently upgraded the NZ Curling Association and Auckland Curling Club's Joomla web sites, ensuring each site was on the latest version of its release stream, and provided a custom designed template for their sites.
Hazel-Hewitt and Associates are resource consents and environmental independent consultants.
Lovely health and it's subsidiaries Greenlippedmussel and Jakes best mate - full ecommerce solutions accepting orders and payments for NZ natural Health immune system supplements on line.
Saw the article on TV One on 1st Sept and are wondering whether to list there? Read this..
We've restyled our client site Terra Nova Consultancy and introduced a Newsletter module to allow Terra Nova to increase their communications profile with their clients.
4success's own site was upgraded using Joomla 1.5 allowing for easy content management and new restyling options.
To help you promote your web site we have added an article focusing on web site promotion. |
# -*- coding: utf-8 -*-
"""Unshortener Documentation
This module unshortens URLs
"""
import re
import http
from urllib.parse import urlparse, ParseResult
from http import client
from isurlshortener.exceptions import PathMissing, UnhandledHTTPStatusCode, LocationHeaderMissing, ProtocolException
class Unshortener(object):
#FIXME: Most servers redirect http to https --> special handling for that?
@staticmethod
def unshorten_url(url: str) -> str:
"""Tries to unshorten an URL by requesting it and checking HTTP status
Args:
url: URL to check. The url MUST contain a protocol (e.g., http://), a domain (e.g., example.net), and a path
(e.g., something/) --> http://example.net/something/
Returns:
Unshortened URL
Raises:
IsUrlShortener.LocationHeaderMissing: Server did not return a Location
IsUrlShortener.UnhandledHTTPStatusCode: Unsupported HTTP status code
"""
url = Unshortener._prepare_url(url)
        if url.path in ('', '/'):
            raise PathMissing()
server_connection = Unshortener._get_connection(url)
server_connection.request('GET', url.path)
response = server_connection.getresponse()
if response.status in range(300, 309):
return Unshortener._get_location_from_header(response.getheaders())
        elif response.status == 200:
return url.geturl()
else:
raise UnhandledHTTPStatusCode(response.status)
@staticmethod
def _get_location_from_header(headers: list) -> str:
"""Returns the location information from the headers
Args:
headers: Header returned from the server
Returns:
Location information
Raises:
IsUrlShortener.LocationHeaderMissing: Location field missing in the header
"""
for header_field in headers:
if header_field[0].lower() == 'location':
return header_field[1]
raise LocationHeaderMissing
@staticmethod
    def _prepare_url(url: str) -> ParseResult:
        """Prepares a given URL string for the unshortener
        Args:
            url: URL to prepare
        Returns:
            ParseResult with the prepared URL information
Raises:
IsUrlShortener.ProtocolException: http/https protocol prefix is missing
"""
if not re.findall('^(http[s]?://)', url):
raise ProtocolException('Invalid protocol or no protocol given')
return urlparse(url)
@staticmethod
    def _get_connection(url: ParseResult) -> http.client.HTTPConnection:
"""Prepares a connection to a given server
Args:
url: URL with server information
Returns:
Connection to the server
Raises:
IsUrlShortener.ProtocolException: Protocol not supported
"""
if url.scheme == 'http':
return http.client.HTTPConnection(url.netloc)
elif url.scheme == 'https':
return http.client.HTTPSConnection(url.netloc)
else:
raise ProtocolException('Protocol Exception: "{}"'.format(url.scheme))
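# A minimal usage sketch (hypothetical short URL; a 3xx response resolves to
# the Location header, while a plain 200 returns the URL unchanged):
#
#     resolved = Unshortener.unshorten_url('https://example.net/abc123')
#     print(resolved)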
|
from math import isinf
class Interval:
def __init__(self, val_from, val_to):
self._val_from = float(val_from)
self._val_to = float(val_to)
def __contains__(self, value):
return bool(self._val_from <= float(value) <= self._val_to)
def __str__(self):
return "{}-{}".format(self._val_from, self._val_to)
def is_open(self):
return bool(isinf(self._val_to))
class Intervals:
def __init__(self, intervals):
self._open_intervals = []
self._closed_intervals = []
for i in intervals:
if i.is_open():
self._open_intervals.append(i)
else:
self._closed_intervals.append(i)
def val_in_open_interval(self, value):
return self.val_in(self._open_intervals, value)
def val_in_closed_interval(self, value):
return self.val_in(self._closed_intervals, value)
def val_in(self, intervals, value):
for interval in intervals:
if value in interval:
return True
return False
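# A minimal usage sketch (illustrative values):
#
#     iv = Intervals([Interval(0, 10), Interval(100, float('inf'))])
#     assert iv.val_in_closed_interval(5)       # 5 lies within 0.0-10.0
#     assert iv.val_in_open_interval(1000)      # open-ended interval 100-inf
#     assert not iv.val_in_closed_interval(50)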
|
OBP and Dickinson College partnered to create books that offer enhanced key texts in Latin in Open Access format. Our joint Series appears as both free web resources hosted on the DCC website and as interactive texts released in a variety of formats: free to read, digital and printed.
Including embedded audio files of the original text read aloud, these editions also contain commentary, notes and full vocabulary. Both entertaining and thought-provoking, they are an invaluable aid to students of Latin and general readers alike. |
import bpy
import os
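# Usage note (a sketch; this script targets the Blender 2.79-era Python API,
# as the node.color_space and object.select usage suggests): run it from the
# Text Editor, and a "Bake PBR" panel appears under Properties > Render.
# Save the .blend first, since baked EXR files are written to an IMAGES/
# folder next to the .blend file.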
def setSceneOpts():
global channels
global sizex
global sizey
global selected_to_active
# VARIABLES
sizex = bpy.context.scene.bake_pbr_channels.sizex
sizey = bpy.context.scene.bake_pbr_channels.sizey
selected_to_active= bpy.context.scene.bake_pbr_channels.seltoact
channels = {"metallic":["ME","GLOSSY"],
"occlusion":["AO","AO"],
"normal":["NM","NORMAL"],
"emit":["EM","EMIT"],
"roughness":["RO","ROUGHNESS"],
"opacity":["OP","TRANSMISSION"],
"albedo":["AT","DIFFUSE"]}
bpy.context.scene.render.image_settings.file_format = "OPEN_EXR"
bpy.context.scene.render.image_settings.color_mode = "RGBA"
bpy.context.scene.render.image_settings.exr_codec = "ZIP"
bpy.context.scene.render.image_settings.color_depth = "16"
#set bake options
bpy.context.scene.render.bake_type = "TEXTURE"
bpy.context.scene.render.bake.use_pass_direct = 0
bpy.context.scene.render.bake.use_pass_indirect = 0
bpy.context.scene.render.bake.use_pass_color = 1
bpy.context.scene.render.bake.use_selected_to_active = selected_to_active
#__________________________________________________________________________________
def mergeObjects():
global selectedObjects
global object
global selObject
    # group the selected objects and the active one
object = bpy.context.active_object
selectedObjects = bpy.context.selected_objects[:].copy()
selectedObjects.remove(bpy.context.active_object)
    # if baking selected-to-active, merge the remaining objects into one mesh
if selected_to_active:
bpy.ops.object.select_all(action="DESELECT")
for o in selectedObjects:
o.select = True
bpy.context.scene.objects.active = selectedObjects[0]
bpy.ops.object.convert(target="MESH", keep_original=True)
selObject = bpy.context.active_object
bpy.ops.object.join()
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True, properties=True)
else:
selObject=bpy.context.active_object
    # set the active object
bpy.context.scene.objects.active = object
#__________________________________________________________________________________
def createTempMats():
global ms
global copyMats
global roughMats
global transMats
global glossyMats
    # list of the original materials
if not selected_to_active:
ms = [mat.material for mat in object.material_slots]
else:
ms = [mat.material for mat in selObject.material_slots]
    # create copied materials and swap them into the slots
for matType in ["_glossyTemp","_copyTemp","_roughnessTemp","_trans"]:
ims = 0
for mat in ms:
mc = mat.copy()
mc.name = mat.name+matType
if not selected_to_active:
object.material_slots[ims].material = mc
else:
selObject.material_slots[ims].material = mc
ims += 1
copyMats = [mat for mat in bpy.data.materials if mat.name.endswith("_copyTemp")]
glossyMats = [mat for mat in bpy.data.materials if mat.name.endswith("_glossyTemp")]
roughMats = [mat for mat in bpy.data.materials if mat.name.endswith("_roughnessTemp")]
transMats = [mat for mat in bpy.data.materials if mat.name.endswith("_trans")]
#__________________________________________________________________________________
# mix glossy: split metallic off into a second Principled node and mix by the metallic factor
def mixGlossy(material):
mat = material
for node in mat.node_tree.nodes[:]:
if node.type == "BSDF_PRINCIPLED":
            nprin = mat.node_tree.nodes.new("ShaderNodeBsdfPrincipled") # new Principled node
mix = mat.node_tree.nodes.new("ShaderNodeMixShader")
mat.node_tree.links.new(mix.inputs[2],nprin.outputs[0])
mat.node_tree.links.new(mix.inputs[1],node.outputs[0])
if node.inputs["Metallic"].is_linked:
mat.node_tree.links.new(mix.inputs[0],node.inputs['Metallic'].links[0].from_socket)
else:
mix.inputs[0].default_value = node.inputs['Metallic'].default_value
            # copy the metallic link into the mix factor
            if node.inputs["Metallic"].is_linked:
                mat.node_tree.links.new(mix.inputs[0],node.inputs["Metallic"].links[0].from_socket)
mat.node_tree.links.new(node.outputs['BSDF'].links[0].to_socket,mix.outputs[0])
            # copy settings from the original Principled node to the new one
for entrada in ["Base Color","Roughness"]:
if node.inputs[entrada].is_linked:
mat.node_tree.links.new(nprin.inputs[entrada],node.inputs[entrada].links[0].from_socket)
nprin.inputs[entrada].default_value = node.inputs[entrada].default_value
node.inputs['Specular'].default_value = 0
            node.inputs['Metallic'].default_value = 0 # both to zero
nprin.inputs['Specular'].default_value = 0
            nprin.inputs['Metallic'].default_value = 1 # new Principled node to 1
for link in mat.node_tree.links:
if link.to_socket.name == "Metallic":
mat.node_tree.links.remove(link)
#__________________________________________________________________________________
# strip metallic from a material
def desmetalizar(material):
    for link in material.node_tree.links:
        if link.to_socket.name == "Metallic":
            material.node_tree.links.remove(link)
    for matnode in material.node_tree.nodes:
        if matnode.type == "BSDF_PRINCIPLED":
            # disconnect metallic and zero it out
            matnode.inputs["Metallic"].default_value = 0
            matnode.inputs["Specular"].default_value = 0
# strip transmission from a material
def destransparentizar(material):
    for link in material.node_tree.links:
        if link.to_socket.name == "Transmission":
            material.node_tree.links.remove(link)
    for matnode in material.node_tree.nodes:
        if matnode.type == "BSDF_PRINCIPLED":
            # disconnect transmission and zero it out
            matnode.inputs["Transmission"].default_value = 0
# zero out every specular input
def desespecular(material):
for matnode in material.node_tree.nodes:
if matnode.type == "BSDF_PRINCIPLED":
matnode.inputs["Specular"].default_value = 0
# set the base color to pure white
def baseColorA1(material):
    for link in material.node_tree.links:
        if link.to_socket.name == "Base Color":
            material.node_tree.links.remove(link)
    for node in material.node_tree.nodes:
        if node.type == "BSDF_PRINCIPLED":
            node.inputs['Base Color'].default_value = (1,1,1,1)
# swap each material slot to the copy with the given suffix
def cambiaSlots(objeto,sufijo):
for ms in objeto.material_slots:
ms.material = bpy.data.materials[ms.material.name.rpartition("_")[0]+sufijo]
#__________________________________________________________________________________
def removeMatProps():
    # strip metallic on the plain copies
    for mat in copyMats:
        desmetalizar(mat)
        destransparentizar(mat)
    # strip speculars on the glossy copies and mix in the metallic pass
    for mat in glossyMats:
        desespecular(mat)
        mixGlossy(mat)
        destransparentizar(mat)
    # push the base colors of the roughness copies to 1
    for mat in roughMats:
        desespecular(mat)
        baseColorA1(mat)
        destransparentizar(mat)
    # strip metallic for the transmission copies
    for mat in transMats:
        desmetalizar(mat)
        desespecular(mat)
        baseColorA1(mat)
#__________________________________________________________________________________
def bake(map):
    # create the target image
imgpath = "%s/IMAGES" % (os.path.dirname(bpy.data.filepath))
img = bpy.data.images.new(channels[map][0], width=sizex, height=sizey, alpha=True,float_buffer=True)
print ("Render: %s" % (channels[map][1]))
img.colorspace_settings.name = 'Linear'
if not selected_to_active:
img.filepath = "%s/%s_%s.exr" % (imgpath, object.name, channels[map][0])
else:
img.filepath = "%s/%s_%s.exr" % (imgpath, object.active_material.name, channels[map][0])
    # switch the material slots to the right temp copies
if channels[map][0] == "ME":
cambiaSlots(selObject,"_glossyTemp")
if channels[map][0] == "RO":
cambiaSlots(selObject,"_roughnessTemp")
if channels[map][0] in ["AT","AO","NM","EM","OP"]:
cambiaSlots(selObject,"_copyTemp")
if channels[map][0] in ["OP"]:
cambiaSlots(selObject,"_trans")
    # create image nodes and bake
if not selected_to_active:
        for activeMat in selObject.data.materials: # mscopy used to be here
            # set up the image node
node = activeMat.node_tree.nodes.new("ShaderNodeTexImage")
node.image = img
activeMat.node_tree.nodes.active = node
node.color_space = "NONE"
node.select = True
else:
activeMat = object.active_material
        # set up the image node
node = activeMat.node_tree.nodes.new("ShaderNodeTexImage")
node.image = img
activeMat.node_tree.nodes.active = node
node.color_space = "NONE"
node.select = True
bpy.ops.object.bake(type=channels[map][1])
img.save_render(img.filepath)
bpy.data.images.remove(img)
print ("%s Done!" % (channels[map][1]))
#__________________________________________________________________________________
def executePbr():
    # bake each selected channel
setSceneOpts()
mergeObjects()
createTempMats()
removeMatProps()
for map in channels.keys():
if getattr(bpy.context.scene.bake_pbr_channels,map):
bake(map)
    # restore the original material slots
for matSlot,rms in zip(selObject.material_slots,ms):
matSlot.material = rms
    # remove the copied materials
for ma in copyMats+glossyMats+roughMats+transMats:
bpy.data.materials.remove(ma)
    # delete the merged object
if selected_to_active:
bpy.data.objects.remove(selObject, do_unlink=True, do_id_user=True, do_ui_user=True)
class BakePbr (bpy.types.Operator):
"""Bake PBR materials"""
bl_idname = "object.bake_pbr_maps"
bl_label = "Bake PBR Maps"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
executePbr()
return {'FINISHED'}
#__________________________________________________________________________________
class bakeChannels(bpy.types.PropertyGroup):
metallic = bpy.props.BoolProperty(name="Metallic",default=False)
occlusion = bpy.props.BoolProperty(name="Occlusion",default=False)
normal = bpy.props.BoolProperty(name="Normal",default=False)
emit = bpy.props.BoolProperty(name="Emit",default=False)
roughness = bpy.props.BoolProperty(name="Roughness",default=False)
opacity = bpy.props.BoolProperty(name="Opacity",default=False)
albedo = bpy.props.BoolProperty(name="Albedo",default=False)
sizex = bpy.props.IntProperty(name="Size x", default= 1024)
sizey = bpy.props.IntProperty(name="Size y", default= 1024)
seltoact = bpy.props.BoolProperty(name="Selected to active", default= True)
bpy.utils.register_class(bakeChannels)
class LayoutDemoPanel(bpy.types.Panel):
"""Creates a Panel in the scene context of the properties editor"""
bl_label = "Bake PBR"
bl_idname = "RENDER_PT_layout"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "render"
def draw(self, context):
layout = self.layout
scene = context.scene
# Create a simple row.
layout.label(text=" Channels:")
row = layout.row()
row.prop(scene.bake_pbr_channels, "metallic")
row = layout.row()
row.prop(scene.bake_pbr_channels, "occlusion")
row = layout.row()
row.prop(scene.bake_pbr_channels, "normal")
row = layout.row()
row.prop(scene.bake_pbr_channels, "emit")
row = layout.row()
row.prop(scene.bake_pbr_channels, "roughness")
row = layout.row()
row.prop(scene.bake_pbr_channels, "opacity")
row = layout.row()
row.prop(scene.bake_pbr_channels, "albedo")
row = layout.row()
row.prop(scene.bake_pbr_channels, "sizex")
row.prop(scene.bake_pbr_channels, "sizey")
row = layout.row()
row.prop(scene.bake_pbr_channels, "seltoact")
# Big render button
row = layout.row()
row.scale_y = 2
row.operator("object.bake_pbr_maps")
#__________________________________________________________________________________
bpy.types.Scene.bake_pbr_channels = bpy.props.PointerProperty(type=bakeChannels)
bpy.utils.register_class(LayoutDemoPanel)
bpy.utils.register_class(BakePbr) |
#!/usr/bin/env python
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: # https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the # specific language governing permissions and limitations
# under the License.
#
##############################################################################
#
#
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def find_files(path_tuples):
    # Expand (source_path, destination) pairs into setuptools data_files
    # entries, walking directories recursively.
    output_array = []
for (path, destination) in path_tuples:
if os.path.isdir(path):
for d in os.walk(path):
if len(d[2]) != 0:
output_dir = d[0].replace(path, destination)
output_files = ["%s/%s" % (d[0], x) for x in d[2]]
output_array.append((output_dir, output_files))
else:
output_array.append((destination, [path]))
return output_array
setup(
name='opencenteragent',
version='0.1',
author='Rackspace US, Inc.',
description=('Yet another pluggable, modular host agent'),
license='Apache2',
url='https://github.com/rpedde/opencenter-agent',
long_description=read('README'),
packages=find_packages(),
data_files=find_files([['opencenteragent/plugins',
'share/opencenter-agent/plugins'],
['opencenter-agent.py', 'bin']])
)
|
Sandy Walker is a recent graduate (2016) from the University of the West of Scotland with a First Class honours degree in Computer Networking. He has joined Orton Search as Consultant Support working closely with both Adrian Orton and James Lyon.
Within Orton Search Sandy’s role will include research, administration and assisting consultants during the recruitment process. Over time it is hoped Sandy will grow into the role of a specialist recruiter.
A keen golfer with a near-scratch handicap, Sandy resides in Ayrshire and enjoys the many golf courses in the surrounding area. He also enjoys football and considers himself an all-round sports fanatic.
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-03-13 11:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0041_data_migration'),
]
operations = [
migrations.AlterField(
model_name='catalog',
name='comment',
field=models.TextField(blank=True, help_text='Additional internal information about this catalog.', verbose_name='Comment'),
),
migrations.AlterField(
model_name='catalog',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this catalog.', max_length=128, verbose_name='Key'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang1',
field=models.CharField(blank=True, help_text='The title for this catalog in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang2',
field=models.CharField(blank=True, help_text='The title for this catalog in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang3',
field=models.CharField(blank=True, help_text='The title for this catalog in the tertiary language.', max_length=256, verbose_name='Title (tertiary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang4',
field=models.CharField(blank=True, help_text='The title for this catalog in the quaternary language.', max_length=256, verbose_name='Title (quaternary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang5',
field=models.CharField(blank=True, help_text='The title for this catalog in the quinary language.', max_length=256, verbose_name='Title (quinary)'),
),
migrations.AlterField(
model_name='catalog',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this catalog (auto-generated).', max_length=640, verbose_name='URI'),
),
migrations.AlterField(
model_name='catalog',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this catalog.', max_length=256, verbose_name='URI Prefix'),
),
migrations.AlterField(
model_name='questionset',
name='comment',
field=models.TextField(blank=True, help_text='Additional internal information about this questionset.', verbose_name='Comment'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang1',
field=models.TextField(blank=True, help_text='The help text for this questionset in the primary language.', verbose_name='Help (primary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang2',
field=models.TextField(blank=True, help_text='The help text for this questionset in the secondary language.', verbose_name='Help (secondary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang3',
field=models.TextField(blank=True, help_text='The help text for this questionset in the tertiary language.', verbose_name='Help (tertiary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang4',
field=models.TextField(blank=True, help_text='The help text for this questionset in the quaternary language.', verbose_name='Help (quaternary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang5',
field=models.TextField(blank=True, help_text='The help text for this questionset in the quinary language.', verbose_name='Help (quinary)'),
),
migrations.AlterField(
model_name='questionset',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this questionset.', max_length=128, verbose_name='Key'),
),
migrations.AlterField(
model_name='questionset',
name='path',
field=models.CharField(blank=True, help_text='The path part of the URI of this questionset (auto-generated).', max_length=512, verbose_name='Path'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang1',
field=models.CharField(blank=True, help_text='The title for this questionset in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang2',
field=models.CharField(blank=True, help_text='The title for this questionset in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang3',
field=models.CharField(blank=True, help_text='The title for this questionset in the tertiary language.', max_length=256, verbose_name='Title (tertiary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang4',
field=models.CharField(blank=True, help_text='The title for this questionset in the quaternary language.', max_length=256, verbose_name='Title (quaternary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang5',
field=models.CharField(blank=True, help_text='The title for this questionset in the quinary language.', max_length=256, verbose_name='Title (quinary)'),
),
migrations.AlterField(
model_name='questionset',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this questionset (auto-generated).', max_length=640, verbose_name='URI'),
),
migrations.AlterField(
model_name='questionset',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this questionset.', max_length=256, verbose_name='URI Prefix'),
),
migrations.AlterField(
model_name='section',
name='comment',
field=models.TextField(blank=True, help_text='Additional internal information about this section.', verbose_name='Comment'),
),
migrations.AlterField(
model_name='section',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this section.', max_length=128, verbose_name='Key'),
),
migrations.AlterField(
model_name='section',
name='path',
field=models.CharField(blank=True, help_text='The path part of the URI of this section (auto-generated).', max_length=512, verbose_name='Label'),
),
migrations.AlterField(
model_name='section',
name='title_lang1',
field=models.CharField(blank=True, help_text='The title for this section in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang2',
field=models.CharField(blank=True, help_text='The title for this section in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang3',
field=models.CharField(blank=True, help_text='The title for this section in the tertiary language.', max_length=256, verbose_name='Title (tertiary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang4',
field=models.CharField(blank=True, help_text='The title for this section in the quaternary language.', max_length=256, verbose_name='Title (quaternary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang5',
field=models.CharField(blank=True, help_text='The title for this section in the quinary language.', max_length=256, verbose_name='Title (quinary)'),
),
migrations.AlterField(
model_name='section',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this section (auto-generated).', max_length=640, verbose_name='URI'),
),
migrations.AlterField(
model_name='section',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this section.', max_length=256, verbose_name='URI Prefix'),
),
]
|
Interview with Claudia Kruse, Managing Director Global Responsible Investment & Governance at APG Asset Management, during the Pensions & Investments WorldPensionSummit 2017, held at the Louwman Museum in The Hague on 25-26 October 2017. The theme of the conference was Investing for the Long-term. Claudia reported on the findings of the European Commission's High-Level Expert Group on sustainable finance, of which she is a member. Its work is an important step in the follow-up to the EU's 2030 Agenda for sustainable development as well as the Paris Agreement on climate change.
from datetime import datetime
from boto.resultset import ResultSet
class Stack:
def __init__(self, connection=None):
self.connection = connection
self.creation_time = None
self.description = None
self.disable_rollback = None
self.notification_arns = []
self.outputs = []
self.parameters = []
self.stack_id = None
self.stack_status = None
self.stack_name = None
self.stack_status_reason = None
self.timeout_in_minutes = None
def startElement(self, name, attrs, connection):
if name == "Parameters":
self.parameters = ResultSet([('member', Parameter)])
return self.parameters
elif name == "Outputs":
self.outputs = ResultSet([('member', Output)])
return self.outputs
else:
return None
def endElement(self, name, value, connection):
if name == 'CreationTime':
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == "Description":
self.description = value
elif name == "DisableRollback":
self.disable_rollback = (value.lower() == 'true')  # bool(value) is True for any non-empty string, including 'false'
elif name == "NotificationARNs":
self.notification_arns = value
elif name == 'StackId':
self.stack_id = value
elif name == 'StackName':
self.stack_name = value
elif name == 'StackStatus':
self.stack_status = value
elif name == "StackStatusReason":
self.stack_status_reason = value
elif name == "TimeoutInMinutes":
self.timeout_in_minutes = int(value)
elif name == "member":
pass
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_stack(stack_name_or_id=self.stack_id)
def describe_events(self, next_token=None):
return self.connection.describe_stack_events(
stack_name_or_id=self.stack_id,
next_token=next_token
)
def describe_resource(self, logical_resource_id):
return self.connection.describe_stack_resource(
stack_name_or_id=self.stack_id,
logical_resource_id=logical_resource_id
)
def describe_resources(self, logical_resource_id=None,
physical_resource_id=None):
return self.connection.describe_stack_resources(
stack_name_or_id=self.stack_id,
logical_resource_id=logical_resource_id,
physical_resource_id=physical_resource_id
)
def list_resources(self, next_token=None):
return self.connection.list_stack_resources(
stack_name_or_id=self.stack_id,
next_token=next_token
)
def update(self):
rs = self.connection.describe_stacks(self.stack_id)
if len(rs) == 1 and rs[0].stack_id == self.stack_id:
self.__dict__.update(rs[0].__dict__)
else:
raise ValueError("%s is not a valid Stack ID or Name" %
self.stack_id)
def get_template(self):
return self.connection.get_template(stack_name_or_id=self.stack_id)
class StackSummary:
def __init__(self, connection=None):
self.connection = connection
self.stack_id = None
self.stack_status = None
self.stack_name = None
self.creation_time = None
self.deletion_time = None
self.template_description = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'StackId':
self.stack_id = value
elif name == 'StackStatus':
self.stack_status = value
elif name == 'StackName':
self.stack_name = value
elif name == 'CreationTime':
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == "DeletionTime":
self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == 'TemplateDescription':
self.template_description = value
elif name == "member":
pass
else:
setattr(self, name, value)
class Parameter:
def __init__(self, connection=None):
self.connection = None
self.key = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "ParameterKey":
self.key = value
elif name == "ParameterValue":
self.value = value
else:
setattr(self, name, value)
def __repr__(self):
return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value)
class Output:
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.key = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
elif name == "OutputKey":
self.key = value
elif name == "OutputValue":
self.value = value
else:
setattr(self, name, value)
def __repr__(self):
return "Output:\"%s\"=\"%s\"" % (self.key, self.value)
class StackResource:
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
self.stack_id = None
self.stack_name = None
self.timestamp = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
elif name == "StackId":
self.stack_id = value
elif name == "StackName":
self.stack_name = value
elif name == "Timestamp":
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
else:
setattr(self, name, value)
def __repr__(self):
return "StackResource:%s (%s)" % (self.logical_resource_id,
self.resource_type)
class StackResourceSummary:
def __init__(self, connection=None):
self.connection = connection
self.last_updated_timestamp = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "LastUpdatedTimestamp":
self.last_updated_timestamp = datetime.strptime(value,
'%Y-%m-%dT%H:%M:%SZ')
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
else:
setattr(self, name, value)
def __repr__(self):
return "StackResourceSummary:%s (%s)" % (self.logical_resource_id,
self.resource_type)
class StackEvent:
valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE",
"DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETE")
def __init__(self, connection=None):
self.connection = connection
self.event_id = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_properties = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
self.stack_id = None
self.stack_name = None
self.timestamp = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "EventId":
self.event_id = value
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceProperties":
self.resource_properties = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
elif name == "StackId":
self.stack_id = value
elif name == "StackName":
self.stack_name = value
elif name == "Timestamp":
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
else:
setattr(self, name, value)
def __repr__(self):
return "StackEvent %s %s %s" % (self.resource_type,
self.logical_resource_id, self.resource_status)
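# --- Illustrative usage (not part of the original module) -----------------
# A minimal sketch of how these container classes are driven through a boto
# CloudFormation connection; the region, credentials, and stack name below
# are assumptions:
#
#   import boto.cloudformation
#   conn = boto.cloudformation.connect_to_region('us-east-1')
#   for summary in conn.list_stacks(stack_status_filters=['CREATE_COMPLETE']):
#       print summary.stack_name, summary.stack_status
#   stacks = conn.describe_stacks('my-stack')  # ResultSet of Stack objects
#   for output in stacks[0].outputs:
#       print output                           # uses Output.__repr__ above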
|
Vector icon set of old American cars isolated on white backgrounds. Every car is on a separate layer. The file contains gradients and blends. |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
class Regularize(object):
def __init__(self, reg_schedule=None, downscale_reg_with_training_set=False,
**kwargs):
"""
@param reg_schedule (dict)
Mapping from epoch number to the reg_weight to use on that timestep and
afterward.
@param downscale_reg_with_training_set (bool)
If True, multiply the regularization term by (1 / size_of_training_set)
"""
super().__init__(**kwargs)
if downscale_reg_with_training_set:
self.reg_coefficient = 1 / len(self.dataset_manager.get_train_dataset(0))
else:
self.reg_coefficient = 1
if reg_schedule is None:
self.reg_schedule = {}
self.reg_weight = 1.0
else:
self.reg_schedule = reg_schedule
self.reg_weight = reg_schedule[0]
def _regularization(self):
reg = None # Perform accumulation on the device.
for layer in self.network.modules():
if hasattr(layer, "regularization"):
if reg is None:
reg = layer.regularization()
else:
reg += layer.regularization()
if reg is None:
return 0
else:
return (self.reg_weight
* self.reg_coefficient
* reg)
def run_epoch(self, iteration):
if iteration in self.reg_schedule:
self.reg_weight = self.reg_schedule[iteration]
return super().run_epoch(iteration)
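# --- Illustrative composition (not part of the original module) -----------
# Regularize is written as a mixin: it assumes a base experiment class that
# provides `network`, `dataset_manager`, and `run_epoch`. A sketch, where the
# base-class and criterion names are assumptions:
#
#   class MyExperiment(Regularize, BaseExperiment):
#       def compute_loss(self, x, y):
#           return self.criterion(self.network(x), y) + self._regularization()
#
#   exp = MyExperiment(reg_schedule={0: 1.0, 30: 0.1},
#                      downscale_reg_with_training_set=True)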
|
The Bounty Hunter Pin Pointer pinpoints the exact location of buried metal objects. The Pin Pointer was designed to be used with any metal detector to help precisely locate coins and other treasures.
It features a single-knob control that adjusts sensitivity and an audio/vibrate indicator, making it easy to use and to find targets. The small, lightweight design makes it easy to handle and carry.
It requires one 9V battery for operation and comes with a one-year warranty from Bounty Hunter. Made in USA. |
"""
Music Scales
Source: http://en.wikipedia.org/wiki/List_of_musical_scales_and_modes
Copyright (C) 2012 Alfred Farrugia
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
ACOUSTIC_SCALE = [0, 2, 4, 6, 7, 9, 10]
ADONAI_MALAKH = [0, 2, 4, 5, 7, 8, 10]
AEOLIAN_MODE = [0, 2, 3, 5, 7, 8, 10]
ALGERIAN_SCALE = [0, 2, 3, 6, 7, 8, 11]
ALTERED_SCALE = [0, 1, 3, 4, 6, 8, 10]
AUGMENTED_SCALE = [0, 3, 4, 7, 8, 11]
BEBOP_DOMINANT = [0, 2, 4, 5, 7, 9, 10, 11]
BLUES_SCALE = [0, 3, 5, 6, 7, 10]
DORIAN_MODE = [0, 2, 3, 5, 7, 9, 10]
DOUBLE_HARMONIC_SCALE = [0, 1, 4, 5, 7, 8, 11]
ENIGMATIC_SCALE = [0, 1, 4, 6, 8, 10, 11]
FLAMENCO_MODE = [0, 1, 4, 5, 7, 8, 11]
GYPSY_SCALE = [0, 2, 3, 6, 7, 8, 10]
HALF_DIMINISHED_SCALE = [0, 2, 3, 5, 6, 8, 10]
HARMONIC_MAJOR_SCALE = [0, 2, 4, 5, 7, 8, 11]
HARMONIC_MINOR_SCALE = [0, 2, 3, 5, 7, 8, 11]
HIRAJOSHI_SCALE = [0, 4, 6, 7, 11]
HUNGARIAN_GYPSY_SCALE = [0, 2, 3, 6, 7, 8, 11]
INSEN_SCALE = [0, 1, 5, 7, 10]
IONIAN_MODE = [0, 2, 4, 5, 7, 9, 11]
IWATO_SCALE = [0, 1, 5, 6, 11]
LOCRIAN_MODE = [0, 1, 3, 5, 6, 8, 10]
LYDIAN_AUGMENTED_SCALE = [0, 2, 4, 6, 8, 9, 11]
LYDIAN_MODE = [0, 2, 4, 6, 7, 9, 11]
MAJOR_LOCRIAN = [0, 2, 4, 5, 6, 8, 10]
MELODIC_MINOR_SCALE = [0, 2, 3, 5, 7, 9, 11]
MIXOLYDIAN_MODE = [0, 2, 4, 5, 7, 9, 10]
NEAPOLITAN_MAJOR_SCALE = [0, 1, 3, 5, 7, 9, 11]
NEAPOLITAN_MINOR_SCALE = [0, 1, 3, 5, 7, 8, 11]
PERSIAN_SCALE = [0, 1, 4, 5, 6, 8, 11]
PHRYGIAN_MODE = [0, 1, 3, 5, 7, 8, 10]
PROMETHEUS_SCALE = [0, 2, 4, 6, 9, 10]
TRITONE_SCALE = [0, 1, 4, 6, 7, 10]
UKRAINIAN_DORIAN_SCALE = [0, 2, 3, 6, 7, 9, 10]
WHOLE_TONE_SCALE = [0, 2, 4, 6, 8, 10]
MAJOR = [0, 2, 4, 5, 7, 9, 11]
MINOR = [0, 2, 3, 5, 7, 8, 10]
"""
Build a scale given an array s
Example: to build a scale between 0 and 128 using the notes C, D, E
buildScale([0,2,4],0,128)
"""
def buildScale(s, min_note=0, max_note=128):
return [x + (12 * j)
for j in range(12)
for x in s
if x + (12 * j) >= min_note and x + (12 * j) <= max_note]
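# Illustrative usage: all C-major pitches in the MIDI range 60-72
# (buildScale treats offset 0 as MIDI note 0, so 60 is middle C).
if __name__ == '__main__':
    print(buildScale(MAJOR, 60, 72))
    # -> [60, 62, 64, 65, 67, 69, 71, 72]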
|
Three University of New Hampshire students in Brussels at the time of the attacks were not hurt, according to a school spokesperson.
Junior Danielle Lewis spoke with necn via Skype from her temporary home in Brussels while studying abroad.
She was inside the airport, exactly where the bombs exploded, just 24 hours earlier.
"I have been there and it is just very freaky," Lewis said.
The New Hampshire native lives less than an hour from the airport.
She says after the attacks, everyone felt vulnerable, unsure of who might be next.
"I was shocked," Lewis said. "I didn't expect it, I was very, very worried for Kelsey's family."
Kelsey Avey is Lewis's roommate. She's from Michigan and also studying abroad.
"It was extremely scary, frightening, unbelievable," Avey said. "The difference that not even five minutes makes."
Avey's mom and aunt were catching a flight out of Brussels Tuesday morning. Her mom would have been in the exact spot of the blast, but something held her back.
"As they were walking in, my aunt dropped her suitcase, so she had gone back to help her," Avey explained.
That delayed them just enough that they felt the explosion, but didn't get hurt.
"They heard two extremely loud explosions and immediately it looked like confetti flying around them," Avey said. "There was a huge gust of wind and it blew them back."
Avey says they ran for their lives and when they were far enough outside the airport, they snapped a photo showing blown out windows and debris.
"I am extremely grateful and happy and lucky," Avey said Wednesday.
The two students will finish the semester in Brussels.
Lewis' dad, back home in Manchester, is proud of that decision.
"We know what happened in Boston - it could happen anywhere. It could happen here, in Manchester," Walter Lewis said. "We are not going to stop living our lives."
Lewis and Avey say changing plans out of fear would allow those responsible to claim victory, and they refuse to let that happen.
"I'm starting to love Brussels, and this isn't going to deter me," Lewis said.
The students say these are days of quiet reflection in Brussels. Their thoughts and prayers are with the victims and their families. |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
from bpy.types import Operator
from bpy.props import IntProperty, FloatProperty, BoolProperty
from .scatter_ui import UI
from .scatter_func import Scatter
class OBJECT_OT_jewelcraft_curve_scatter(UI, Scatter, Operator):
bl_label = "JewelCraft Curve Scatter"
bl_description = "Scatter selected object along active curve"
bl_idname = "object.jewelcraft_curve_scatter"
bl_options = {"REGISTER", "UNDO"}
is_scatter = True
number: IntProperty(name="Object Number", default=10, min=1, soft_max=100)
rot_y: FloatProperty(name="Orientation", step=10, unit="ROTATION")
rot_z: FloatProperty(name="Rotation", step=10, unit="ROTATION")
loc_z: FloatProperty(name="Position", unit="LENGTH")
start: FloatProperty(name="Start")
end: FloatProperty(name="End", default=100.0)
use_absolute_offset: BoolProperty(name="Absolute Offset")
spacing: FloatProperty(name="Spacing", default=0.2, unit="LENGTH")
class OBJECT_OT_jewelcraft_curve_redistribute(UI, Scatter, Operator):
bl_label = "JewelCraft Curve Redistribute"
bl_description = "Redistribute selected objects along curve"
bl_idname = "object.jewelcraft_curve_redistribute"
bl_options = {"REGISTER", "UNDO"}
is_scatter = False
rot_y: FloatProperty(name="Orientation", step=10, unit="ROTATION", options={"SKIP_SAVE"})
rot_z: FloatProperty(name="Rotation", step=10, unit="ROTATION", options={"SKIP_SAVE"})
loc_z: FloatProperty(name="Position", unit="LENGTH", options={"SKIP_SAVE"})
start: FloatProperty(name="Start")
end: FloatProperty(name="End", default=100.0)
use_absolute_offset: BoolProperty(name="Absolute Offset", options={"SKIP_SAVE"})
spacing: FloatProperty(name="Spacing", default=0.2, unit="LENGTH")
|
FTSE 100 industrial equipment firm Ashtead (LON:AHT) increased its revenue for the half-year ended 31st October by 19% to £2.25 billion. Profit before taxation for the period rose by 25% to £610 million.
Chief executive Geoff Drabble said: “The Group delivered a strong quarter with good performance across the Group. As a result, Group rental revenue increased 18% for the six months and underlying pre-tax profit increased 19% to £633m, both at constant exchange rates.
“We have invested £1,063m in capital and a further £362m on bolt-on acquisitions in the period which has added 80 locations and resulted in a rental fleet growth of 15%. This investment reflects the structural growth opportunity that we continue to see in the business as we broaden our product offering and geographic reach, and increase market share.
“Whilst these are significant investments, we remain focused on responsible growth so, after spending £425m to date on our share buyback programme, we have maintained net debt to EBITDA leverage at 1.8 times. Therefore we remain well within our target range of 1.5 to 2.0 times, reflecting the strength of our margins and free cash flow.”
The price of shares in Ashtead rose by 4.33% to 1,676p (as of 12:25 GMT). |
#!/usr/bin/env python
# encoding: utf-8
# Christoph Koke, 2013
# Original source: waflib/extras/clang_compilation_database.py from
# waf git 15d14c7bdf2e (New BSD License)
"""
Writes the c and cpp compile commands into build/compile_commands.json
see http://clang.llvm.org/docs/JSONCompilationDatabase.html
Usage:
def configure(conf):
conf.load('compiler_cxx')
...
conf.load('clang_compilation_database')
"""
import sys, os, json, shlex, pipes
from waflib import Logs, TaskGen
from waflib.Tools import c, cxx
if sys.hexversion >= 0x3030000:
quote = shlex.quote
else:
quote = pipes.quote
@TaskGen.feature('*')
@TaskGen.after_method('process_use')
def collect_compilation_db_tasks(self):
"Add a compilation database entry for compiled tasks"
try:
clang_db = self.bld.clang_compilation_database_tasks
except AttributeError:
clang_db = self.bld.clang_compilation_database_tasks = []
self.bld.add_post_fun(write_compilation_database)
for task in getattr(self, 'compiled_tasks', []):
if isinstance(task, (c.c, cxx.cxx)):
clang_db.append(task)
def write_compilation_database(ctx):
"Write the clang compilation database as JSON"
database_file = ctx.bldnode.make_node('compile_commands.json')
Logs.info("Build commands will be stored in %s" % database_file.path_from(ctx.path))
try:
root = json.load(database_file)
except IOError:
root = []
clang_db = dict((x["file"], x) for x in root)
for task in getattr(ctx, 'clang_compilation_database_tasks', []):
try:
cmd = task.last_cmd
except AttributeError:
continue
directory = getattr(task, 'cwd', ctx.variant_dir)
f_node = task.inputs[0]
filename = os.path.relpath(f_node.abspath(), directory)
cmd = " ".join(map(quote, cmd))
entry = {
"directory": directory,
"command": cmd,
"file": filename,
}
clang_db[filename] = entry
root = list(clang_db.values())
database_file.write(json.dumps(root, indent=2))
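# --- Illustrative output (not part of the original module) ----------------
# Each entry written to compile_commands.json has the shape below; the
# paths and flags are assumptions:
#
#   {
#     "directory": "/home/user/project/build",
#     "command": "g++ -O2 -c ../src/main.cpp",
#     "file": "../src/main.cpp"
#   }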
|
Riverside is a city in Riverside County, California, United States, located in the Inland Empire metropolitan area. Start off your visit on the 6th (Sat): brush up on your military savvy at March Field Air Museum, get your game on at Gold Rush Mining Adventures, and then fly down the slopes at Snow Summit. On the next day, stop by Village Sweet Shoppe, then don't miss a visit to Alpine Slide at Magic Mountain, and then fly down the slopes at Big Bear Lake.
For photos, more things to do, traveler tips, and other tourist information, read the Riverside day trip website.
July in Riverside sees daily highs of 94°F and lows of 66°F at night. Wrap up your sightseeing on the 7th (Sun) to allow time to travel back home. |
# -*- coding: utf-8 -*-
'''
pyLauncher: Windows Application Launcher
Copyright (C) Blaga Florentin Gabriel
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
info = {"name" : "pyLWeb",
"author" : "Blaga Florentin Gabriel <https://github.com/BGS/pyLauncher>",
"category": "pylSearchExtensions",
"version": "1.0",
"class" : "execWebSearch"}
class execWebSearch():
def parseQuery(self, query):
query = query.split()
args = query[1:]
if query:
if query[0] == "google":
os.startfile("http://www.google.com/search?source=pyLauncher&q=%s" % " ".join(args))
elif query[0] == "wikipedia":
os.startfile("http://en.wikipedia.org/wiki/Special:Search?search=%s&fulltext=Search" % " ".join(args))
elif query[0] == "youtube":
os.startfile("http://www.youtube.com/results?search_query=%s" % " ".join(args))
else:
pass
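# --- Illustrative use (not part of the original plugin) -------------------
# A sketch of how pyLauncher would drive this extension; note that
# os.startfile is Windows-only:
#
#   execWebSearch().parseQuery("google waf build system")
#   # opens a Google search for "waf build system" in the default browser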
|
artists from various disciplines. Boris Hoppek, Harald Naegeli, Honet, Jim Avignon, Lily & Moki and Os Gemeos are some of the artists who exhibited at Galerie Revolver.
'All over the Kiez' was a photography exhibition that took place in, and also featured images of, St. Pauli, Hamburg. The curiosity of this event was that it did not take place within a gallery, but all around St. Pauli: 52 photographs were spread across the district and hung where they had been taken - in the streets, on buildings, wherever it was possible to place them. |
"""
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
General purpose:
................
The class ModelSpectraStacks is dedicated to modelling and extracting information from stacks of spectra.
*Imports*::
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import os
import astropy.cosmology as co
cosmo=co.FlatLambdaCDM(H0=70,Om0=0.3)
import astropy.units as u
import astropy.io.fits as fits
import numpy as n
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy.stats import scoreatpercentile
import astropy.io.fits as fits
from lineListAir import *
import LineFittingLibrary as lineFit
"""
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import os
from os.path import join
import astropy.cosmology as co
cosmo=co.Planck13 #co.FlatLambdaCDM(H0=70,Om0=0.3)
import astropy.units as u
import astropy.io.fits as fits
import numpy as n
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy.stats import scoreatpercentile
import astropy.io.fits as fits
from lineListVac import *
allLinesList = n.array([ [Ne3,Ne3_3869,"Ne3_3869","left"], [Ne3,Ne3_3968,"Ne3_3968","left"], [O3,O3_4363,"O3_4363","right"], [O3,O3_4960,"O3_4960","left"], [O3,O3_5007,"O3_5007","right"], [H1,H1_3970,"H1_3970","right"], [H1,H1_4102,"H1_4102","right"], [H1,H1_4341,"H1_4341","right"], [H1,H1_4862,"H1_4862","left"]])
# other lines that are optional
# [N2,N2_6549,"N2_6549","left"], [N2,N2_6585,"N2_6585","right"] , [H1,H1_6564,"H1_6564","left"]
# , [S2,S2_6718,"S2_6718","left"], [S2,S2_6732,"S2_6732","right"], [Ar3,Ar3_7137,"Ar3_7137","left"], [H1,H1_1216,"H1_1216","right"]
doubletList = n.array([[O2_3727,"O2_3727",O2_3729,"O2_3729",O2_mean]])
# import the fitting routines
import LineFittingLibrary as lineFit
#O2a=3727.092
#O2b=3729.875
#O2=(O2a+O2b)/2.
#Hg=4102.892
#Hd=4341.684
#Hb=4862.683
#O3a=4960.295
#O3b=5008.240
#Ha=6564.61
fnu = lambda mAB : 10**(-(mAB+48.6)/2.5) # erg/cm2/s/Hz
flambda= lambda mAB, ll : 10**10 * c*1000 * fnu(mAB) / ll**2. # erg/cm2/s/A
kla=lambda ll :2.659 *(-2.156+1.509/ll-0.198/ll**2+0.011/ll**3 ) + 4.05
klb=lambda ll :2.659 *(-1.857+1.040/ll)+4.05
def kl(ll):
"""Calzetti extinction law"""
if ll>6300:
return klb(ll)
if ll<=6300:
return kla(ll)
class ModelSpectraStacks:
"""
This class fits the emission lines on the continuum-subtracted stack.
:param stack_file: fits file generated with a LF in a luminosity bin.
:param cosmo: cosmology class from astropy
:param firefly_min_wavelength: minimum wavelength considered by firefly (default : 1000)
:param firefly_max_wavelength: minimum wavelength considered by firefly (default : 7500)
:param dV: default value that hold the place (default : -9999.99)
:param N_spectra_limitFraction: If the stack was made with N spectra. N_spectra_limitFraction selects the points that have were computed using more thant N_spectra_limitFraction * N spectra. (default : 0.8)
"""
def __init__(self, stack_file, model_file, mode="MILES", cosmo=cosmo, firefly_min_wavelength= 1000., firefly_max_wavelength=7500., dV=-9999.99, N_spectra_limitFraction=0.8, tutorial = False, eboss_stack = False):
self.stack_file = stack_file
self.stack_file_base = os.path.basename(stack_file)[:-5]
self.lineName = self.stack_file_base[:7]
self.stack_model_file = model_file
self.mode = mode
self.tutorial = tutorial
self.eboss_stack = eboss_stack
# retrieves the firefly model for the stack: stack_model_file
"""
if self.mode=="MILES":
self.stack_model_file = join( os.environ['SPECTRASTACKS_DIR'], "fits", self.lineName, self.stack_file_base + "-SPM-MILES.fits")
if self.mode=="STELIB":
self.stack_model_file = join( os.environ['SPECTRASTACKS_DIR'], "fits", self.lineName, self.stack_file_base + "-SPM-STELIB.fits")
"""
if self.tutorial :
self.stack_model_file = join( os.environ['DATA_DIR'], "ELG-composite", self.stack_file_base + "-SPM-MILES.fits")
if self.mode=="EBOSS": #eboss_stack :
self.stack_model_file = join(os.environ['EBOSS_TARGET'],"elg", "tests", "stacks", "fits", self.stack_file_base[:-6]+ "-SPM-MILES.fits")
self.redshift = 0.85
else :
self.redshift = float(self.stack_file_base.split('-')[2].split('_')[0][1:])
self.cosmo = cosmo
self.firefly_max_wavelength = firefly_max_wavelength
self.firefly_min_wavelength = firefly_min_wavelength
self.dV = dV
self.side = ''
self.N_spectra_limitFraction = N_spectra_limitFraction
# define self.sphereCM, find redshift ...
sphere=4*n.pi*( self.cosmo.luminosity_distance(self.redshift) )**2.
self.sphereCM=sphere.to(u.cm**2)
self.hdus = fits.open(self.stack_file)
self.hdR = self.hdus[0].header
self.hdu1 = self.hdus[1] # .data
print "Loads the data."
#print self.hdu1.data.dtype
if self.tutorial :
wlA, flA, flErrA = self.hdu1.data['WAVE'][0], self.hdu1.data['FLUXMEDIAN'][0]*10**(-17), self.hdu1.data['FLUXMEDIAN_ERR'][0]*10**(-17)
self.selection = (flA>0)
self.wl,self.fl,self.flErr = wlA[self.selection], flA[self.selection], flErrA[self.selection]
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# loads model :
hdus = fits.open(self.stack_model_file)
self.hdu2 = hdus[1] # .data
self.wlModel,self.flModel = self.hdu2.data['wavelength'], self.hdu2.data['firefly_model']*10**(-17)
self.model=interp1d(n.hstack((self.wlModel,[n.max(self.wlModel)+10,11000])), n.hstack(( self.flModel, [n.median(self.flModel[:-20]),n.median(self.flModel[:-20])] )) )
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.fl_frac_LineSpectrum=n.array([self.stack(xx)/self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
elif eboss_stack :
print self.hdu1.data.dtype
wlA,flA,flErrA = self.hdu1.data['wavelength'], self.hdu1.data['meanWeightedStack']*10**(-17), self.hdu1.data['jackknifStackErrors'] * 10**(-17)
self.selection = (flA>0)
self.wl,self.fl,self.flErr = wlA[self.selection], flA[self.selection], flErrA[self.selection]
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# loads model :
hdus = fits.open(self.stack_model_file)
self.hdu2 = hdus[1] # .data
self.wlModel,self.flModel = self.hdu2.data['wavelength'], self.hdu2.data['firefly_model']*10**(-17)
self.model=interp1d(n.hstack((self.wlModel,[n.max(self.wlModel)+10,11000])), n.hstack(( self.flModel, [n.median(self.flModel[:-20]),n.median(self.flModel[:-20])] )) )
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.fl_frac_LineSpectrum=n.array([self.stack(xx)/self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
else:
wlA,flA,flErrA = self.hdu1.data['wavelength'], self.hdu1.data['meanWeightedStack'], self.hdu1.data['jackknifStackErrors']
self.selection = (flA>0) & (self.hdu1.data['NspectraPerPixel'] > float( self.stack_file.split('_')[-5]) * self.N_spectra_limitFraction )
self.wl,self.fl,self.flErr = wlA[self.selection], flA[self.selection], flErrA[self.selection]
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# loads model :
hdus = fits.open(self.stack_model_file)
self.hdu2 = hdus[1] # .data
self.wlModel,self.flModel = self.hdu2.data['wavelength'], self.hdu2.data['firefly_model']*10**(-17)
self.model=interp1d(n.hstack((self.wlModel,[n.max(self.wlModel)+10,11000])), n.hstack(( self.flModel, [n.median(self.flModel[:-20]),n.median(self.flModel[:-20])] )) )
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.fl_frac_LineSpectrum=n.array([self.stack(xx)/self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
def interpolate_stack(self):
"""
Divides the measured stack in overlapping and non-overlapping parts with the model.
"""
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# bluer than model
self.stBlue = (self.wl<=self.firefly_min_wavelength)
# optical
self.stOpt = (self.wl<self.firefly_max_wavelength)& (self.wl> self.firefly_min_wavelength)
# redder than model
self.stRed = (self.wl>=self.firefly_max_wavelength)
if len(self.wl)<50 :
print "no data, skips spectrum"
return 0.
if len(self.wl[self.stBlue])>0:
self.contBlue=n.median(self.fl[self.stBlue])
self.side='blue'
if len(self.wl[self.stRed])>0:
self.contRed=n.median(self.fl[self.stRed])
self.side='red'
if len(self.wl[self.stRed])>0 and len(self.wl[self.stBlue])>0:
self.contRed=n.median(self.fl[self.stRed])
self.contBlue=n.median(self.fl[self.stBlue])
self.side='both'
if len(self.wl[self.stRed])==0 and len(self.wl[self.stBlue])==0:
self.side='none'
def interpolate_model(self):
"""
Interpolates the model to an array with the same coverage as the stack.
"""
# overlap region with stack
print "interpolate model"
self.mdOK =(self.wlModel>n.min(self.wl))&(self.wlModel<n.max(self.wl))
mdBlue=(self.wlModel<=n.min(self.wl)) # bluer part than data
mdRed=(self.wlModel>=n.max(self.wl)) # redder part than data
okRed=(self.wlModel>4650)&(self.wlModel<self.firefly_max_wavelength)
# Correction model => stack
CORRection=n.sum((self.wl[self.stOpt][1:]-self.wl[self.stOpt][:-1])* self.fl[self.stOpt][1:]) / n.sum((self.wlModel[ self.mdOK ][1:]-self.wlModel[ self.mdOK ][:-1])* self.flModel [ self.mdOK ][1:])
print "Correction", CORRection
if self.side=='red':
self.model=interp1d(n.hstack((self.wlModel[ self.mdOK ],n.arange(self.wlModel[ self.mdOK ].max()+0.5, self.stack.x.max(), 0.5))), n.hstack(( self.flModel [ self.mdOK ]*CORRection, n.ones_like(n.arange( self.wlModel[ self.mdOK ].max() + 0.5, self.stack.x.max(), 0.5))*self.contRed )) )
elif self.side=='blue':
self.model=interp1d(n.hstack((n.arange(self.stack.x.min(),self.wlModel[ self.mdOK ].min()-1., 0.5),self.wlModel[ self.mdOK ])),n.hstack(( n.ones_like(n.arange(self.stack.x.min() ,self.wlModel[ self.mdOK ].min() -1.,0.5))* self.contBlue, self.flModel [ self.mdOK ]*CORRection )) )
elif self.side=='both':
x1=n.hstack((n.arange(self.stack.x.min(),self.wlModel[ self.mdOK ].min()-1., 0.5), self.wlModel[ self.mdOK ]))
y1=n.hstack(( n.ones_like(n.arange(self.stack.x.min(),self.wlModel[ self.mdOK ].min()- 1.,0.5))*self.contBlue, self.flModel [ self.mdOK ]*CORRection ))
x2=n.hstack((x1,n.arange(self.wlModel[ self.mdOK ].max()+0.5,self.stack.x.max(),0.5)))
y2=n.hstack((y1,n.ones_like(n.arange(self.wlModel[ self.mdOK ].max()+0.5, self.stack.x.max(), 0.5))*self.contRed ))
self.model=interp1d(x2,y2)
elif self.side=='none':
self.model=interp1d(self.wlModel[ self.mdOK ], self.flModel [ self.mdOK ])
def subtract_continuum_model(self):
"""
Creates the continuum substracted spectrum: the 'line' spectrum.
"""
self.interpolate_stack()
self.interpolate_model()
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
print "range probed", self.wlLineSpectrum[0], self.wlLineSpectrum[-1], len( self.wlLineSpectrum)
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
def fit_lines_to_lineSpectrum(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to the line spectrum"
lfit = lineFit.LineFittingLibrary()
#self.subtract_continuum_model()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
print hI, dat_mean
d_out=[]
for kk in range(10):
fluxRR = interp1d(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection])
flLineSpectrumRR=n.array([fluxRR(xx)-self.model(xx) for xx in self.wlLineSpectrum])
d1,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wlLineSpectrum, flLineSpectrumRR, self.flErrLineSpectrum, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
#print "jk out", d_out
err_out = n.std(d_out,axis=0)
#print "before", err_out, dat_mean
# assign error values :
dat_mean[3] = err_out[3-1]
dat_mean[5] = err_out[5-1]
dat_mean[7] = err_out[7-1]
#print "after", dat_mean
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
# measure line properties from the mean weighted stack
print li[2]
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
print hI, dat_mean
# measure its dispersion using the stacks
d_out=[]
for kk in range(len(self.hdu1.data['jackknifeSpectra'].T)):
fluxRR = interp1d(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection])
flLineSpectrumRR=n.array([fluxRR(xx)-self.model(xx) for xx in self.wlLineSpectrum])
d1,mI,hI=lfit.fit_Line_position_C0noise(self.wlLineSpectrum, flLineSpectrumRR, self.flErrLineSpectrum, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
err_out = n.std(d_out,axis=0)
# assign error values :
dat_mean[2] = err_out[2-1]
dat_mean[4] = err_out[4-1]
dat_mean[6] = err_out[6-1]
data.append(dat_mean)
h.append(hI)
heading="".join(h)
out=n.hstack((data))
#print "out", out
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.lineSpec_cols = fits.ColDefs([col0, col1])
#print self.lineSpec_cols
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.lineSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
#print self.lineSpec_cols
self.lineSpec_tb_hdu = fits.BinTableHDU.from_columns(self.lineSpec_cols)
def fit_lines_to_fullSpectrum(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to full spectrum"
lfit = lineFit.LineFittingLibrary()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wl, self.fl, self.flErr, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
print hI, dat_mean
d_out=[]
for kk in range(10):
d1,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection], self.flErr , a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
#print "jk out", d_out
err_out = n.std(d_out,axis=0)
#print "before", err_out, dat_mean
# assign error values :
dat_mean[3] = err_out[3-1]
dat_mean[5] = err_out[5-1]
dat_mean[7] = err_out[7-1]
#print "after", dat_mean
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
print li[2]
# measure line properties from the mean weighted stack
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wl, self.fl, self.flErr, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
print hI, dat_mean
# measure its dispersion using the stacks
d_out=[]
for kk in range(len(self.hdu1.data['jackknifeSpectra'].T)):
d1,mI,hI=lfit.fit_Line_position_C0noise(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection], self.flErr, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
err_out = n.std(d_out,axis=0)
# assign error values :
dat_mean[2] = err_out[2-1]
dat_mean[4] = err_out[4-1]
dat_mean[6] = err_out[6-1]
data.append(dat_mean)
#print li[2], dat_mean
h.append(hI)
heading="".join(h)
out=n.hstack((data))
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.fullSpec_cols = fits.ColDefs([col0, col1])
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.fullSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
self.fullSpec_tb_hdu = fits.BinTableHDU.from_columns(self.fullSpec_cols)
def fit_lines_to_lineSpectrum_tutorial(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to the line spectrum"
lfit = lineFit.LineFittingLibrary()
#self.subtract_continuum_model()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
# measure line properties from the mean weighted stack
print li[2]
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
data.append(dat_mean)
h.append(hI)
heading="".join(h)
out=n.hstack((data))
#print "out", out
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.lineSpec_cols = fits.ColDefs([col0, col1])
#print self.lineSpec_cols
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.lineSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
#print self.lineSpec_cols
self.lineSpec_tb_hdu = fits.BinTableHDU.from_columns(self.lineSpec_cols)
def fit_lines_to_fullSpectrum_tutorial(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to full spectrum"
lfit = lineFit.LineFittingLibrary()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wl, self.fl, self.flErr, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
print hI, dat_mean
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
print li[2]
# measure line properties from the mean weighted stack
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wl, self.fl, self.flErr, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
print hI, dat_mean
# measure its dispersion using the stacks
data.append(dat_mean)
#print li[2], dat_mean
h.append(hI)
heading="".join(h)
out=n.hstack((data))
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.fullSpec_cols = fits.ColDefs([col0, col1])
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.fullSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
self.fullSpec_tb_hdu = fits.BinTableHDU.from_columns(self.fullSpec_cols)
def save_spectrum(self):
"""
Saves the stack spectrum, the model and derived quantities in a single fits file with different hdus.
"""
wavelength = fits.Column(name="wavelength",format="D", unit="Angstrom", array= self.wlLineSpectrum)
flux = fits.Column(name="flux",format="D", unit="Angstrom", array= self.flLineSpectrum)
fluxErr = fits.Column(name="fluxErr",format="D", unit="Angstrom", array= self.flErrLineSpectrum)
# new columns
cols = fits.ColDefs([wavelength, flux, fluxErr])
lineSptbhdu = fits.BinTableHDU.from_columns(cols)
# previous file
prihdu = fits.PrimaryHDU(header=self.hdR)
thdulist = fits.HDUList([prihdu, self.hdu1, self.hdu2, lineSptbhdu, self.lineSpec_tb_hdu, self.fullSpec_tb_hdu])
outPutFileName = self.stack_model_file
outFile = n.core.defchararray.replace(outPutFileName, "fits", "model").item()
if self.tutorial:
outFile = join( os.environ['DATA_DIR'], "ELG-composite", self.stack_file_base[:-5]+".model" )
if self.eboss_stack:
#outFile = join(os.environ['DATA_DIR'],"ELG-composite", "stacks", "model", self.stack_file_base[:-6] + ".model.fits")
outFile = join(os.environ['EBOSS_TARGET'],"elg", "tests", "stacks", "model", self.stack_file_base[:-6] + ".model")
if os.path.isfile(outFile):
os.remove(outFile)
thdulist.writeto(outFile)
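# --- Illustrative driver (not part of the original module) ----------------
# A minimal sketch of the intended pipeline; the file names and the MILES
# mode are assumptions:
#
#   mm = ModelSpectraStacks("O2_3728-stack.fits",
#                           "O2_3728-stack-SPM-MILES.fits", mode="MILES")
#   mm.fit_lines_to_lineSpectrum()
#   mm.fit_lines_to_fullSpectrum()
#   mm.save_spectrum()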
|
The results of the German American Business Outlook 2019 are now available.
Wunderbar Together is a year-long festival celebrating the transatlantic partnership between the United States and Germany through dialogue, experience, and exchange.
As part of a strong network including 2,500 member firms and 7 offices, we are your capable and experienced partner in the United States with locations in New York, Philadelphia, and San Francisco.
Our expert team of consultants and attorneys will help your business to enter the U.S. market, as well as provide diverse support in transactions between Germany and the U.S.
The German American Chamber of Commerce, Inc. in New York has promoted commerce between Germany and the U.S. for 70 years. During this time we have established ourselves as a modern service provider and a reliable partner in German-American business inquiries. |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Anders Nylund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
from argparse import ArgumentParser
from protocol import Proxy
PORT = 8483 # Controller port
PASSWORD = '0123456789'
def getfunc(args, proxy):
l = proxy.get(args.path)
print('\n'.join(l))
def setfunc(args, proxy):
l = proxy.set(args.path, args.value)
print('\n'.join(l))
def rawfunc(args, proxy):
response = proxy.make_request(int(args.function), args.payload)
if args.verbose:
print('response from:', proxy.addr)
print('IP:', proxy.ip, 'Serial', proxy.serial)
print('received: ' + (response.framedata[1:-1]).decode('ascii'))
print(' status: %d'%response.status)
print(' function: %d'%response.function)
print(' payload:\n ' + '\n '.join(response.payload.split(';')))
else:
print('\n'.join(response.payload.split(';')))
if __name__ == '__main__':
argparser = ArgumentParser()
argparser.add_argument('-v', '--verbose', action='store_true')
argparser.add_argument('-a', '--address', default=None, help='controller address, autodiscovered if omitted')
argparser.add_argument('-p', '--password', default=PASSWORD)
argparser.add_argument('-s', '--serial', default='000000')
subparsers = argparser.add_subparsers(help='sub-command help')
# create the parser for the "raw" command
parser_b = subparsers.add_parser('raw', help='')
parser_b.add_argument('function', help='')
parser_b.add_argument('payload', help='')
parser_b.set_defaults(func=rawfunc)
# create the parser for the "set" command
parser_b = subparsers.add_parser('set', help='write item value')
parser_b.add_argument('path', nargs='?', default = '*', help='path to write')
parser_b.add_argument('value', nargs='?', help='value to write')
parser_b.set_defaults(func=setfunc)
# create the parser for the "get" command
parser_c = subparsers.add_parser('get', help='get all items')
parser_c.add_argument('path', nargs='?', default = '*', help='partial of full path to item')
parser_c.set_defaults(func=getfunc)
args = argparser.parse_args()
if args.address is None:
with Proxy.discover(args.password, PORT, args.serial) as proxy:
args.func(args, proxy)
else:
with Proxy(args.password, PORT, args.address, args.serial) as proxy:
args.func(args, proxy)
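# --- Illustrative invocations (not part of the original script) -----------
# The script name, controller address, and item paths below are assumptions:
#
#   ./pecontrol.py -a 192.168.1.10 get
#   ./pecontrol.py -a 192.168.1.10 get 'boiler'
#   ./pecontrol.py -a 192.168.1.10 -p 0123456789 set 'boiler_temp' 75
#   ./pecontrol.py -v raw 0 ''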
|
Tuesday, the Federal Government disclosed plans to increase the price of petrol next month. This came after the Acting Executive Secretary of the Petroleum Products Pricing Regulatory Agency, PPPRA, Mrs. Sotonye Iyoyo, asked the Federal Government to automate the operations of the PPPRA to improve transparency and form a pricing framework to encourage local refining and discourage importation of petroleum products in the long term.
Senator Mao Ohuabunwa has asked President Muhammadu Buhari to apologize to Nigerians for his inability to handle the country’s economy and for creating hardship for them with the fuel crisis. Ohuabunwa said: “It is unfortunate that Nigerians would have to go through this hardship in the midst of plenty; we are acclaimed the fifth largest oil producing nation. I think it is important that the government in power apologizes to Nigerians because the level of hardship is getting too much.” He continued: “It is very clear that this scarcity has gone off hand, we cannot manage it, we have been on it for over two months now. The Minister of State has said it would abate in two weeks, but the Minister of Petroleum who is President Buhari has not said anything. We should look for a lasting solution so that we do not relapse into this again”.
In Abuja, Minister of State for Petroleum Resources, Dr. Ibe Kachikwu, stated that the fuel scarcity would end in Lagos and Abuja tomorrow, while it would end in other states by the end of the week. He also stated that the Federal Government was considering the privatization of the petroleum industry, so that the operators would source and run the industry, while the government would only be involved in price regulation. He explained that by regulating petrol prices, the country had been able to save a lot of money within the first three months of the year, adding that the savings would be used to fund the gap recorded in pricing in April.
He said: “The reality is that in the first three months of the price modulation, our recovery basically enabled us to save quite a lot of money and that is going to fund the gap that we see in April. But for May, obviously, the prices would have to be adjusted to mark the current trends. Our proprietary positions are for two different concerns. As petroleum Minister, my goal is to make sure that the price of crude oil continues to climb, but as NNPC GMD, our goal is to ensure that people buy product at cheaper price. They do not go together somehow. So most times, I try to balance the curve. The whole fuel queues had been a complete nightmare for me. The reality is that I hurt more than every Nigerian who is at the filling stations. I am very emotional about my job and the things I do. We just need to take the right policies. As hard as they are, as difficult as they come, we need to take the right policies to ensure that we do not have a recurrence of this fuel subsidy. It has been with us historically, but I don’t want that to define my legacy in the petroleum sector. Hopefully, by tomorrow, Thursday, the fuel queues in Abuja should be over. Hopefully, the same thing should happen in Lagos and, thereafter, by the weekend, we should see same in Kano, Katsina, Sokoto, Port Harcourt and a few other states,” he said. |
#!/usr/bin/env python
"""Provide a interface for simulating master worker computing on a desktop grid based on traces.
Thanks to Derrick Kondo for the idea.
"""
__author__ = "Eric Heien <pymw@heien.org>"
__date__ = "2 May 2009"
import errno
import heapq
import array
# TODO: have some sort of wraparound for worker availability intervals,
# or cleanly error out for workers that are no longer available
class SimWorker:
def __init__(self, worker_name, worker_speed, worker_avail_lens, worker_avail_fracs):
self._name = worker_name
self._speed = worker_speed
self._avail_lens = array.ArrayType('f')
self._avail_fracs = array.ArrayType('f')
self._avail_lens.fromlist(worker_avail_lens)
self._avail_fracs.fromlist(worker_avail_fracs)
self._avail_ind = 0
self._cur_time = 0
self._sub_avail_time = 0
self._task_wall_times = []
self._task_cpu_times = []
# TODO: handle going out of bounds on avail array
# Simulates the worker performing cpu_secs
# Returns the actual wall time to complete this
def run_cpu(self, cpu_secs):
self._task_cpu_times.append(cpu_secs)
wall_exec_time = 0
while cpu_secs > 0:
# Calculate the speed of this worker during the interval
worker_int_speed = self._avail_fracs[self._avail_ind] * self._speed
# Determine the remaining length of this interval
int_remaining_secs = self._avail_lens[self._avail_ind] - self._sub_avail_time
# Determine the available CPU seconds in this interval
int_cpu_secs = int_remaining_secs * worker_int_speed
# If we won't finish the task in this interval
if int_cpu_secs < cpu_secs:
# Move to the next interval
wall_exec_time += int_remaining_secs
self._avail_ind += 1
self._sub_avail_time = 0
cpu_secs -= int_cpu_secs
else:
# Move to the middle of this interval
executed_secs = cpu_secs/worker_int_speed
wall_exec_time += executed_secs
self._sub_avail_time += executed_secs
cpu_secs = 0
self._cur_time += wall_exec_time
self._task_wall_times.append(wall_exec_time)
# Advances the wall time of this worker by wall_secs
# If the worker is not available at the new time,
# advances the wall time further until the worker is available
def advance_wall_time(self, wall_secs):
rem_secs = wall_secs
# Advance the availablity interval pointer until we've passed wall_secs
while rem_secs > 0:
int_remaining_secs = self._avail_lens[self._avail_ind] - self._sub_avail_time
if int_remaining_secs < rem_secs:
rem_secs -= int_remaining_secs
self._sub_avail_time = 0
self._avail_ind += 1
else:
self._sub_avail_time += rem_secs
rem_secs = 0
# Advance until we're in an available state
additional_secs = 0
while self._avail_fracs[self._avail_ind] == 0:
additional_secs += self._avail_lens[self._avail_ind] - self._sub_avail_time
self._avail_ind += 1
self._sub_avail_time = 0
# Advance the current simulation time
self._cur_time += wall_secs + additional_secs
# Test if this worker is available at sim_time
def past_sim_time(self, sim_time):
if sim_time >= self._cur_time: return True
else: return False
def __str__(self):
return self._name
def __repr__(self):
return self._name
def __cmp__(self, other):
    # __cmp__ must return an int (Python 2); cmp() handles the float times
    return cmp(self._cur_time, other._cur_time)
class GridSimulatorInterface:
def __init__(self, trace_files=[]):
self._cur_sim_time = 0
self._num_executed_tasks = 0
self._worker_list = []
self._waiting_list = []
def add_worker(self, worker):
# Advance the new worker to its first available time
worker.advance_wall_time(0)
# If the new worker isn't available at the start, put it on the waiting list
if not worker.past_sim_time(0):
self._worker_list.append(worker)
else:
heapq.heappush(self._waiting_list, worker)
def generate_workers(self, num_workers, speed_func, avail_func):
for wnum in range(num_workers):
new_worker_speed = speed_func(wnum)
new_worker_avail_lens, new_worker_avail_fracs = avail_func(wnum)
new_worker = SimWorker("W"+str(wnum), new_worker_speed, new_worker_avail_lens, new_worker_avail_fracs)
self.add_worker(new_worker)
def read_workers_from_fta_tab_files(self, event_trace_file, num_workers=None):
if event_trace_file:
worker_dict = {}
event_trace_file.readline() # skip the header line
for line in event_trace_file:
split_line = line.split()
node_id, start_time, stop_time = split_line[2], float(split_line[6]), float(split_line[7])
if node_id not in worker_dict:
if num_workers and len(worker_dict) >= num_workers: break
else: worker_dict[node_id] = []
worker_dict[node_id].append([start_time, stop_time])
for worker_id in worker_dict:
avail_lens = []
avail_fracs = []
worker_times = worker_dict[worker_id]
last_interval_end = 0
for int_time in worker_times:
    # Gap before this interval: the worker was unavailable
    gap_length = int_time[0] - last_interval_end
    if gap_length > 0:
        avail_lens.append(gap_length)
        avail_fracs.append(0.0)
    # The interval itself: the worker was fully available
    avail_lens.append(int_time[1] - int_time[0])
    avail_fracs.append(1.0)
    last_interval_end = int_time[1]
# FTA traces carry no speed information, so unit speed is assumed here
self.add_worker(SimWorker(worker_id, 1.0, avail_lens, avail_fracs))
# If none of the workers matched the available tasks and there are still workers in the wait queue,
# advance simulation time and tell PyMW to try again
def try_avail_check_again(self):
if len(self._waiting_list) == 0:
return False
self._cur_sim_time = self._waiting_list[0]._cur_time
return True
def get_available_workers(self):
# Pop workers off the sorted waiting list until cur_sim_time
while len(self._waiting_list) > 0 and self._waiting_list[0].past_sim_time(self._cur_sim_time):
self._worker_list.append(heapq.heappop(self._waiting_list))
return self._worker_list
def reserve_worker(self, worker):
self._worker_list.remove(worker)
def worker_finished(self, worker):
heapq.heappush(self._waiting_list, worker)
def execute_task(self, task, worker):
if not worker:
raise Exception("Cannot use NULL worker")
# Get the CPU seconds for the specified task and worker
task_exec_time = task._raw_exec(worker)
# Run the worker for task_exec_time CPU seconds
worker.run_cpu(task_exec_time)
self._num_executed_tasks += 1
task.task_finished(None) # notify the task
    # Compute statistics (total, mean, median, stddev) on values in the array
    def compute_stats(self, times):
        times.sort()
        total_time = sum(times)
        mean_time = total_time / len(times)
        median_time = times[len(times) // 2]
        stddev_time = 0
        for time_n in times:
            stddev_time += pow(mean_time - time_n, 2)
        stddev_time = pow(stddev_time / len(times), 0.5)
        return total_time, mean_time, median_time, stddev_time
def get_status(self):
wall_times = []
cpu_times = []
for worker in self._worker_list:
wall_times.extend(worker._task_wall_times)
cpu_times.extend(worker._task_cpu_times)
for worker in self._waiting_list:
wall_times.extend(worker._task_wall_times)
cpu_times.extend(worker._task_cpu_times)
if len(wall_times) > 0:
total_wall_time, mean_wall_time, median_wall_time, stddev_wall_time = self.compute_stats(wall_times)
total_cpu_time, mean_cpu_time, median_cpu_time, stddev_cpu_time = self.compute_stats(cpu_times)
else:
total_wall_time = mean_wall_time = median_wall_time = stddev_wall_time = 0
total_cpu_time = mean_cpu_time = median_cpu_time = stddev_cpu_time = 0
worker_sim_times = [worker._cur_time for worker in self._worker_list]
worker_sim_times.append(0)
cur_sim_time = max(worker_sim_times)
num_workers = len(self._worker_list) + len(self._waiting_list)
return {"num_total_workers" : num_workers, "num_executed_tasks" : self._num_executed_tasks,
"cur_sim_time": cur_sim_time,
"total_wall_time": total_wall_time, "mean_wall_time": mean_wall_time,
"median_wall_time": median_wall_time, "stddev_wall_time": stddev_wall_time,
"total_cpu_time": total_cpu_time, "mean_cpu_time": mean_cpu_time,
"median_cpu_time": median_cpu_time, "stddev_cpu_time": stddev_cpu_time,
}
    # Placeholder hooks for the PyMW I/O interface; the simulator performs no
    # real file I/O, so these hooks all simply return None
    def pymw_master_read(self, loc):
        return None, None, None
def pymw_master_write(self, output, loc):
return None
def pymw_worker_read(loc):
return None
def pymw_worker_write(output, loc):
return None
def pymw_worker_func(func_name_to_call):
return None
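# --- Usage sketch (illustrative, not part of the original module) ---
# The uniform speed_func/avail_func below are made up, and the sketch assumes
# 'import heapq' at the top of the file plus a SimWorker.__init__ (not shown
# in this fragment) taking (name, speed, avail_lens, avail_fracs).
if __name__ == '__main__':
    def speed_func(wnum):
        return 1.0  # every simulated worker runs at unit speed

    def avail_func(wnum):
        # alternate 100s available / 50s unavailable, repeated 10 times
        return [100, 50] * 10, [1, 0] * 10

    grid = GridSimulatorInterface()
    grid.generate_workers(4, speed_func, avail_func)
    print(grid.get_status())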
As some of you may know from our preview postings and rave review, Marshall has gotten into the personal audio game, and they came wearing their big boy pants. They are well known for their amps, and they haven't been around for fifty years without learning a thing or two. Their little headphones blew my mind and sit in my list of top three headphones for look, build and audio quality.
The Hanwell is now upon us. The Hanwell Anniversary Edition is a tribute to Marshall's 50-year legacy and is built to carry the vintage look while throwing the front-row concert feel in your face. This thing looks amazing, and after a review of the technical specs, it could quickly become the life of the party. If you want the Hanwell, it is going to set you back 800 dollars, and you will have to act fast: it is a limited production run of only 10,000 units.
# -*- encoding: utf-8 -*-
'''
Given a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
For example:
Given the following binary tree,
1 <---
/ \
2 3 <---
\ \
5 4 <---
You should return [1, 3, 4].
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def rightSideView(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
        if root is None:
            return []
        res = []
        q = [root]  # nodes on the current level
        while q:
            # the rightmost node on each level is the one visible from the right
            res.append(q[-1].val)
            temp = []
            for item in q:
                if item.left is not None:
                    temp.append(item.left)
                if item.right is not None:
                    temp.append(item.right)
            q = temp
        return res
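# --- Quick check (illustrative, not part of the original solution) ---
# Builds the example tree from the docstring above; the TreeNode class from
# the comment is repeated here so the snippet is self-contained.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

if __name__ == '__main__':
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.right = TreeNode(5)
    root.right.right = TreeNode(4)
    print(Solution().rightSideView(root))  # expected: [1, 3, 4]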
Find Rental Assistance in Pipestone, Minnesota. Welcome to our Pipestone, Minnesota local listing page. We have listed out the public housing authorities below that provide US Government HUD Housing Choice Vouchers (formerly Section 8). Click on the listings below to find out more detailed information.
# -*- coding: utf8 -*-
#!/usr/bin/python
#
#****************************************************************************
#* *
#* base classes for generating part models in STEP AP214 *
#* *
#* This is part of FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* Copyright (c) 2017 *
#* Terje Io https://github.com/terjeio *
#* Maurice https://launchpad.net/~easyw *
#* *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
# 2017-11-25
#
# parts of this code is based on work by other contributors
#
from collections import namedtuple
### use enums (Python 3+)
class CaseType:
r"""A class for holding constants for part types
.. note:: will be changed to enum when Python version allows it
"""
THT = 'THT'
r"""THT - trough hole part
"""
SMD = 'SMD'
r"""SMD - surface mounted part
"""
class PinStyle:
r"""A class for holding constants for pin styles
.. note:: will be changed to enum when Python version allows it
"""
STRAIGHT = 'Straight'
ANGLED = 'Angled'
###
#
# The following classes must be subclassed
#
class PartParametersBase:
"""
.. document private functions
.. automethod:: _make_params
"""
Model = namedtuple("Model", [
'variant', # generic model name
'params', # parameters
'model' # model creator class
])
""" Internally used for passing information from the base parameters to the class instance used for creating models
.. py:attribute:: variant
The generic name from the list of parameters
.. py:attribute:: params
The final parameters passed to the class instance
.. py:attribute:: model
The class instance itself
"""
Params = namedtuple("Params", [
'num_pins',
'pin_pitch',
'pin_style',
'type'
])
""" Basic parameters for parts, if further parameters are required this should be subclassed/overriden
.. note:: The existing parameters should be kept with the same name when overriden as the framework requires them
.. py:attribute:: num_pins
Number of pins, for parts with this is usually set to None for
.. py:attribute:: pin_pitch
The final parameters passed to the class instance
.. py:attribute:: pin_style
The class instance itself
.. py:attribute:: type
The class instance itself
"""
def __init__(self):
self.base_params = {}
    def _make_params(self, pin_pitch, num_pin_rows, pin_style, type):
        r"""create a Params namedtuple from the given base parameters
        (num_pin_rows is accepted for subclass use but not stored here)
        """
return self.Params(
num_pins = None, # to be added programmatically
pin_pitch = pin_pitch, # pin pitch
pin_style = pin_style, # pin style: 'Straight' or 'Angled'
type = type # part type: 'THT' or 'SMD'
)
def getAllModels(self, model_classes):
r"""Generate model parameters for all series and variants
Loops through all base parameters and model classes instantiating the classes and checks whether a variant should be made.
If a variant is to be made a namedtuple is made with the index from a call to the model instance makeModelName method
and the base parameters are copied to this. When copying the base parameters others may be added such as number of pins (num_pins).
.. note:: Typically this method is overriden in order to add calculated parameters like number of pins.
The model specific parameters are contained in the model class itself.
:param model_classes:
list of part creator classes inherited from :class:`cq_base_model.PartBase`
:type model_classes: ``list of classes``
        :rtype: ``dict``
"""
models = {}
# instantiate generator classes in order to make a dictionary of all model names
for i in range(0, len(model_classes)):
for variant in self.base_params.keys():
params = self.base_params[variant]
model = model_classes[i](params)
if model.make_me:
models[model.makeModelName(variant)] = self.Model(variant, params, model_classes[i])
return models
def getSampleModels(self, model_classes):
r"""Generate model parameters for all series and variants
Loops through all base parameters and model classes instantiating the classes and checks whether a variant should be made.
If a variant is to be made a namedtuple is made with the index from a call to the model instance makeModelName method
and the base parameters are copied to this. When copying the base parameters others may be added such as number of pins (num_pins).
.. note:: Typically this method is overriden in order to add calculated parameters like number of pins.
The model specific parameters are contained in the model class itself.
:param model_classes:
list of part creator classes inherited from :class:`cq_base_model.PartBase`
:type model_classes: ``list of classes``
        :rtype: ``dict``
"""
models = {}
# instantiate generator classes in order to make a dictionary of all default variants
for i in range(0, len(model_classes)):
variant = model_classes[i].default_model
params = self.base_params[variant]
model = model_classes[i](params)
if model.make_me:
                models[model.makeModelName(variant)] = self.Model(variant, params, model_classes[i])
return models
def getModel(self, model_class, variant):
r"""Generate model parameters for all series and variants
Gets the parameters for a single variant.
If a variant is to be made a namedtuple is made with the index from a call to the model instance makeModelName method
and the base parameters are copied to this. When copying the base parameters others may be added such as number of pins (num_pins).
.. note:: Typically this method is overriden in order to add calculated parameters like number of pins.
The model specific parameters are contained in the model class itself.
:param model_classe:
part creator class inherited from :class:`cq_base_model.PartBase`
:type model_classes: ``list of classes``
:rtype: ```tuple````
"""
        model = variant in self.base_params
# instantiate generator class in order to make a dictionary entry for a single variant
if model:
params = self.base_params[variant]
model = model_class(params)
if not model.make_me:
model = False
return model
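### Usage sketch (illustrative, not part of the original module) ###
# Nothing here is from the original source: the subclass, variant name and
# parameter values are made up to show how the framework above is meant to
# be driven. Part creator classes (subclasses of cq_base_model.PartBase)
# would then be passed to getAllModels()/getSampleModels()/getModel().
class ExamplePartParameters(PartParametersBase):

    def __init__(self):
        PartParametersBase.__init__(self)
        self.base_params = {
            "Example_1x04_P2.54mm": self._make_params(2.54, 1, PinStyle.STRAIGHT, CaseType.THT),
        }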
### EOF ###
Three Restaurants. Three $250 gift certificates.
One easy switch to paperless billing.
Enroll in paperless billing and enter to win a delicious dining tour of your area, sponsored by NW Natural.
Some of the best restaurants in the Pacific Northwest use efficient natural gas ranges and cooktops to craft their creations. That’s why we selected restaurants around the region to promote our environmentally-friendly paperless billing.
Enroll in paperless billing below.
Enroll between March 1 – April 30, 2019.
Start receiving your monthly gas bill online, plus helpful payment reminders.
You’ll be automatically entered to win three $250 gift certificates to different restaurants, so you can experience the best that natural gas can cook up. And if you’re already enrolled in paperless billing, you’re automatically entered, too! We’ll email the winner in mid-May.
View rules and requirements to see select restaurants throughout Oregon and Southwest Washington.
# Represents the object file recognized by the Luz architecture.
# An object file is relocatable. It is created by the assembler,
# and later combined with other object files by the linker into
# an executable.
#
# Luz micro-controller assembler
# Eli Bendersky (C) 2008-2010
#
class ObjectFile(object):
""" Use one of the factory methods to create ObjectFile
instances: from_assembler, from_file
The name of the object can be accessed via the .name
attribute.
"""
def __init__(self):
self.seg_data = {}
self.export_table = []
self.import_table = []
self.reloc_table = []
self.name = None
@classmethod
def from_assembler( cls,
seg_data,
export_table,
import_table,
reloc_table):
""" Create a new ObjectFile from assembler-generated data
structures.
"""
obj = cls()
assert isinstance(seg_data, dict)
for table in (export_table, import_table, reloc_table):
assert isinstance(table, list)
obj.seg_data = seg_data
obj.export_table = export_table
obj.import_table = import_table
obj.reloc_table = reloc_table
return obj
@classmethod
def from_file(cls, file):
""" 'file' is either a filename (a String), or a readable
IO object.
"""
pass
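# --- Usage sketch (illustrative; the table entry layouts below are made up,
# --- not the real Luz formats) ---
if __name__ == '__main__':
    obj = ObjectFile.from_assembler(
        seg_data={'text': [0x00, 0x01, 0x02, 0x03]},  # segment name -> data
        export_table=[('my_symbol', 'text', 0)],      # hypothetical layout
        import_table=[],
        reloc_table=[])
    obj.name = 'demo'
    print('%s: segments=%s' % (obj.name, list(obj.seg_data.keys())))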
As part of our run of posts focusing on the important role social media plays within inbound marketing, we dive in and discuss how important social media content is in relation to your wider inbound campaign. This is why it's one of the key inbound marketing resources.
We have been using our own inbound marketing experience (we are a Platinum HubSpot agency, host the Manchester HUG, spoke at the national HUG in Oxford and have helped many clients) as well as findings from the latest State Of Social Media report released by Buffer.
We will look at the role that social media plays in an inbound marketing campaign as well as providing some real life examples to help put the theory into context.
Helping visitors at this early stage of the Buyer's Journey is important because they are fresh to the problem and can be nurtured into becoming a customer.
Using social media is a cost effective means of potentially reaching billions of potential customers. It's how you share your content and offers and put it in front of users in a non-intrusive way.
Not that paid posts should be ignored.
You can also use promoted posts on social media to quickly extend the reach of your content. This is great for getting your brand in front of highly targeted and segmented contacts. You can pay to appear in front of specific demographics and behaviours which are in line with your Target Persona.
But the content you pay to promote will still follow the inbound methodology and help them with their Pain Points. Learn more about crafting an inbound campaign from our own director.
As you help users and visitors along their Journey, you are building a relationship with them which is established on trust and helpfulness. And this is a recipe for turning them into a Promoter once they have paid for your services.
Which is another reason that social media is of massive importance when it comes to inbound marketing.
At the time of writing, a big proponent of turning customers into Promoters on social media is the supermarket chain Lidl, with their #LidlSurprises campaign.
Building on their tactic of using screenshots of customers' Tweets on in-store signage and on the side of delivery vans, their Christmas campaign involves lowering the price of items the more they get discussed on social media.
And the thread from this tweet, as well as the general conversation on the hashtag "#LidlSurprises", about buying Christmas treats from Lidl is fuller than the inn at Bethlehem all those years ago.
Transport this principle into your own setting and it is easy to see the massive potential for your social media accounts appearing to new users.
As more and more users find your content on social media, the management of your accounts needs to be spot on. Content needs to be shareable (so users can pass it on with a share), accessible and relevant.
So How Important Is Social Media Content?
There is another enormous reason that your social media content matters to your inbound marketing efforts.
The majority of your visitors, over time, will be finding your informative and helpful content via search engines.
When users are in the Awareness stage and looking for information around their Pain Point, they will likely head to search engines, along with social media.
And the most used search engines are pulling social media content into the SERP so that users can get a feel for that particular result's style.
This means that you need to be always aware of what image your social media is showing.
For example, Bing has started rolling out a feature that includes a company's latest two Facebook posts in their answer card. At the very least, these posts need to be recently updated and engaging.
Bing is pulling company Facebook posts onto the SERP, as noted by The SEM Post.
Google has been showing more and more Twitter carousels on their results page.
As a side note, this particular example (below) is interesting in terms of using the SERP to build your authority and help your inbound efforts.
A search for a company name which two businesses share highlights the authority issue. The top organic result and Twitter carousel is dominated by a clothing brand (on the left), but the larger SERP real estate space and contact information is owned by an insurance firm.
Both would boost their authority by owning the Twitter carousel as well as the company information box.
But purely in terms of inbound marketing, the ability to have your company Twitter feed showcased in a prime SERP position is crucial for driving home your authority to users in the later Consideration and Decision stages.
If they are heading to Google to give your site and a competitor's one last look over before picking up the phone or placing an order, this is the type of thing which could sway the decision in your favour.
Providing the Tweets which are showcased are authoritative, relevant and impressive, it's yet another opportunity to nurture a user along their Journey.
Look at how this nationwide car dealership keeps their feed fresh and on-topic; it all works towards their goal of attracting more visitors.
From left to right, Message 1 and Message 2 are aimed at the latter two stages of the Buyer's Journey, whereas Message 3 (4 hours ago), builds brand awareness and spreads a positive message without pushing any products directly.
Message 1 would appeal to somebody deciding between electric vehicles (Decision Stage), Message 2 would resonate with a searcher who is in Consideration of the options open to them, whilst Message 3 might spark the interest of someone in the very early stages of their research process. It makes the sales team at "#Ford #Preston" seem approachable and human.
This makes for effective management of the dealership's social media content, as the three tweets cover all stages of the Buyer's Journey, include relevant copy ("#Preston") to attract users, and are authoritative enough of an account to be pulled through into the SERP.
To make your inbound marketing as great as it possibly can be, social media is just one aspect. This post has touched upon PPC and SEO, but not even addressed Content, Strategy and everything else you need to improve.
Luckily, we've put together over 30 great tips and techniques into one reference tool packed with inbound marketing resources. Check it out by pressing the button below.
## \file
## \ingroup tutorial_roofit
## \notebook
## Addition and convolution: options for plotting components of composite p.d.f.s.
##
## \macro_code
##
## \date February 2018
## \author Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Set up composite pdf
# --------------------------------------
# Declare observable x
x = ROOT.RooRealVar("x", "x", 0, 10)
# Create two Gaussian PDFs g1(x,mean1,sigma) and g2(x,mean2,sigma) and
# their parameters
mean = ROOT.RooRealVar("mean", "mean of gaussians", 5)
sigma1 = ROOT.RooRealVar("sigma1", "width of gaussians", 0.5)
sigma2 = ROOT.RooRealVar("sigma2", "width of gaussians", 1)
sig1 = ROOT.RooGaussian("sig1", "Signal component 1", x, mean, sigma1)
sig2 = ROOT.RooGaussian("sig2", "Signal component 2", x, mean, sigma2)
# Sum the signal components into a composite signal p.d.f.
sig1frac = ROOT.RooRealVar(
"sig1frac", "fraction of component 1 in signal", 0.8, 0., 1.)
sig = ROOT.RooAddPdf(
"sig", "Signal", ROOT.RooArgList(sig1, sig2), ROOT.RooArgList(sig1frac))
# Build Chebychev polynomial p.d.f.
a0 = ROOT.RooRealVar("a0", "a0", 0.5, 0., 1.)
a1 = ROOT.RooRealVar("a1", "a1", -0.2, 0., 1.)
bkg1 = ROOT.RooChebychev("bkg1", "Background 1",
x, ROOT.RooArgList(a0, a1))
# Build exponential pdf
alpha = ROOT.RooRealVar("alpha", "alpha", -1)
bkg2 = ROOT.RooExponential("bkg2", "Background 2", x, alpha)
# Sum the background components into a composite background p.d.f.
bkg1frac = ROOT.RooRealVar(
    "bkg1frac", "fraction of component 1 in background", 0.2, 0., 1.)
bkg = ROOT.RooAddPdf(
    "bkg", "Background", ROOT.RooArgList(bkg1, bkg2), ROOT.RooArgList(bkg1frac))
# Sum the composite signal and background
bkgfrac = ROOT.RooRealVar("bkgfrac", "fraction of background", 0.5, 0., 1.)
model = ROOT.RooAddPdf(
"model", "g1+g2+a", ROOT.RooArgList(bkg, sig), ROOT.RooArgList(bkgfrac))
# Set up basic plot with data and full pdf
# ------------------------------------------------------------------------------
# Generate a data sample of 1000 events in x from model
data = model.generate(ROOT.RooArgSet(x), 1000)
# Plot data and complete PDF overlaid
xframe = x.frame(ROOT.RooFit.Title(
"Component plotting of pdf=(sig1+sig2)+(bkg1+bkg2)"))
data.plotOn(xframe)
model.plotOn(xframe)
# Clone xframe for use below
xframe2 = xframe.Clone("xframe2")
# Make component by object reference
# --------------------------------------------------------------------
# Plot single background component specified by object reference
ras_bkg = ROOT.RooArgSet(bkg)
model.plotOn(xframe, ROOT.RooFit.Components(
ras_bkg), ROOT.RooFit.LineColor(ROOT.kRed))
# Plot single background component specified by object reference
ras_bkg2 = ROOT.RooArgSet(bkg2)
model.plotOn(xframe, ROOT.RooFit.Components(ras_bkg2), ROOT.RooFit.LineStyle(
ROOT.kDashed), ROOT.RooFit.LineColor(ROOT.kRed))
# Plot multiple background components specified by object reference
# Note that specified components may occur at any level in object tree
# (e.g bkg is component of 'model' and 'sig2' is component 'sig')
ras_bkg_sig2 = ROOT.RooArgSet(bkg, sig2)
model.plotOn(xframe, ROOT.RooFit.Components(ras_bkg_sig2),
ROOT.RooFit.LineStyle(ROOT.kDotted))
# Make component by name/regexp
# ------------------------------------------------------------
# Plot single background component specified by name
model.plotOn(xframe2, ROOT.RooFit.Components(
"bkg"), ROOT.RooFit.LineColor(ROOT.kCyan))
# Plot multiple background components specified by name
model.plotOn(xframe2, ROOT.RooFit.Components("bkg1,sig2"),
             ROOT.RooFit.LineStyle(ROOT.kDotted), ROOT.RooFit.LineColor(ROOT.kCyan))
# Plot multiple background components specified by regular expression on
# name
model.plotOn(xframe2, ROOT.RooFit.Components("sig*"),
             ROOT.RooFit.LineStyle(ROOT.kDashed), ROOT.RooFit.LineColor(ROOT.kCyan))
# Plot multiple background components specified by multiple regular
# expressions on name
model.plotOn(xframe2, ROOT.RooFit.Components("bkg1,sig*"),
             ROOT.RooFit.LineStyle(ROOT.kDashed), ROOT.RooFit.LineColor(ROOT.kYellow),
             ROOT.RooFit.Invisible())
# Draw the frame on the canvas
c = ROOT.TCanvas("rf205_compplot", "rf205_compplot", 800, 400)
c.Divide(2)
c.cd(1)
ROOT.gPad.SetLeftMargin(0.15)
xframe.GetYaxis().SetTitleOffset(1.4)
xframe.Draw()
c.cd(2)
ROOT.gPad.SetLeftMargin(0.15)
xframe2.GetYaxis().SetTitleOffset(1.4)
xframe2.Draw()
c.SaveAs("rf205_compplot.png")
To watch the film click the play button above.
Looking for something to help you get up and go? Welcome to our first ever Brake Magazine Original Film, Our Garden. As a publication, we’re really blessed. We live and work in one of the coolest spots for bike testing in the UK. Sure, it means we have to drive further to get bikes, but when we fancy a ride we’ve got epic trails and even better blacktop.
Our Garden is a film designed to inspire you to get up and go riding. It’s about breaking out of that work cycle and doing something epic because you love it. Even in the UK, we don’t have to travel far to get out of the city and into some remote, stunning terrain. We spent a full day with an epic slow-motion camera, an even more epic cameraman called Lee and our Editor Llel ripping a Ducati Multistrada on a perfect, sunny South Wales day.
Spend the next four minutes of your life getting excited, and spend your weekend doing your best to get away from the real world. Have a great few days. Throw your epic photos down in the comments, #brakemagazine on all of the social platforms and remember, life’s better when you’re riding.