code (stringlengths 22 to 1.05M) | apis (sequencelengths 1 to 3.31k) | extract_api (stringlengths 75 to 3.25M) |
---|---|---|
import logging
"""
Formatter
"""
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d:%H:%M:%S')
"""
Set Flask logger
"""
logger = logging.getLogger('FLASK_LOG')
logger.setLevel(logging.DEBUG)
stream_log = logging.StreamHandler()
stream_log.setFormatter(formatter)
logger.addHandler(stream_log)
# if disabled
# logger.disabled = True
| [
"logging.getLogger",
"logging.Formatter",
"logging.StreamHandler"
] | [((50, 156), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {'datefmt': '"""%Y-%m-%d:%H:%M:%S"""'}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n datefmt='%Y-%m-%d:%H:%M:%S')\n", (67, 156), False, 'import logging\n'), ((193, 223), 'logging.getLogger', 'logging.getLogger', (['"""FLASK_LOG"""'], {}), "('FLASK_LOG')\n", (210, 223), False, 'import logging\n'), ((268, 291), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (289, 291), False, 'import logging\n')] |
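A minimal usage sketch of the logger configured above; the output line in the comment is only an example and the timestamp will differ.

# Illustrative only: exercise the FLASK_LOG logger configured above.
logger.info("service started")
# -> 2024-01-01:12:00:00 - FLASK_LOG - INFO - service started
logger.debug("payload size = %d bytes", 1024)  # lazy %-style message interpolation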
#!/usr/bin/python3
import os
import sys
import socket
CURRENT_DIR = os.path.dirname(__file__)
NEWSBLUR_DIR = ''.join([CURRENT_DIR, '/../../'])
sys.path.insert(0, NEWSBLUR_DIR)
os.environ['DJANGO_SETTINGS_MODULE'] = 'newsblur_web.settings'
import threading
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify, assume this is hooked up to a single filename
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
import time
import boto3
from django.conf import settings
BACKUP_DIR = '/srv/newsblur/backup/'
s3 = boto3.client('s3', aws_access_key_id=settings.S3_ACCESS_KEY, aws_secret_access_key=settings.S3_SECRET)
hostname = socket.gethostname().replace('-','_')
s3_object_name = f'backup_{hostname}/backup_{hostname}_{time.strftime("%Y-%m-%d-%H-%M")}.sql'
path = os.listdir(BACKUP_DIR)[0]
full_path = os.path.join(BACKUP_DIR, path)
print('Uploading %s to %s on S3 bucket %s' % (full_path, s3_object_name, settings.S3_BACKUP_BUCKET))
s3.upload_file(full_path, settings.S3_BACKUP_BUCKET, s3_object_name, Callback=ProgressPercentage(full_path))
os.remove(full_path)
| [
"os.path.getsize",
"sys.path.insert",
"boto3.client",
"os.listdir",
"threading.Lock",
"time.strftime",
"os.path.join",
"sys.stdout.write",
"os.path.dirname",
"sys.stdout.flush",
"socket.gethostname",
"os.remove"
] | [((69, 94), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (84, 94), False, 'import os\n'), ((144, 176), 'sys.path.insert', 'sys.path.insert', (['(0)', 'NEWSBLUR_DIR'], {}), '(0, NEWSBLUR_DIR)\n', (159, 176), False, 'import sys\n'), ((1037, 1143), 'boto3.client', 'boto3.client', (['"""s3"""'], {'aws_access_key_id': 'settings.S3_ACCESS_KEY', 'aws_secret_access_key': 'settings.S3_SECRET'}), "('s3', aws_access_key_id=settings.S3_ACCESS_KEY,\n aws_secret_access_key=settings.S3_SECRET)\n", (1049, 1143), False, 'import boto3\n'), ((1330, 1360), 'os.path.join', 'os.path.join', (['BACKUP_DIR', 'path'], {}), '(BACKUP_DIR, path)\n', (1342, 1360), False, 'import os\n'), ((1571, 1591), 'os.remove', 'os.remove', (['full_path'], {}), '(full_path)\n', (1580, 1591), False, 'import os\n'), ((1292, 1314), 'os.listdir', 'os.listdir', (['BACKUP_DIR'], {}), '(BACKUP_DIR)\n', (1302, 1314), False, 'import os\n'), ((467, 483), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (481, 483), False, 'import threading\n'), ((1153, 1173), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1171, 1173), False, 'import socket\n'), ((1247, 1278), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M"""'], {}), "('%Y-%m-%d-%H-%M')\n", (1260, 1278), False, 'import time\n'), ((389, 414), 'os.path.getsize', 'os.path.getsize', (['filename'], {}), '(filename)\n', (404, 414), False, 'import os\n'), ((739, 849), 'sys.stdout.write', 'sys.stdout.write', (["('\\r%s %s / %s (%.2f%%)' % (self._filename, self._seen_so_far, self._size,\n percentage))"], {}), "('\\r%s %s / %s (%.2f%%)' % (self._filename, self.\n _seen_so_far, self._size, percentage))\n", (755, 849), False, 'import sys\n'), ((915, 933), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (931, 933), False, 'import sys\n')] |
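A short self-contained sketch of how the ProgressPercentage callback above gets driven: boto3's upload_file invokes the Callback object once per transferred chunk with the number of bytes sent, which __call__ accumulates and prints. The temporary file and chunk sizes below are made-up values for illustration.

# Illustrative only: drive ProgressPercentage by hand, the way boto3's Callback does.
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"x" * 8192)            # 8 KiB stand-in for a real backup file
progress = ProgressPercentage(tmp.name)
for chunk in (1024, 3072, 4096):
    progress(chunk)                    # prints "\r<name> <seen> / <size> (<percent>%)"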
from typing import Tuple
import numpy as np
import rasterio.warp
from opensfm import features
from .orthophoto_manager import OrthoPhotoManager
from .view import View
class OrthoPhotoView(View):
def __init__(
self,
main_ui,
path: str,
init_lat: float,
init_lon: float,
is_geo_reference: bool = False,
):
"""[summary]
Args:
main_ui (GUI.Gui)
path (str): path containing geotiffs
"""
self.image_manager = OrthoPhotoManager(path, 100.0)
self.images_in_list = self.image_manager.image_keys
self.zoom_window_size_px = 500
self.is_geo_reference = is_geo_reference
self.size = 50 # TODO add widget for zoom level
super(OrthoPhotoView, self).__init__(main_ui, False)
self.refocus(init_lat, init_lon)
self.populate_image_list()
if self.images_in_list:
self.bring_new_image(self.images_in_list[0])
self.set_title()
def get_image(self, new_image):
crop, image_window, geot = self.image_manager.read_image_around_latlon(
new_image, self.center_lat, self.center_lon, self.size
)
self.image_window = image_window
self.geot = geot
return crop
def get_candidate_images(self):
return self.image_manager.get_candidate_images(
self.center_lat, self.center_lon, self.size
)
def pixel_to_latlon(self, x: float, y: float):
"""
From pixels (in the viewing window) to latlon
"""
if not self.is_geo_reference:
return None
# Pixel to whatever crs the image is in
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `geot`.
x, y = self.geot.xy(y, x)
        # And then to WGS84 (lat/lon)
lons, lats = rasterio.warp.transform(self.geot.crs, "EPSG:4326", [x], [y])
return lats[0], lons[0]
def gcp_to_pixel_coordinates(self, x: float, y: float) -> Tuple[float, float]:
"""
Transforms from normalized coordinates (in the whole geotiff) to
pixels (in the viewing window)
"""
h, w = self.image_manager.get_image_size(self.current_image)
px = features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0]
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`.
x = px[0] - self.image_window.col_off
y = px[1] - self.image_window.row_off
# pyre-fixme[7]: Expected `Tuple[float, float]` but got `List[typing.Any]`.
return [x, y]
def pixel_to_gcp_coordinates(self, x: float, y: float) -> Tuple[float, float]:
"""
Transforms from pixels (in the viewing window) to normalized coordinates
(in the whole geotiff)
"""
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`.
x += self.image_window.col_off
y += self.image_window.row_off
h, w = self.image_manager.get_image_size(self.current_image)
coords = features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0]
return coords.tolist()
def refocus(self, lat, lon):
self.center_lat = lat
self.center_lon = lon
self.populate_image_list()
if self.images_in_list:
if self.current_image not in self.images_in_list:
self.bring_new_image(self.images_in_list[0])
else:
self.bring_new_image(self.current_image)
self.set_title()
def bring_new_image(self, new_image):
super(OrthoPhotoView, self).bring_new_image(new_image, force=True)
xlim = self.ax.get_xlim()
ylim = self.ax.get_ylim()
artists = self.ax.plot(np.mean(xlim), np.mean(ylim), "rx")
self.plt_artists.extend(artists)
self.canvas.draw_idle()
def set_title(self):
lat, lon = self.center_lat, self.center_lon
if self.images_in_list:
t = "Images covering lat:{:.4f}, lon:{:.4f}".format(lat, lon)
shot = self.current_image
seq_ix = self.images_in_list.index(shot)
title = f"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}"
else:
title = f"No orthophotos around {lat}, {lon}"
self.current_image = None
self.ax.clear()
self.ax.axis("off")
self.canvas.draw_idle()
self.window.title(title)
| [
"numpy.mean",
"numpy.array"
] | [((3748, 3761), 'numpy.mean', 'np.mean', (['xlim'], {}), '(xlim)\n', (3755, 3761), True, 'import numpy as np\n'), ((3763, 3776), 'numpy.mean', 'np.mean', (['ylim'], {}), '(ylim)\n', (3770, 3776), True, 'import numpy as np\n'), ((2286, 2304), 'numpy.array', 'np.array', (['[[x, y]]'], {}), '([[x, y]])\n', (2294, 2304), True, 'import numpy as np\n'), ((3087, 3105), 'numpy.array', 'np.array', (['[[x, y]]'], {}), '([[x, y]])\n', (3095, 3105), True, 'import numpy as np\n')] |
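For context, the CRS hop inside pixel_to_latlon above can be exercised on its own with rasterio; the EPSG code and coordinates below are made-up example values, not anything taken from the project.

# Illustrative only: rasterio.warp.transform converts lists of x/y coordinates
# from a source CRS to a destination CRS (here a hypothetical UTM 33N point -> WGS84).
import rasterio.warp

xs, ys = [500000.0], [4649776.0]
lons, lats = rasterio.warp.transform("EPSG:32633", "EPSG:4326", xs, ys)
print(lats[0], lons[0])                # roughly 42 N, 15 E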
"""
The model file for a Memo
"""
import re
import os
import shutil
import json
from datetime import datetime
from flask import current_app
from memos import db
from memos.models.User import User
from memos.models.MemoState import MemoState
from memos.models.MemoFile import MemoFile
from memos.models.MemoSignature import MemoSignature
from memos.models.MemoReference import MemoReference
from memos.models.MemoHistory import MemoHistory
from memos.models.MemoActivity import MemoActivity
from memos.revletter import b10_to_rev, rev_to_b10
class Memo(db.Model):
"""This class is the single interface to a "memo" and all of the "memos"
"""
id = db.Column(db.Integer, primary_key=True)
number = db.Column(db.Integer) # Memo Number
version = db.Column(db.String) # A,B,..Z,AA,AB,...AZ,BA
confidential = db.Column(db.Boolean, default=False) # if true only author, signer, distribution can read
distribution = db.Column(db.String(128), default='') # user names on the distribution
keywords = db.Column(db.String(128), default='') # any keyword
title = db.Column(db.String(128), nullable=False, default='') # The title of the memo
num_files = db.Column(db.Integer, default=0) # The number of files attached to the memo
action_date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow) # The last time anything happened
create_date = db.Column(db.DateTime) # when the memo was created
submit_date = db.Column(db.DateTime) # when the memo was most recently submitted (from created)
active_date = db.Column(db.DateTime) # when the memo was moved to active state (from submitted)
obsolete_date = db.Column(db.DateTime) # when the memo was moved to obsolete state (from active)
user_id = db.Column(db.String(120), db.ForeignKey('user.username'),nullable=False) # The key of the user who owns the memo
_signers = db.Column(db.String(128),default='') # the hidden list of signer usernames
_references = db.Column(db.String(128),default='') # The hidden list of references
memo_state = db.Column(db.Enum(MemoState)) # Draft, Signoff, Active, Obsolete
def __init__(self, **kwargs):
super().__init__(**kwargs)
# do custom initialization here
def __repr__(self):
return f"{self.user.username}-{self.number}{self.version}"
def __str__(self):
return f"{self.user.username}-{self.number}{self.version}"
########################################
# Permission Functions
########################################
@staticmethod
def can_create(owner=None, delegate=None):
"""Will return true if the delegate can create a memo for the owner"""
if owner is None:
return False
if delegate is None:
delegate = owner
return owner.is_delegate(delegate=delegate)
def can_revise(self, delegate=None):
"""Is the delgate allowed to update "this" memo?"""
if delegate is None:
return False
if not self.user.is_delegate(delegate):
return False
        if self.memo_state == MemoState.Active or self.memo_state == MemoState.Obsolete:
            return True
        return False
def can_sign(self, signer=None, delegate=None):
"""Can this memo be signed by delegate for the signers"""
if signer is None or delegate is None:
return False
if self.memo_state != MemoState.Signoff:
return False
if not signer.is_delegate(delegate=delegate):
return False
# The list of signers and if they have signed are kept in the MemoSignature table
status = MemoSignature.is_signer(self.id,signer)
return status['is_signer'] and not status['status']
def can_unsign(self, signer=None, delegate=None):
"""Can this memo be unsigned by delegate for the signer """
if signer is None or delegate is None:
return False
if self.memo_state != MemoState.Signoff:
return False
if not signer.is_delegate(delegate=delegate):
return False
status = MemoSignature.is_signer(self.id,signer)
return status['is_signer'] and status['status']
def can_obsolete(self, delegate=None):
""" Can this memo be obsoleted by the delegate? Only active memos can be obsoleted """
if delegate is None:
return False
if not self.user.is_delegate(delegate):
return False
if self.memo_state == MemoState.Active:
return True
return False
def can_cancel(self, delegate=None):
""" can this memo be cancled by the delegate. Only drafts memos can be canceled"""
if delegate is None:
return False
if self.memo_state != MemoState.Draft:
return False
if not self.user.is_delegate(delegate=delegate):
return False
return True
def can_reject(self, signer=None, delegate=None):
""" can this memo be rejected by the delegate. Only memos in signoff can be rejected"""
if signer is None or delegate is None:
return False
if self.memo_state != MemoState.Signoff:
return False
if not signer.is_delegate(delegate):
return False
status = MemoSignature.is_signer(memo_id=self.id,signer=signer)
# if you are a signer you can reject.. even if you have already signed
return status['is_signer']
def has_access(self, user=None):
"""This function will return True of the "username" has access to self"""
# if it is not confidential than anyone can access
if self.confidential == False:
return True
# at this point we know it is confidential so ... they must provide a username
if user is None:
return False
        # you always have access to your own memos
if self.user.username == user.username:
return True
if user.admin:
return True
if user.readAll:
return True
# if the username is in the distribution list then provide access TODO: ARH do something better
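        #   e.g. "alice bob,carol;dave" splits into ["alice", "bob", "carol", "dave"]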
if user.username in re.split('\s|\,|\t|\;|\:',self.distribution):
return True
return False
########################################
    # File and Path Functions
########################################
def get_fullpath(self):
""" This function gives the os path to a file """
path = os.path.join(current_app.root_path,"static","memos",f"{self.user_id}",f"{self.number}",f"{self.version}")
return path
def get_relpath(self):
""" Return the relative path of this memo """
path = os.path.join("/static","memos",f"{self.user_id}",f"{self.number}",f"{self.version}")
return path
def get_files(self):
""" Return a list of the files attached to this memo"""
memo_list = MemoFile.query.filter_by(memo_id=self.id).all()
return memo_list
def saveJson(self):
""" Create the JSON file which is a copy of all of the meta data """
js = {}
js['title']=self.title
js['number']=self.number
js['version']=self.version
js['confidential']=self.confidential
js['distribution']=self.distribution
js['keywords']=self.keywords
js['userid']=self.user_id
js['memo_state']=f"{self.memo_state}"
js['keywords']= self.keywords
js['signers']=self.signers['signers']
js['references']= self.references['ref_string']
js['files']=[]
for file in self.get_files():
js['files'].append(file.filename)
path = os.path.join(self.get_fullpath())
#current_app.logger.info(f"Making Directory {path}")
os.makedirs(path,exist_ok=True)
#current_app.logger.info(f"Making Succeeded {path}")
path = os.path.join(path,f"meta-{self.user_id}-{self.number}-{self.version}.json")
f = open(path,"w")
json.dump(js,f)
f.close()
@property
def signers(self):
# get the signers from the signing table and turn it back to a string and a list
siglist = MemoSignature.get_signers(self)
for sig in siglist:
sig.signer = User.find(username=sig.signer_id)
sig.delegate = User.find(username=sig.delegate_id)
return {'signers':self._signers,'siglist':siglist}
@signers.setter
def signers(self,signer_names):
self._signers = signer_names
MemoSignature.delete_signers(self)
users = User.valid_usernames(signer_names)
for signer in users['valid_users']:
MemoSignature.add_signer(memo=self,signer=signer)
######################################################################
# References
######################################################################
@staticmethod
def parse_reference(reference):
parts = re.split(r'-',reference)
if len(parts) == 2:
parts.append(None)
return parts
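    # Illustrative examples of the reference format handled above:
    #   parse_reference("alice-42-B") -> ["alice", "42", "B"]
    #   parse_reference("alice-42")   -> ["alice", "42", None]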
@staticmethod
def valid_references(references):
current_app.logger.info(f'references ={references}')
valid_memos = []
valid_refs = []
invalid = []
for memo_ref in re.split(r'\s|\,|\t|\;|\:',references):
if memo_ref == '':
continue
parts = Memo.parse_reference(memo_ref)
if len(parts) > 3 or len(parts) < 2:
invalid.append(memo_ref)
current_app.logger.info(f"INVALID length append {memo_ref} valid={valid_memos} invalid {invalid}")
continue
username = parts[0]
memo_number = parts[1]
memo_version = parts[2]
memo = Memo.find(username=username,memo_number=memo_number,memo_version=memo_version)
current_app.logger.info(f"Memo = {memo}")
if memo != None and (memo.memo_state == MemoState.Active or memo.memo_state == MemoState.Obsolete):
valid_memos.append(memo)
valid_refs.append(memo_ref)
else:
invalid.append(memo_ref)
rval = {'valid_refs':valid_refs, 'valid_memos' : valid_memos,'invalid':invalid}
return rval
@property
def references(self):
        # this function will return a list of reference objects + a string of the references
refs = MemoReference.get_refs(self)
rval = []
for ref in refs:
userid=ref[0]
memo = Memo.find(username=userid,memo_number=ref[1],memo_version=ref[2])
if ref[2] == None:
refstring=f"{userid}-{ref[1]}"
else:
refstring=f"{userid}-{ref[1]}-{ref[2]}"
rval.append((refstring,memo))
return {'reflist':rval,'ref_string':self._references}
@references.setter
def references(self,references):
self._references = references
refs = Memo.valid_references(references)
for i in range(len(refs['valid_refs'])):
parsed_ref = Memo.parse_reference(refs['valid_refs'][i])
user = User.find(username=parsed_ref[0])
MemoReference.add_ref(self.id,ref_user_id=user.username,ref_memo_number=parsed_ref[1],ref_memo_version=parsed_ref[2])
@property
def backrefs(self):
return MemoReference.get_back_refs(self)
######################################################################
#
######################################################################
def get_next_version(self):
memo = Memo.query.join(User).filter(Memo.number == self.number)\
.order_by(Memo.version.desc()).first()
current_app.logger.info(f"get_next_version {memo.id} {memo.number} {memo.version}")
if memo:
return b10_to_rev(rev_to_b10(memo.version)+1)
return b10_to_rev(1) # also known as 'A'
def save(self):
db.session.add(self)
db.session.commit()
self.saveJson()
################################################################################
# functions used to process the state
    # these functions would classically be called private
################################################################################
def obsolete_previous(self,acting=None):
prev_list = Memo.query.join(User).filter(Memo.number == self.number,Memo.version != self.version).all()
for memo in prev_list:
if memo.memo_state == MemoState.Active:
memo.memo_state = MemoState.Obsolete
MemoHistory.activity(memo=memo,memo_activity=MemoActivity.Obsolete,user=acting)
memo.save()
# This function is called when:
# 1- a valid draft is created
# 2- a signature happens
# 3- an unsign happens
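    # Summary of the state transitions it drives:
    #   Draft   -> Signoff  when signatures are still outstanding
    #   Draft   -> Active   when no signatures are outstanding
    #   Signoff -> Active   once every signer has signed
    #   On a move to Active, previous Active versions become Obsolete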
def process_state(self,acting=None):
if self.memo_state == MemoState.Draft:
if MemoSignature.status(self.id) == False:
self.memo_state = MemoState.Signoff
self.submit_date = datetime.utcnow()
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Signoff,user=acting)
self.notify_signers(f"memo {self.user.username}-{self.number}-{self.version} has gone into signoff")
else:
self.memo_state = MemoState.Active
self.active_date = datetime.utcnow()
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting)
self.obsolete_previous(acting=acting)
self.notify_distribution(f"memo {self.user.username}-{self.number}-{self.version} has been published")
if self.memo_state == MemoState.Signoff:
if MemoSignature.status(self.id):
self.memo_state = MemoState.Active
self.active_date = datetime.utcnow()
self.notify_distribution(f"memo {self.user.username}-{self.number}-{self.version} has been published")
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Activate,user=acting)
self.obsolete_previous(acting=acting)
else:
current_app.logger.info(f"Signatures Still Required")
self.action_date = datetime.utcnow()
self.save()
# TODO: ARH
def notify_distribution(self,message):
current_app.logger.info(F"Notify Distribution {self.distribution} {message}")
# TODO: ARH
def notify_signers(self,message):
current_app.logger.info(F"Notify signers {message}")
################################################################################
# State machine functions called by the viewcontroller
################################################################################
# Owner Function
@staticmethod
def create_revise(owner=None,delegate=None,memo_number=None):
""" This function will return None or a new Memo if the owner/delgate and revise this memo """
assert owner != None and delegate != None
if owner == None or delegate == None:
return None
if owner.is_delegate(delegate) != True:
return None
memo = Memo.query.join(User).filter(User.username==owner.username,Memo.number==memo_number).order_by(Memo.version.desc()).first()
# create a new memo (i.e. not a new version of an existing memo)
if memo_number == None or memo==None:
memo_number = Memo.get_next_number(owner)
new_memo = Memo(number = memo_number,\
version = 'A',\
confidential = False,\
distribution = '',\
keywords = '',\
title = '',\
num_files = 0,\
user_id = owner.username,\
memo_state = MemoState.Draft,\
action_date = datetime.utcnow(),\
create_date = datetime.utcnow(),\
signers = '' )
new_memo.save()
MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate)
current_app.logger.info(f"Creating new memo {new_memo}")
return new_memo
if memo.memo_state == MemoState.Draft:
current_app.logger.info(f"Found a draft memo {memo}")
return memo
# revise an existing memo
new_memo = Memo(number = memo_number,\
version = memo.get_next_version(),\
confidential = memo.confidential,\
distribution = memo.distribution,\
keywords = memo.keywords,\
title = memo.title,\
num_files = 0,\
user_id = memo.user_id,\
memo_state = MemoState.Draft,\
action_date = datetime.utcnow(),\
create_date = datetime.utcnow(),\
)
new_memo.save()
new_memo.references = memo.references['ref_string'] # cannot be done until there is an id assigned by the save
new_memo.signers = memo._signers # cannot be done until there is an id assigned by the save
new_memo.save()
MemoHistory.activity(memo=new_memo,memo_activity=MemoActivity.Create,user=delegate)
return new_memo
# signer function
def sign(self,signer=None,delegate=None):
current_app.logger.info(f"signer = {signer} delegate={delegate}")
if not self.can_sign(signer,delegate):
current_app.logger.info("NOT!!@ allowed to sign")
return False
current_app.logger.info("allowed to sign")
MemoSignature.sign(self.id,signer,delegate)
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Sign)
self.process_state(acting=delegate)
return True
# signer function
def unsign(self,signer=None,delegate=None):
if not self.can_unsign(signer,delegate):
return False
MemoSignature.unsign(self.id,signer,delegate)
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Unsign)
self.process_state(acting=delegate)
return True
# Owner Function
def obsolete(self,delegate=None):
current_app.logger.info(f"Obsolete: {self} Delegate={delegate}")
if not self.can_obsolete(delegate=delegate):
return False
self.memo_state = MemoState.Obsolete
self.action_date = datetime.utcnow()
self.obsolete_date = datetime.utcnow()
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Obsolete)
self.save()
return True
# Owner Function
def cancel(self,delegate=None):
current_app.logger.info(f"Cancel: {self} Delegate={delegate}")
memostring = f"{self}"
if not self.can_cancel(delegate=delegate):
return False
MemoFile.delete(self)
# delete all of the files in that directory & the directory
shutil.rmtree(self.get_fullpath())
MemoReference.delete(self)
MemoSignature.delete_signers(self)
MemoHistory.activity(memo=self,user=delegate,memo_activity=MemoActivity.Cancel)
db.session.delete(self)
db.session.commit()
current_app.logger.info(f"Canceling")
return True
# signer function
def reject(self,signer=None,delegate=None):
current_app.logger.info(f"signer = {signer} delegate={delegate}")
if not self.can_reject(signer,delegate):
return False
self.memo_state = MemoState.Draft
self.action_date = datetime.utcnow()
self.submit_date = None
self.active_date = None
self.obsolete_date = None
MemoHistory.activity(memo=self,memo_activity=MemoActivity.Reject,user=delegate)
MemoSignature.unsign_all(self)
self.save()
self.notify_signers(f"Memo {self.user.username}-{self.number}-{self.version} has been rejected for {signer.username} by {delegate.username}")
return True
################################################################################
# End of State machine functions
################################################################################
@staticmethod
def find(memo_id=None,username=None,memo_number=None,memo_version=None):
if memo_id != None:
return Memo.query.filter_by(id=memo_id).first()
current_app.logger.debug(f"FIND: Looking for {username}/{memo_number}/{memo_version}")
memoQry = Memo.query.filter_by(user_id=username,number=memo_number)
if memo_version != None:
            memoQry = memoQry.filter_by(version=memo_version)
memo = memoQry.first()
current_app.logger.debug(f"Found Memo id={memo}")
return memo
@staticmethod
def get_memo_list(username=None,memo_number=None,memo_version=None,page=1,pagesize=None):
if memo_version:
memo_list = Memo.query.join(User).filter(User.username==username,\
Memo.number==memo_number,\
Memo.version==memo_version)\
.paginate(page = page,per_page=pagesize)
elif memo_number:
memo_list = Memo.query.join(User).filter(User.username==username,Memo.number==memo_number)\
.order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
elif username:
memo_list = Memo.query.join(User).filter(User.username==username,Memo.memo_state == MemoState.Active)\
.order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
else:
memo_list = Memo.query.join(User).filter(Memo.memo_state == MemoState.Active)\
.order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
return memo_list
@staticmethod
def search(title=None,keywords=None,page=1,pagesize=None):
current_app.logger.info(f"Search title={title}")
        memo_list = None
        if title != None:
memo_list = Memo.query.filter(Memo.title.like(f"%{title}%")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
if keywords != None:
memo_list = Memo.query.filter(Memo.keywords.like(f"%{keywords}%")).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
return memo_list
@staticmethod
def get_next_number(user=None):
assert user!=None
memo_list = Memo.query.join(User).filter(User.username==user.username)\
.order_by(Memo.number.desc()).first()
if memo_list == None:
return 1
return memo_list.number+1
@staticmethod
def get_inbox(user=None,page=1,pagesize=None):
assert user!=None,"User must not be none"
if user == None:
return None
msigs = MemoSignature.get_signatures(user,signed=False)
memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Signoff,Memo.id.in_(msigs)).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
current_app.logger.info(f"Inbox for {user.username} = Items={len(memolist.items)} {memolist}")
return memolist
@staticmethod
def get_drafts(user=None,page=1,pagesize=None):
assert user!=None,"User must not be none"
if user == None:
return None
memolist = Memo.query.join(User).filter(Memo.memo_state==MemoState.Draft,User.username==user.username).order_by(Memo.action_date.desc()).paginate(page = page,per_page=pagesize)
return memolist | [
"flask.current_app.logger.debug",
"memos.models.MemoSignature.MemoSignature.unsign_all",
"memos.models.MemoHistory.MemoHistory.activity",
"memos.db.String",
"memos.models.MemoSignature.MemoSignature.sign",
"memos.models.MemoSignature.MemoSignature.is_signer",
"re.split",
"memos.models.MemoReference.MemoReference.get_back_refs",
"memos.models.MemoSignature.MemoSignature.get_signatures",
"flask.current_app.logger.info",
"memos.models.MemoFile.MemoFile.delete",
"memos.models.MemoReference.MemoReference.delete",
"memos.models.MemoSignature.MemoSignature.delete_signers",
"memos.models.MemoSignature.MemoSignature.add_signer",
"memos.models.User.User.find",
"memos.models.MemoSignature.MemoSignature.unsign",
"memos.db.session.delete",
"memos.revletter.b10_to_rev",
"memos.models.MemoSignature.MemoSignature.status",
"memos.models.MemoReference.MemoReference.get_refs",
"memos.db.ForeignKey",
"memos.models.User.User.valid_usernames",
"memos.db.session.add",
"memos.db.Column",
"memos.revletter.rev_to_b10",
"os.makedirs",
"memos.models.MemoSignature.MemoSignature.get_signers",
"datetime.datetime.utcnow",
"memos.models.MemoReference.MemoReference.add_ref",
"os.path.join",
"memos.db.session.commit",
"memos.db.Enum",
"memos.models.MemoFile.MemoFile.query.filter_by",
"json.dump"
] | [((661, 700), 'memos.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (670, 700), False, 'from memos import db\n'), ((714, 735), 'memos.db.Column', 'db.Column', (['db.Integer'], {}), '(db.Integer)\n', (723, 735), False, 'from memos import db\n'), ((801, 821), 'memos.db.Column', 'db.Column', (['db.String'], {}), '(db.String)\n', (810, 821), False, 'from memos import db\n'), ((903, 939), 'memos.db.Column', 'db.Column', (['db.Boolean'], {'default': '(False)'}), '(db.Boolean, default=False)\n', (912, 939), False, 'from memos import db\n'), ((1312, 1344), 'memos.db.Column', 'db.Column', (['db.Integer'], {'default': '(0)'}), '(db.Integer, default=0)\n', (1321, 1344), False, 'from memos import db\n'), ((1430, 1493), 'memos.db.Column', 'db.Column', (['db.DateTime'], {'nullable': '(False)', 'default': 'datetime.utcnow'}), '(db.DateTime, nullable=False, default=datetime.utcnow)\n', (1439, 1493), False, 'from memos import db\n'), ((1547, 1569), 'memos.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (1556, 1569), False, 'from memos import db\n'), ((1619, 1641), 'memos.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (1628, 1641), False, 'from memos import db\n'), ((1723, 1745), 'memos.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (1732, 1745), False, 'from memos import db\n'), ((1828, 1850), 'memos.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (1837, 1850), False, 'from memos import db\n'), ((1038, 1052), 'memos.db.String', 'db.String', (['(128)'], {}), '(128)\n', (1047, 1052), False, 'from memos import db\n'), ((1139, 1153), 'memos.db.String', 'db.String', (['(128)'], {}), '(128)\n', (1148, 1153), False, 'from memos import db\n'), ((1222, 1236), 'memos.db.String', 'db.String', (['(128)'], {}), '(128)\n', (1231, 1236), False, 'from memos import db\n'), ((1939, 1953), 'memos.db.String', 'db.String', (['(120)'], {}), '(120)\n', (1948, 1953), False, 'from memos import db\n'), ((1955, 1985), 'memos.db.ForeignKey', 'db.ForeignKey', (['"""user.username"""'], {}), "('user.username')\n", (1968, 1985), False, 'from memos import db\n'), ((2074, 2088), 'memos.db.String', 'db.String', (['(128)'], {}), '(128)\n', (2083, 2088), False, 'from memos import db\n'), ((2199, 2213), 'memos.db.String', 'db.String', (['(128)'], {}), '(128)\n', (2208, 2213), False, 'from memos import db\n'), ((2314, 2332), 'memos.db.Enum', 'db.Enum', (['MemoState'], {}), '(MemoState)\n', (2321, 2332), False, 'from memos import db\n'), ((3945, 3985), 'memos.models.MemoSignature.MemoSignature.is_signer', 'MemoSignature.is_signer', (['self.id', 'signer'], {}), '(self.id, signer)\n', (3968, 3985), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((4413, 4453), 'memos.models.MemoSignature.MemoSignature.is_signer', 'MemoSignature.is_signer', (['self.id', 'signer'], {}), '(self.id, signer)\n', (4436, 4453), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((5625, 5680), 'memos.models.MemoSignature.MemoSignature.is_signer', 'MemoSignature.is_signer', ([], {'memo_id': 'self.id', 'signer': 'signer'}), '(memo_id=self.id, signer=signer)\n', (5648, 5680), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((6837, 6951), 'os.path.join', 'os.path.join', (['current_app.root_path', '"""static"""', '"""memos"""', 'f"""{self.user_id}"""', 'f"""{self.number}"""', 'f"""{self.version}"""'], {}), "(current_app.root_path, 'static', 'memos', f'{self.user_id}',\n f'{self.number}', 
f'{self.version}')\n", (6849, 6951), False, 'import os\n'), ((7060, 7152), 'os.path.join', 'os.path.join', (['"""/static"""', '"""memos"""', 'f"""{self.user_id}"""', 'f"""{self.number}"""', 'f"""{self.version}"""'], {}), "('/static', 'memos', f'{self.user_id}', f'{self.number}',\n f'{self.version}')\n", (7072, 7152), False, 'import os\n'), ((8138, 8170), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (8149, 8170), False, 'import os\n'), ((8247, 8323), 'os.path.join', 'os.path.join', (['path', 'f"""meta-{self.user_id}-{self.number}-{self.version}.json"""'], {}), "(path, f'meta-{self.user_id}-{self.number}-{self.version}.json')\n", (8259, 8323), False, 'import os\n'), ((8358, 8374), 'json.dump', 'json.dump', (['js', 'f'], {}), '(js, f)\n', (8367, 8374), False, 'import json\n'), ((8537, 8568), 'memos.models.MemoSignature.MemoSignature.get_signers', 'MemoSignature.get_signers', (['self'], {}), '(self)\n', (8562, 8568), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((8880, 8914), 'memos.models.MemoSignature.MemoSignature.delete_signers', 'MemoSignature.delete_signers', (['self'], {}), '(self)\n', (8908, 8914), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((8932, 8966), 'memos.models.User.User.valid_usernames', 'User.valid_usernames', (['signer_names'], {}), '(signer_names)\n', (8952, 8966), False, 'from memos.models.User import User\n'), ((9301, 9325), 're.split', 're.split', (['"""-"""', 'reference'], {}), "('-', reference)\n", (9309, 9325), False, 'import re\n'), ((9483, 9535), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""references ={references}"""'], {}), "(f'references ={references}')\n", (9506, 9535), False, 'from flask import current_app\n'), ((9630, 9673), 're.split', 're.split', (['"""\\\\s|\\\\,|\\\\t|\\\\;|\\\\:"""', 'references'], {}), "('\\\\s|\\\\,|\\\\t|\\\\;|\\\\:', references)\n", (9638, 9673), False, 'import re\n'), ((10799, 10827), 'memos.models.MemoReference.MemoReference.get_refs', 'MemoReference.get_refs', (['self'], {}), '(self)\n', (10821, 10827), False, 'from memos.models.MemoReference import MemoReference\n'), ((11746, 11779), 'memos.models.MemoReference.MemoReference.get_back_refs', 'MemoReference.get_back_refs', (['self'], {}), '(self)\n', (11773, 11779), False, 'from memos.models.MemoReference import MemoReference\n'), ((12100, 12188), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""get_next_version {memo.id} {memo.number} {memo.version}"""'], {}), "(\n f'get_next_version {memo.id} {memo.number} {memo.version}')\n", (12123, 12188), False, 'from flask import current_app\n'), ((12275, 12288), 'memos.revletter.b10_to_rev', 'b10_to_rev', (['(1)'], {}), '(1)\n', (12285, 12288), False, 'from memos.revletter import b10_to_rev, rev_to_b10\n'), ((12338, 12358), 'memos.db.session.add', 'db.session.add', (['self'], {}), '(self)\n', (12352, 12358), False, 'from memos import db\n'), ((12367, 12386), 'memos.db.session.commit', 'db.session.commit', ([], {}), '()\n', (12384, 12386), False, 'from memos import db\n'), ((14677, 14694), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (14692, 14694), False, 'from datetime import datetime\n'), ((14784, 14861), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Notify Distribution {self.distribution} {message}"""'], {}), "(f'Notify Distribution {self.distribution} {message}')\n", (14807, 14861), False, 'from flask import current_app\n'), ((14925, 14977), 
'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Notify signers {message}"""'], {}), "(f'Notify signers {message}')\n", (14948, 14977), False, 'from flask import current_app\n'), ((17922, 18012), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'new_memo', 'memo_activity': 'MemoActivity.Create', 'user': 'delegate'}), '(memo=new_memo, memo_activity=MemoActivity.Create, user\n =delegate)\n', (17942, 18012), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((18104, 18169), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""signer = {signer} delegate={delegate}"""'], {}), "(f'signer = {signer} delegate={delegate}')\n", (18127, 18169), False, 'from flask import current_app\n'), ((18321, 18363), 'flask.current_app.logger.info', 'current_app.logger.info', (['"""allowed to sign"""'], {}), "('allowed to sign')\n", (18344, 18363), False, 'from flask import current_app\n'), ((18372, 18417), 'memos.models.MemoSignature.MemoSignature.sign', 'MemoSignature.sign', (['self.id', 'signer', 'delegate'], {}), '(self.id, signer, delegate)\n', (18390, 18417), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((18424, 18503), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'self', 'user': 'delegate', 'memo_activity': 'MemoActivity.Sign'}), '(memo=self, user=delegate, memo_activity=MemoActivity.Sign)\n', (18444, 18503), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((18738, 18785), 'memos.models.MemoSignature.MemoSignature.unsign', 'MemoSignature.unsign', (['self.id', 'signer', 'delegate'], {}), '(self.id, signer, delegate)\n', (18758, 18785), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((18792, 18878), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'self', 'user': 'delegate', 'memo_activity': 'MemoActivity.Unsign'}), '(memo=self, user=delegate, memo_activity=MemoActivity.\n Unsign)\n', (18812, 18878), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((19023, 19087), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Obsolete: {self} Delegate={delegate}"""'], {}), "(f'Obsolete: {self} Delegate={delegate}')\n", (19046, 19087), False, 'from flask import current_app\n'), ((19256, 19273), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (19271, 19273), False, 'from datetime import datetime\n'), ((19303, 19320), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (19318, 19320), False, 'from datetime import datetime\n'), ((19329, 19417), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'self', 'user': 'delegate', 'memo_activity': 'MemoActivity.Obsolete'}), '(memo=self, user=delegate, memo_activity=MemoActivity.\n Obsolete)\n', (19349, 19417), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((19513, 19575), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Cancel: {self} Delegate={delegate}"""'], {}), "(f'Cancel: {self} Delegate={delegate}')\n", (19536, 19575), False, 'from flask import current_app\n'), ((19723, 19744), 'memos.models.MemoFile.MemoFile.delete', 'MemoFile.delete', (['self'], {}), '(self)\n', (19738, 19744), False, 'from memos.models.MemoFile import MemoFile\n'), ((19882, 19908), 'memos.models.MemoReference.MemoReference.delete', 'MemoReference.delete', (['self'], {}), '(self)\n', (19902, 19908), False, 'from memos.models.MemoReference import MemoReference\n'), 
((19917, 19951), 'memos.models.MemoSignature.MemoSignature.delete_signers', 'MemoSignature.delete_signers', (['self'], {}), '(self)\n', (19945, 19951), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((19960, 20046), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'self', 'user': 'delegate', 'memo_activity': 'MemoActivity.Cancel'}), '(memo=self, user=delegate, memo_activity=MemoActivity.\n Cancel)\n', (19980, 20046), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((20049, 20072), 'memos.db.session.delete', 'db.session.delete', (['self'], {}), '(self)\n', (20066, 20072), False, 'from memos import db\n'), ((20081, 20100), 'memos.db.session.commit', 'db.session.commit', ([], {}), '()\n', (20098, 20100), False, 'from memos import db\n'), ((20116, 20153), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Canceling"""'], {}), "(f'Canceling')\n", (20139, 20153), False, 'from flask import current_app\n'), ((20259, 20324), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""signer = {signer} delegate={delegate}"""'], {}), "(f'signer = {signer} delegate={delegate}')\n", (20282, 20324), False, 'from flask import current_app\n'), ((20486, 20503), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (20501, 20503), False, 'from datetime import datetime\n'), ((20610, 20696), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'self', 'memo_activity': 'MemoActivity.Reject', 'user': 'delegate'}), '(memo=self, memo_activity=MemoActivity.Reject, user=\n delegate)\n', (20630, 20696), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((20698, 20728), 'memos.models.MemoSignature.MemoSignature.unsign_all', 'MemoSignature.unsign_all', (['self'], {}), '(self)\n', (20722, 20728), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((21336, 21427), 'flask.current_app.logger.debug', 'current_app.logger.debug', (['f"""FIND: Looking for {username}/{memo_number}/{memo_version}"""'], {}), "(\n f'FIND: Looking for {username}/{memo_number}/{memo_version}')\n", (21360, 21427), False, 'from flask import current_app\n'), ((21641, 21690), 'flask.current_app.logger.debug', 'current_app.logger.debug', (['f"""Found Memo id={memo}"""'], {}), "(f'Found Memo id={memo}')\n", (21665, 21690), False, 'from flask import current_app\n'), ((22966, 23014), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Search title={title}"""'], {}), "(f'Search title={title}')\n", (22989, 23014), False, 'from flask import current_app\n'), ((23954, 24002), 'memos.models.MemoSignature.MemoSignature.get_signatures', 'MemoSignature.get_signatures', (['user'], {'signed': '(False)'}), '(user, signed=False)\n', (23982, 24002), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((6539, 6588), 're.split', 're.split', (['"""\\\\s|\\\\,|\t|\\\\;|\\\\:"""', 'self.distribution'], {}), "('\\\\s|\\\\,|\\t|\\\\;|\\\\:', self.distribution)\n", (6547, 6588), False, 'import re\n'), ((8622, 8655), 'memos.models.User.User.find', 'User.find', ([], {'username': 'sig.signer_id'}), '(username=sig.signer_id)\n', (8631, 8655), False, 'from memos.models.User import User\n'), ((8683, 8718), 'memos.models.User.User.find', 'User.find', ([], {'username': 'sig.delegate_id'}), '(username=sig.delegate_id)\n', (8692, 8718), False, 'from memos.models.User import User\n'), ((9024, 9074), 'memos.models.MemoSignature.MemoSignature.add_signer', 'MemoSignature.add_signer', ([], {'memo': 
'self', 'signer': 'signer'}), '(memo=self, signer=signer)\n', (9048, 9074), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((10221, 10262), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Memo = {memo}"""'], {}), "(f'Memo = {memo}')\n", (10244, 10262), False, 'from flask import current_app\n'), ((11528, 11561), 'memos.models.User.User.find', 'User.find', ([], {'username': 'parsed_ref[0]'}), '(username=parsed_ref[0])\n', (11537, 11561), False, 'from memos.models.User import User\n'), ((11574, 11699), 'memos.models.MemoReference.MemoReference.add_ref', 'MemoReference.add_ref', (['self.id'], {'ref_user_id': 'user.username', 'ref_memo_number': 'parsed_ref[1]', 'ref_memo_version': 'parsed_ref[2]'}), '(self.id, ref_user_id=user.username, ref_memo_number=\n parsed_ref[1], ref_memo_version=parsed_ref[2])\n', (11595, 11699), False, 'from memos.models.MemoReference import MemoReference\n'), ((14148, 14177), 'memos.models.MemoSignature.MemoSignature.status', 'MemoSignature.status', (['self.id'], {}), '(self.id)\n', (14168, 14177), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((16590, 16680), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'new_memo', 'memo_activity': 'MemoActivity.Create', 'user': 'delegate'}), '(memo=new_memo, memo_activity=MemoActivity.Create, user\n =delegate)\n', (16610, 16680), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((16699, 16755), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Creating new memo {new_memo}"""'], {}), "(f'Creating new memo {new_memo}')\n", (16722, 16755), False, 'from flask import current_app\n'), ((16860, 16913), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Found a draft memo {memo}"""'], {}), "(f'Found a draft memo {memo}')\n", (16883, 16913), False, 'from flask import current_app\n'), ((18229, 18278), 'flask.current_app.logger.info', 'current_app.logger.info', (['"""NOT!!@ allowed to sign"""'], {}), "('NOT!!@ allowed to sign')\n", (18252, 18278), False, 'from flask import current_app\n'), ((7275, 7316), 'memos.models.MemoFile.MemoFile.query.filter_by', 'MemoFile.query.filter_by', ([], {'memo_id': 'self.id'}), '(memo_id=self.id)\n', (7299, 7316), False, 'from memos.models.MemoFile import MemoFile\n'), ((9883, 9986), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""INVALID length append {memo_ref} valid={valid_memos} invalid {invalid}"""'], {}), "(\n f'INVALID length append {memo_ref} valid={valid_memos} invalid {invalid}')\n", (9906, 9986), False, 'from flask import current_app\n'), ((12994, 13080), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'memo', 'memo_activity': 'MemoActivity.Obsolete', 'user': 'acting'}), '(memo=memo, memo_activity=MemoActivity.Obsolete, user=\n acting)\n', (13014, 13080), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((13332, 13361), 'memos.models.MemoSignature.MemoSignature.status', 'MemoSignature.status', (['self.id'], {}), '(self.id)\n', (13352, 13361), False, 'from memos.models.MemoSignature import MemoSignature\n'), ((13459, 13476), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (13474, 13476), False, 'from datetime import datetime\n'), ((13493, 13578), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'self', 'memo_activity': 'MemoActivity.Signoff', 'user': 'acting'}), '(memo=self, memo_activity=MemoActivity.Signoff, user=acting\n )\n', 
(13513, 13578), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((13793, 13810), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (13808, 13810), False, 'from datetime import datetime\n'), ((13827, 13913), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'self', 'memo_activity': 'MemoActivity.Activate', 'user': 'acting'}), '(memo=self, memo_activity=MemoActivity.Activate, user=\n acting)\n', (13847, 13913), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((14265, 14282), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (14280, 14282), False, 'from datetime import datetime\n'), ((14418, 14504), 'memos.models.MemoHistory.MemoHistory.activity', 'MemoHistory.activity', ([], {'memo': 'self', 'memo_activity': 'MemoActivity.Activate', 'user': 'acting'}), '(memo=self, memo_activity=MemoActivity.Activate, user=\n acting)\n', (14438, 14504), False, 'from memos.models.MemoHistory import MemoHistory\n'), ((14587, 14640), 'flask.current_app.logger.info', 'current_app.logger.info', (['f"""Signatures Still Required"""'], {}), "(f'Signatures Still Required')\n", (14610, 14640), False, 'from flask import current_app\n'), ((17513, 17530), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (17528, 17530), False, 'from datetime import datetime\n'), ((17575, 17592), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (17590, 17592), False, 'from datetime import datetime\n'), ((12231, 12255), 'memos.revletter.rev_to_b10', 'rev_to_b10', (['memo.version'], {}), '(memo.version)\n', (12241, 12255), False, 'from memos.revletter import b10_to_rev, rev_to_b10\n'), ((16412, 16429), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (16427, 16429), False, 'from datetime import datetime\n'), ((16474, 16491), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (16489, 16491), False, 'from datetime import datetime\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import random
class JuliaSet:
def __init__(self):
"""
Constructor of the JuliaSet class
:param size: size in pixels (for both width and height)
:param dpi: dots per inch (default 300)
"""
# Initialize image related parameters
self.size = 256
self.dpi = 300
self.norm = True
self.mirror = False
# Initialize process related parameters
self.escrad = 3
self.niter = 250
def param(self, **kwargs):
"""
Get parameters from input dictionary and set attributes.
:param kwargs: a dictionary in the form
`{'arg1':value, ..., 'argN': value}`
"""
# Check if kwargs in not empty
if kwargs is not None:
# Image related parameters
if 'size' in kwargs:
self.size = kwargs.pop('size', 256)
if 'dpi' in kwargs:
self.dpi = kwargs.pop('dpi', 300)
if 'norm' in kwargs:
self.norm = kwargs.pop('norm', True)
if 'mirror' in kwargs:
self.mirror = kwargs.pop('mirror', False)
# Process related parameters
if 'escrad' in kwargs:
self.escrad = kwargs.pop('escrad', 3)
if 'niter' in kwargs:
self.niter = kwargs.pop('niter', 250)
            # If kwargs is still not empty, the remaining keywords are invalid
if kwargs:
print("{} are invalid keyword arguments!".format(kwargs.keys()))
    def run(self, show=False, fname='juliaset-output'):
        """
        Run the Julia set generator.
        The `mirror` attribute: if True the Julia set is mirrored horizontally
            and vertically; each mirror is concatenated with the original
            to produce a new image.
        The `norm` attribute: if True the Julia set is normalized by its
            absolute maximum value.
        :param show: if show is `False` the output image will be
            written as a PNG file named `fname`
        :param fname: Name of the output PNG file to write on disk
        """
# Get a complex value among a list of best Julia sets
cpxNum = self.getComplexValue()
# Get the target area
# For more randomness, the target area is a random
# subset of a wide one defined with x[-1.5, 1.5] and
# y[-1.5, 1.5]
xrng, yrng = self.getTargetArea()
# Process
julia = self.processJulia(cpxNum, xrng, yrng)
# Normalization
if(self.norm):
julia /= np.amax(np.abs(julia))
# Mirroring
if(self.mirror):
# Horizontal mirroring and concatenate
juliamirror = np.flip(julia, axis=1)
julia = np.concatenate((julia, juliamirror), axis=1)
# Vertical mirroring and concatenate
juliamirror = np.flip(julia, axis=0)
julia = np.concatenate((julia, juliamirror), axis=0)
# Plot the output with a random colormap using matplotlib
self.plotJuliaSet(julia, show=show, fname=fname)
def getComplexValue(self):
"""
Random choice in a list of best complex values for Julia
sets (real, imag).
:return cpxNum: a semi-random complex value
"""
# Define the list of best complex values
cpxList = [
(-0.10, 0.650), (0.00, 0.80), (0.370, 0.100),
(0.355, 0.355), (-0.54, 0.54), (0.340, -0.05),
(0.37, 0.10), (0.355, 0.355)
]
# Randomly choose one
cpxTmp = random.choice(cpxList)
# Manipulate the base value slightly to make it a little more unique
cpxNum = self.twearkComplex(cpxTmp)
return cpxNum
def twearkComplex(self, cpxTmp):
"""
Manipulate the base value slightly to make it a little more unique.
:param cpxTmp: complex value to modify
        :return cpxNum: a slightly manipulated version of the input
        """
        # Get the sign for the imaginary part
        isign = random.randrange(-1, 1, 2)
        # Get a value variation for the real and imaginary parts
        # The possible variation range is fixed at +/- 2% to stay
        # in the neighborhood of the initial value
rsigma = random.uniform(0.98, 1.02)
isigma = random.uniform(0.98, 1.02)
# Apply modification and return the new complex value
realPart = cpxTmp[0] * rsigma
imagPart = cpxTmp[1] * isigma * isign
return complex(realPart, imagPart)
def getTargetArea(self):
"""
For more randomness, the target area is a random
subset of a wide one defined with x[-1.5, 1.5] and
y[-1.5, 1.5]
:return xrng, yrng: tuples containing (xmin, xmax)
and (ymin, ymax)
"""
# Randomly choose the center of the target area
# Possible values are in [-1.0, 1.0] to stay in an
# area where there are always pieces of fractals
xctr = random.uniform(-1.0,1.0)
yctr = random.uniform(-1.0,1.0)
# Extend around the center
xrng = (xctr-0.5, xctr+0.5)
yrng = (yctr-0.5, yctr+0.5)
return xrng, yrng
def processJulia(self, cpxNum, xrng, yrng):
"""
Calculate the Julia set for the given input parameters.
:param cpxNum: complex value acting as a seed for the Julia set
:param xrng: range of values (min, max) for the x-axis
:param yrng: range of values (min, max) for the y-axis
:param escrad: escape radius
:param niter: maximum number of iterations
"""
        # Initialize numpy array of dimensions (size, size) with ones
julia = np.ones((self.size, self.size), dtype=np.float32)
# Calculate the width (equal to height) of the image since the
# image is defined as a square
width = xrng[1] - xrng[0] # xmax - xmin = ymax - ymin
# Randomly choose the sign of the shade
#ssign = random.randrange(-1, 1, 2)
ssign = -1.
# Loop over x range
for ix in range(self.size):
# Get the pixel position in the complex plane
# For the real part
realPart = float(ix) / self.size * width + xrng[0]
# Loop over y range
for iy in range(self.size):
# Get the pixel position in the complex plane
# For the imaginary part
imagPart = float(iy) / self.size * width + yrng[0]
# Build the complex
cpxTmp = complex(realPart, imagPart)
# Initialize iteration counter
it = 0
# Loop over iterations
while(np.abs(cpxTmp) <= self.escrad**2 and it < self.niter):
# Quadratic polynomial
cpxTmp = cpxTmp**2 + cpxNum
# Increment iteration counter
it += 1
                # Calculate the shade (a cool trick found somewhere on the net)
shade = 1. - np.sqrt(it/self.niter)
# Fill the outpout array
julia[ix][iy] = ssign * shade
return julia
    def plotJuliaSet(self, julia, fname='juliaset-output', show=False):
        """
        Plot the output Julia set and show it in a matplotlib window or
        write it on disk as a png file.
        :param julia: the Julia set
        :param show: if show is `False` the output image will be
            written as a PNG file named `fname`
        :param fname: Name of the output PNG file to write on disk
        """
# List of beautiful colormap for Julia sets
cmapList = [
cm.Blues, cm.Greens, cm.Purples, cm.hot, cm.inferno,
cm.binary, cm.rainbow, cm.twilight_shifted, cm.plasma
]
        # Randomly choose one colormap
cmapName = random.choice(cmapList)
# Plot the image with a gaussian interpolation
fig = plt.gcf()
fig.set_size_inches(3., 3.)
plt.imshow(julia, interpolation='gaussian', cmap=cmapName)
# Disable axis
plt.axis('off')
if(show):
plt.show()
else:
# Write on disk
fig.savefig(fname+".png", dpi=self.dpi, pad_inches=0.05, bbox_inches='tight')
def julia(**kwargs):
"""
temp
"""
# Initialize Julia Set instance
juliaInstance = JuliaSet()
    # If kwargs is not empty, update the attributes
    if kwargs:
juliaInstance.param(**kwargs)
return juliaInstance
if __name__ == "__main__":
# execute only if run as a script
genJuliaSet = JuliaSet()
genJuliaSet.param()
genJuliaSet.run()
| [
"matplotlib.pyplot.imshow",
"numpy.flip",
"random.uniform",
"random.choice",
"numpy.abs",
"numpy.ones",
"numpy.sqrt",
"random.randrange",
"matplotlib.pyplot.gcf",
"numpy.concatenate",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show"
] | [((3661, 3683), 'random.choice', 'random.choice', (['cpxList'], {}), '(cpxList)\n', (3674, 3683), False, 'import random\n'), ((4146, 4172), 'random.randrange', 'random.randrange', (['(-1)', '(1)', '(2)'], {}), '(-1, 1, 2)\n', (4162, 4172), False, 'import random\n'), ((4374, 4400), 'random.uniform', 'random.uniform', (['(0.98)', '(1.02)'], {}), '(0.98, 1.02)\n', (4388, 4400), False, 'import random\n'), ((4418, 4444), 'random.uniform', 'random.uniform', (['(0.98)', '(1.02)'], {}), '(0.98, 1.02)\n', (4432, 4444), False, 'import random\n'), ((5105, 5130), 'random.uniform', 'random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (5119, 5130), False, 'import random\n'), ((5145, 5170), 'random.uniform', 'random.uniform', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (5159, 5170), False, 'import random\n'), ((5817, 5866), 'numpy.ones', 'np.ones', (['(self.size, self.size)'], {'dtype': 'np.float32'}), '((self.size, self.size), dtype=np.float32)\n', (5824, 5866), True, 'import numpy as np\n'), ((8018, 8041), 'random.choice', 'random.choice', (['cmapList'], {}), '(cmapList)\n', (8031, 8041), False, 'import random\n'), ((8112, 8121), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (8119, 8121), True, 'import matplotlib.pyplot as plt\n'), ((8166, 8224), 'matplotlib.pyplot.imshow', 'plt.imshow', (['julia'], {'interpolation': '"""gaussian"""', 'cmap': 'cmapName'}), "(julia, interpolation='gaussian', cmap=cmapName)\n", (8176, 8224), True, 'import matplotlib.pyplot as plt\n'), ((8265, 8280), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8273, 8280), True, 'import matplotlib.pyplot as plt\n'), ((2795, 2817), 'numpy.flip', 'np.flip', (['julia'], {'axis': '(1)'}), '(julia, axis=1)\n', (2802, 2817), True, 'import numpy as np\n'), ((2838, 2882), 'numpy.concatenate', 'np.concatenate', (['(julia, juliamirror)'], {'axis': '(1)'}), '((julia, juliamirror), axis=1)\n', (2852, 2882), True, 'import numpy as np\n'), ((2958, 2980), 'numpy.flip', 'np.flip', (['julia'], {'axis': '(0)'}), '(julia, axis=0)\n', (2965, 2980), True, 'import numpy as np\n'), ((3001, 3045), 'numpy.concatenate', 'np.concatenate', (['(julia, juliamirror)'], {'axis': '(0)'}), '((julia, juliamirror), axis=0)\n', (3015, 3045), True, 'import numpy as np\n'), ((8312, 8322), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8320, 8322), True, 'import matplotlib.pyplot as plt\n'), ((2657, 2670), 'numpy.abs', 'np.abs', (['julia'], {}), '(julia)\n', (2663, 2670), True, 'import numpy as np\n'), ((7187, 7211), 'numpy.sqrt', 'np.sqrt', (['(it / self.niter)'], {}), '(it / self.niter)\n', (7194, 7211), True, 'import numpy as np\n'), ((6838, 6852), 'numpy.abs', 'np.abs', (['cpxTmp'], {}), '(cpxTmp)\n', (6844, 6852), True, 'import numpy as np\n')] |
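As an aside, the per-pixel loop in processJulia above is the classic escape-time rule for the quadratic map z -> z**2 + c; a minimal standalone sketch (with made-up seed values) is:

# Illustrative escape-time sketch for a single point of a Julia set.
def escape_iterations(z, c, radius=2.0, max_iter=250):
    it = 0
    while abs(z) <= radius and it < max_iter:
        z = z * z + c              # quadratic polynomial step
        it += 1
    return it                      # max_iter means the point likely never escapes

print(escape_iterations(0.1 + 0.2j, complex(-0.10, 0.65)))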
import numpy as np
from keras import backend as K
import os
import sys
K.set_image_dim_ordering('tf')
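# Note: set_image_dim_ordering('tf') is the older Keras backend API; on newer
# Keras versions the equivalent call is K.set_image_data_format('channels_last').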
def patch_path(path):
return os.path.join(os.path.dirname(__file__), path)
def main():
sys.path.append(patch_path('..'))
data_dir_path = patch_path('very_large_data')
model_dir_path = patch_path('models/UCF-101')
from keras_video_classifier.library.convolutional import CnnVideoClassifier
from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf, scan_ucf_with_labels
config_file_path = CnnVideoClassifier.get_config_file_path(model_dir_path)
weight_file_path = CnnVideoClassifier.get_weight_file_path(model_dir_path)
np.random.seed(42)
load_ucf(data_dir_path)
predictor = CnnVideoClassifier()
predictor.load_model(config_file_path, weight_file_path)
videos = scan_ucf_with_labels(data_dir_path, [label for (label, label_index) in predictor.labels.items()])
video_file_path_list = np.array([file_path for file_path in videos.keys()])
np.random.shuffle(video_file_path_list)
for video_file_path in video_file_path_list:
label = videos[video_file_path]
predicted_label = predictor.predict(video_file_path)
print('predicted: ' + predicted_label + ' actual: ' + label)
if __name__ == '__main__':
main() | [
"keras_video_classifier.library.utility.ucf.UCF101_loader.load_ucf",
"numpy.random.shuffle",
"os.path.dirname",
"keras_video_classifier.library.convolutional.CnnVideoClassifier.get_weight_file_path",
"numpy.random.seed",
"keras_video_classifier.library.convolutional.CnnVideoClassifier.get_config_file_path",
"keras.backend.set_image_dim_ordering",
"keras_video_classifier.library.convolutional.CnnVideoClassifier"
] | [((72, 102), 'keras.backend.set_image_dim_ordering', 'K.set_image_dim_ordering', (['"""tf"""'], {}), "('tf')\n", (96, 102), True, 'from keras import backend as K\n'), ((545, 600), 'keras_video_classifier.library.convolutional.CnnVideoClassifier.get_config_file_path', 'CnnVideoClassifier.get_config_file_path', (['model_dir_path'], {}), '(model_dir_path)\n', (584, 600), False, 'from keras_video_classifier.library.convolutional import CnnVideoClassifier\n'), ((624, 679), 'keras_video_classifier.library.convolutional.CnnVideoClassifier.get_weight_file_path', 'CnnVideoClassifier.get_weight_file_path', (['model_dir_path'], {}), '(model_dir_path)\n', (663, 679), False, 'from keras_video_classifier.library.convolutional import CnnVideoClassifier\n'), ((685, 703), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (699, 703), True, 'import numpy as np\n'), ((709, 732), 'keras_video_classifier.library.utility.ucf.UCF101_loader.load_ucf', 'load_ucf', (['data_dir_path'], {}), '(data_dir_path)\n', (717, 732), False, 'from keras_video_classifier.library.utility.ucf.UCF101_loader import load_ucf, scan_ucf_with_labels\n'), ((750, 770), 'keras_video_classifier.library.convolutional.CnnVideoClassifier', 'CnnVideoClassifier', ([], {}), '()\n', (768, 770), False, 'from keras_video_classifier.library.convolutional import CnnVideoClassifier\n'), ((1029, 1068), 'numpy.random.shuffle', 'np.random.shuffle', (['video_file_path_list'], {}), '(video_file_path_list)\n', (1046, 1068), True, 'import numpy as np\n'), ((151, 176), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (166, 176), False, 'import os\n')] |
'''
-------------------------------------
Assignment 2 - EE2703 (Jan-May 2020)
Done by <NAME> (EE18B122)
Created on 18/01/20
Last Modified on 04/02/20
-------------------------------------
'''
# importing necessary libraries
import sys
import cmath
import numpy as np
import pandas as pd
# To improve readability
CIRCUIT_START = ".circuit"
CIRCUIT_END = ".end"
RESISTOR = "R"
CAPACITOR = "C"
INDUCTOR = "L"
IVS = "V"
ICS = "I"
VCVS = "E"
VCCS = "G"
CCVS = "H"
CCCS = "F"
PI = np.pi
# Classes for each circuit component
class resistor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class inductor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class capacitor:
def __init__(self, name, n1, n2, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
class voltageSource:
def __init__(self, name, n1, n2, val, phase=0):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.phase = float(phase)
class currentSource:
def __init__(self, name, n1, n2, val, phase=0):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.phase = float(phase)
class vcvs:
def __init__(self, name, n1, n2, n3, n4, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.node3 = n3
self.node4 = n4
class vccs:
def __init__(self, name, n1, n2, n3, n4, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.node3 = n3
self.node4 = n4
class ccvs:
def __init__(self, name, n1, n2, vName, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.vSource = vName
class cccs:
def __init__(self, name, n1, n2, vName, val):
self.name = name
self.value = enggToMath(val)
self.node1 = n1
self.node2 = n2
self.vSource = vName
# Convert a number in engineer's format to math
def enggToMath(enggNumber):
try:
return float(enggNumber)
except:
lenEnggNumber = len(enggNumber)
# Kilo
        if enggNumber[lenEnggNumber-1] == 'k':
            base = float(enggNumber[0:lenEnggNumber-1])  # float, so fractional values like "4.7k" parse too
            return base*1e3
        # Milli
        elif enggNumber[lenEnggNumber-1] == 'm':
            base = float(enggNumber[0:lenEnggNumber-1])
            return base*1e-3
        # Micro
        elif enggNumber[lenEnggNumber-1] == 'u':
            base = float(enggNumber[0:lenEnggNumber-1])
            return base*1e-6
        # Nano
        elif enggNumber[lenEnggNumber-1] == 'n':
            base = float(enggNumber[0:lenEnggNumber-1])
            return base*1e-9
        # Mega
        elif enggNumber[lenEnggNumber-1] == 'M':
            base = float(enggNumber[0:lenEnggNumber-1])
            return base*1e6
else:
sys.exit("Please check the component values given. Supported engineer units are: M, k, m, u, n\nYou can also enter values in exponential format (eg. 1e3 = 1000).")
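# Illustrative behaviour of enggToMath() (a sketch of expected outputs):
#   enggToMath("2k")  -> 2000.0      enggToMath("5u")  -> 5e-06
#   enggToMath("10m") -> 0.01        enggToMath("1e3") -> 1000.0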
if __name__ == "__main__":
# checking number of command line arguments
if len(sys.argv)!=2 :
sys.exit("Invalid number of arguments!")
else:
try:
circuitFile = sys.argv[1]
circuitFreq = 1e-100
circuitComponents = { RESISTOR: [], CAPACITOR: [], INDUCTOR: [], IVS: [], ICS: [], VCVS: [], VCCS: [], CCVS: [], CCCS: [] }
circuitNodes = []
# checking if given netlist file is of correct type
if (not circuitFile.endswith(".netlist")):
print("Wrong file type!")
else:
netlistFileLines = []
with open (circuitFile, "r") as f:
for line in f.readlines():
netlistFileLines.append(line.split('#')[0].split('\n')[0])
# Getting frequency, if any
if(line[:3] == '.ac'):
circuitFreq = float(line.split()[2])
# Setting Angular Frequency w
w = 2*PI*circuitFreq
try:
# Finding the location of the identifiers
identifier1 = netlistFileLines.index(CIRCUIT_START)
identifier2 = netlistFileLines.index(CIRCUIT_END)
circuitBody = netlistFileLines[identifier1+1:identifier2]
for line in circuitBody:
# Extracting the data from the line
lineTokens = line.split()
# Appending new nodes to a list
try:
if lineTokens[1] not in circuitNodes:
circuitNodes.append(lineTokens[1])
if lineTokens[2] not in circuitNodes:
circuitNodes.append(lineTokens[2])
except IndexError:
continue
# Resistor
if lineTokens[0][0] == RESISTOR:
circuitComponents[RESISTOR].append(resistor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Capacitor
elif lineTokens[0][0] == CAPACITOR:
circuitComponents[CAPACITOR].append(capacitor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Inductor
elif lineTokens[0][0] == INDUCTOR:
circuitComponents[INDUCTOR].append(inductor(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3]))
# Voltage Source
elif lineTokens[0][0] == IVS:
if len(lineTokens) == 5: # DC Source
circuitComponents[IVS].append(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
elif len(lineTokens) == 6: # AC Source
if circuitFreq == 1e-100:
sys.exit("Frequency of AC Source not specified!!")
circuitComponents[IVS].append(voltageSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))
# Current Source
elif lineTokens[0][0] == ICS:
if len(lineTokens) == 5: # DC Source
circuitComponents[ICS].append(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])))
elif len(lineTokens) == 6: # AC Source
if circuitFreq == 1e-100:
sys.exit("Frequency of AC Source not specified!!")
circuitComponents[ICS].append(currentSource(lineTokens[0], lineTokens[1], lineTokens[2], float(lineTokens[4])/2, lineTokens[5]))
# VCVS
elif lineTokens[0][0] == VCVS:
circuitComponents[VCVS].append(vcvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))
# VCCS
elif lineTokens[0][0] == VCCS:
                                circuitComponents[VCCS].append(vccs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4], lineTokens[5]))
# CCVS
elif lineTokens[0][0] == CCVS:
circuitComponents[CCVS].append(ccvs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))
# CCCS
elif lineTokens[0][0] == CCCS:
circuitComponents[CCCS].append(cccs(lineTokens[0], lineTokens[1], lineTokens[2], lineTokens[3], lineTokens[4]))
# Erroneous Component Name
else:
sys.exit("Wrong Component Given. ABORT!")
try:
circuitNodes.remove('GND')
circuitNodes = ['GND'] + circuitNodes
except:
sys.exit("No ground node specified in the circuit!!")
# Creating a dictionary with node names and their numbers (to reduce the time taken by later parts of the program)
nodeNumbers = {circuitNodes[i]:i for i in range(len(circuitNodes))}
numNodes = len(circuitNodes)
numVS = len(circuitComponents[IVS])+len(circuitComponents[VCVS])+len(circuitComponents[CCVS])
# Creating Matrices M and b
matrixM = np.zeros((numNodes+numVS, numNodes+numVS), np.complex)
matrixB = np.zeros((numNodes+numVS,), np.complex)
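                        # Layout of the MNA system assembled below: the first numNodes
                        # rows/columns hold the node-voltage equations (row 0 pins GND to 0),
                        # and the remaining numVS rows/columns carry the auxiliary currents
                        # through the voltage sources (IVS, VCVS, CCVS), so solving
                        # matrixM @ x = matrixB yields node voltages followed by source currents.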
# GND Equation
matrixM[0][0] = 1.0
# Resistor Equations
for r in circuitComponents[RESISTOR]:
if r.node1 != 'GND':
matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node1]] += 1/r.value
matrixM[nodeNumbers[r.node1]][nodeNumbers[r.node2]] -= 1/r.value
if r.node2 != 'GND':
matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node1]] -= 1/r.value
matrixM[nodeNumbers[r.node2]][nodeNumbers[r.node2]] += 1/r.value
# Capacitor Equations
for c in circuitComponents[CAPACITOR]:
if c.node1 != 'GND':
matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node1]] += complex(0, w*c.value)
matrixM[nodeNumbers[c.node1]][nodeNumbers[c.node2]] -= complex(0, w*c.value)
if c.node2 != 'GND':
matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node1]] -= complex(0, w*c.value)
matrixM[nodeNumbers[c.node2]][nodeNumbers[c.node2]] += complex(0, w*c.value)
# Inductor Equations
for l in circuitComponents[INDUCTOR]:
if l.node1 != 'GND':
matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node1]] += complex(0, -1.0/(w*l.value))
matrixM[nodeNumbers[l.node1]][nodeNumbers[l.node2]] -= complex(0, -1.0/(w*l.value))
if l.node2 != 'GND':
matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node1]] -= complex(0, -1.0/(w*l.value))
matrixM[nodeNumbers[l.node2]][nodeNumbers[l.node2]] += complex(0, -1.0/(w*l.value))
# Voltage Source Equations
for i in range(len(circuitComponents[IVS])):
# Equation accounting for current through the source
if circuitComponents[IVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[IVS][i].node1]][numNodes+i] = 1.0
if circuitComponents[IVS][i].node2 != 'GND':
matrixM[nodeNumbers[circuitComponents[IVS][i].node2]][numNodes+i] = -1.0
# Auxiliary Equations
matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node1]] = -1.0
matrixM[numNodes+i][nodeNumbers[circuitComponents[IVS][i].node2]] = +1.0
matrixB[numNodes+i] = cmath.rect(circuitComponents[IVS][i].value, circuitComponents[IVS][i].phase*PI/180)
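                            # cmath.rect(r, phi) returns the complex phasor r*exp(1j*phi);
                            # the stored phase is in degrees, hence the *PI/180 conversion above.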
# Current Source Equations
for i in circuitComponents[ICS]:
if i.node1 != 'GND':
matrixB[nodeNumbers[i.node1]] = -1*i.value
if i.node2 != 'GND':
matrixB[nodeNumbers[i.node2]] = i.value
# VCVS Equations
for i in range(len(circuitComponents[VCVS])):
# Equation accounting for current through the source
if circuitComponents[VCVS][i].node1 != 'GND':
matrixM[nodeNumbers[circuitComponents[VCVS][i].node1]][numNodes+len(circuitComponents[IVS])+i] = 1.0
if circuitComponents[VCVS][i].node2 != 'GND':
matrixM[nodeNumbers[circuitComponents[VCVS][i].node2]][numNodes+len(circuitComponents[IVS])+i] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node1]] = 1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node2]] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node3]] = -1.0*circuitComponents[VCVS][i].value
matrixM[numNodes+len(circuitComponents[IVS])+i][nodeNumbers[circuitComponents[VCVS][i].node4]] = 1.0*circuitComponents[VCVS][i].value
# CCVS Equations
for i in range(len(circuitComponents[CCVS])):
# Equation accounting for current through the source
                            if circuitComponents[CCVS][i].node1 != 'GND':
                                matrixM[nodeNumbers[circuitComponents[CCVS][i].node1]][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = 1.0
                            if circuitComponents[CCVS][i].node2 != 'GND':
                                matrixM[nodeNumbers[circuitComponents[CCVS][i].node2]][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node1]] = 1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][nodeNumbers[circuitComponents[CCVS][i].node2]] = -1.0
matrixM[numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i][numNodes+len(circuitComponents[IVS])+len(circuitComponents[VCVS])+i] = -1.0*circuitComponents[CCVS][i].value
# VCCS Equations
for vccs in circuitComponents[VCCS]:
if vccs.node1 != 'GND':
matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node4]]+=vccs.value
matrixM[nodeNumbers[vccs.node1]][nodeNumbers[vccs.node3]]-=vccs.value
                            if vccs.node2 != 'GND':
                                matrixM[nodeNumbers[vccs.node2]][nodeNumbers[vccs.node4]]-=vccs.value
                                matrixM[nodeNumbers[vccs.node2]][nodeNumbers[vccs.node3]]+=vccs.value
# CCCS Equations
for cccs in circuitComponents[CCCS]:
def getIndexIVS(vName):
for i in range(len(circuitComponents[IVS])):
if circuitComponents[IVS][i].name == vName:
return i
if cccs.node1 != 'GND':
matrixM[nodeNumbers[cccs.node1]][numNodes+getIndexIVS(cccs.vSource)]-=cccs.value
if cccs.node2 != 'GND':
matrixM[nodeNumbers[cccs.node2]][numNodes+getIndexIVS(cccs.vSource)]+=cccs.value
try:
x = np.linalg.solve(matrixM, matrixB)
circuitCurrents = []
# Formatting Output Data
for v in circuitComponents[IVS]:
circuitCurrents.append("current in "+v.name)
for v in circuitComponents[VCVS]:
circuitCurrents.append("current in "+v.name)
for v in circuitComponents[CCVS]:
circuitCurrents.append("current in "+v.name)
# Printing output in table format
print(pd.DataFrame(x, circuitNodes+circuitCurrents, columns=['Voltage / Current']))
print("The values given above are AMPLITUDE values and NOT RMS values.")
except np.linalg.LinAlgError:
sys.exit("Singular Matrix Formed! Please check if you have entered the circuit definition correctly!")
except ValueError:
sys.exit("Netlist does not abide to given format!")
except FileNotFoundError:
sys.exit("Given file does not exist!")
| [
"numpy.linalg.solve",
"cmath.rect",
"numpy.zeros",
"sys.exit",
"pandas.DataFrame"
] | [((3524, 3564), 'sys.exit', 'sys.exit', (['"""Invalid number of arguments!"""'], {}), "('Invalid number of arguments!')\n", (3532, 3564), False, 'import sys\n'), ((17150, 17188), 'sys.exit', 'sys.exit', (['"""Given file does not exist!"""'], {}), "('Given file does not exist!')\n", (17158, 17188), False, 'import sys\n'), ((9213, 9271), 'numpy.zeros', 'np.zeros', (['(numNodes + numVS, numNodes + numVS)', 'np.complex'], {}), '((numNodes + numVS, numNodes + numVS), np.complex)\n', (9221, 9271), True, 'import numpy as np\n'), ((9298, 9339), 'numpy.zeros', 'np.zeros', (['(numNodes + numVS,)', 'np.complex'], {}), '((numNodes + numVS,), np.complex)\n', (9306, 9339), True, 'import numpy as np\n'), ((12036, 12127), 'cmath.rect', 'cmath.rect', (['circuitComponents[IVS][i].value', '(circuitComponents[IVS][i].phase * PI / 180)'], {}), '(circuitComponents[IVS][i].value, circuitComponents[IVS][i].phase *\n PI / 180)\n', (12046, 12127), False, 'import cmath\n'), ((16037, 16070), 'numpy.linalg.solve', 'np.linalg.solve', (['matrixM', 'matrixB'], {}), '(matrixM, matrixB)\n', (16052, 16070), True, 'import numpy as np\n'), ((17052, 17103), 'sys.exit', 'sys.exit', (['"""Netlist does not abide to given format!"""'], {}), "('Netlist does not abide to given format!')\n", (17060, 17103), False, 'import sys\n'), ((8695, 8748), 'sys.exit', 'sys.exit', (['"""No ground node specified in the circuit!!"""'], {}), "('No ground node specified in the circuit!!')\n", (8703, 8748), False, 'import sys\n'), ((16645, 16723), 'pandas.DataFrame', 'pd.DataFrame', (['x', '(circuitNodes + circuitCurrents)'], {'columns': "['Voltage / Current']"}), "(x, circuitNodes + circuitCurrents, columns=['Voltage / Current'])\n", (16657, 16723), True, 'import pandas as pd\n'), ((16894, 17006), 'sys.exit', 'sys.exit', (['"""Singular Matrix Formed! Please check if you have entered the circuit definition correctly!"""'], {}), "(\n 'Singular Matrix Formed! Please check if you have entered the circuit definition correctly!'\n )\n", (16902, 17006), False, 'import sys\n'), ((3250, 3426), 'sys.exit', 'sys.exit', (['"""Please check the component values given. Supported engineer units are: M, k, m, u, n\nYou can also enter values in exponential format (eg. 1e3 = 1000)."""'], {}), '(\n """Please check the component values given. Supported engineer units are: M, k, m, u, n\nYou can also enter values in exponential format (eg. 1e3 = 1000)."""\n )\n', (3258, 3426), False, 'import sys\n'), ((6531, 6581), 'sys.exit', 'sys.exit', (['"""Frequency of AC Source not specified!!"""'], {}), "('Frequency of AC Source not specified!!')\n", (6539, 6581), False, 'import sys\n'), ((7208, 7258), 'sys.exit', 'sys.exit', (['"""Frequency of AC Source not specified!!"""'], {}), "('Frequency of AC Source not specified!!')\n", (7216, 7258), False, 'import sys\n'), ((8463, 8504), 'sys.exit', 'sys.exit', (['"""Wrong Component Given. ABORT!"""'], {}), "('Wrong Component Given. ABORT!')\n", (8471, 8504), False, 'import sys\n')] |
import logging
logger = logging.getLogger(__name__)
print(f"!!!!!!!!!! getEffectiveLevel: {logger.getEffectiveLevel()} !!!!!!!!!!!!!")
from dltb.base.observer import Observable, change
from network import Network, loader
from network.lucid import Network as LucidNetwork
# lucid.modelzoo.vision_models:
# A module providing the pretrained networks by name, e.g.
# models.AlexNet
import lucid.modelzoo.vision_models as models
import lucid.modelzoo.nets_factory as nets
from lucid.modelzoo.vision_base import Model as LucidModel
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform
class Engine(Observable, method='engine_changed',
changes={'engine_changed', 'model_changed', 'unit_changed'}):
"""The Engine is a wrapper around the lucid module.
Attributes
----------
_network: LucidNetwork
The currently selected lucid network. None if no model
is selected.
_model: LucidModel
The currently selected lucid model. None if no model is
selected.
"""
def __init__(self):
super().__init__()
self._network = None
self._model = None
self._layer = None
self._unit = None
self.image = None
self.running = False
@property
def model(self) -> LucidModel:
"""The currently selected lucid model. None if no model is
selected.
"""
return self._model
@property
def model_name(self) -> str:
"""The name of the currently selected lucid model. None if
no model is selected.
"""
return None if self._network is None else self._network.name
@change
def load_model(self, name: str) -> LucidModel:
"""Load the Lucid model with the given name.
Returns
-------
model: LucidModel
A reference to the LucidModel.
"""
logger.info(f"load_model({name})")
try:
#self._network = LucidNetwork(name=name)
self._network = loader.load_lucid(name)
self._model = self._network.model
except KeyError as e:
self._network = None
self._model = None
logger.info(f"NAME={name}/{self.model_name} : {self._model}")
self._layer = None
self._unit = None
self.change(model_changed=True, unit_changed=True)
return self._model
@change
def set_layer(self, name: str, unit: int=0) -> None:
"""Set the currently selected layer.
Arguments
---------
name: str
The name of the layer.
unit: int
The index of the unit in the layer.
"""
if name == self.layer:
return
if self._model is None:
return
try:
self._layer = next(x for x in self._model.layers
if x['name'] == name)
self._unit = unit
except StopIteration: # name not in layer list
self._layer = None
self._unit = None
self.change(unit_changed=True)
@property
def layer(self) -> str:
"""The name of the currently selected layer.
"""
return None if self._layer is None else self._layer['name']
@layer.setter
def layer(self, name: str) -> None:
"""Set the currently selected layer.
"""
self.set_layer(name)
@property
def layer_type(self) -> str:
"""The type of the currently selected layer.
"""
return None if self._layer is None else self._layer['type']
@property
def layer_units(self) -> int:
"""The number of units in the currently selected layer.
"""
return None if self._layer is None else self._layer['size']
@change
def _set_unit(self, unit: int) -> None:
if unit == self.unit:
return
if unit is None:
self._unit = None
self.change(unit_changed=True)
elif self._layer is None:
raise ValueError('Setting unit failed as no layer is selected')
elif not 0 <= unit < self._layer['size']:
raise ValueError(f"Invalid unit {unit} for current layer"
f" of size {self._layer['size']}")
else:
self._unit = unit
self.change(unit_changed=True)
@property
def unit(self) -> int:
"""The index of the currently selected unit or None if no
unit is selected.
"""
return None if self._unit is None else self._unit
@unit.setter
def unit(self, unit: int) -> None:
"""The index of the currently selected unit or None if no
unit is selected.
"""
self._set_unit(unit)
@property
def layer_id(self) -> str:
"""The id of the currently selected layer or None if no
unit is selected.
"""
if self._layer is None:
return None
if self._layer['type'] == 'conv':
return self._layer['name'] + '_pre_relu'
return self._layer['name']
@property
def unit_id(self) -> str:
"""The id of the currently selected unit or None if no
unit is selected.
"""
return (None if self._layer is None
else self.layer_id + ':' + str(self._unit))
def _doRun(self, running: bool=True) -> None:
self.running = running
self.notify_observers(EngineChange(engine_changed=True))
def start(self):
self.image = None
self._doRun(True)
obj = objectives.channel(self.layer_id, self.unit)
self.image = render.render_vis(self.model, obj)
#self.image = render.render_vis(self.model, self.unit_id)
self._doRun(False)
def stop(self):
self._doRun(False)
def start_multi(self):
self.image = None
self._doRun(True)
logger.info("!!! running all:")
for unit in range(self.layer_units):
self.unit = unit
self.notify_observers(EngineChange(unit_changed=True))
logger.info(f"!!! running unit {unit}")
obj = objectives.channel(self.layer_id, unit)
self.image = render.render_vis(self.model, obj)
if not self.running:
break
self._doRun(True)
self._doRun(False)
# FIXME[old]: this is too make old code happy. New code should use
# Engine.Change and Engine.Observer directly.
EngineChange = Engine.Change
EngineObserver = Engine.Observer
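# Illustrative use of Engine (a sketch; the model and layer names below are only
# examples and depend on what the lucid model zoo / loader actually provides):
#
#     engine = Engine()
#     engine.load_model("InceptionV1")
#     engine.set_layer("mixed4a", unit=0)
#     engine.start()   # the rendered result ends up in engine.image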
| [
"logging.getLogger",
"lucid.optvis.objectives.channel",
"network.loader.load_lucid",
"lucid.optvis.render.render_vis"
] | [((24, 51), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (41, 51), False, 'import logging\n'), ((5696, 5740), 'lucid.optvis.objectives.channel', 'objectives.channel', (['self.layer_id', 'self.unit'], {}), '(self.layer_id, self.unit)\n', (5714, 5740), True, 'import lucid.optvis.objectives as objectives\n'), ((5762, 5796), 'lucid.optvis.render.render_vis', 'render.render_vis', (['self.model', 'obj'], {}), '(self.model, obj)\n', (5779, 5796), True, 'import lucid.optvis.render as render\n'), ((2132, 2155), 'network.loader.load_lucid', 'loader.load_lucid', (['name'], {}), '(name)\n', (2149, 2155), False, 'from network import Network, loader\n'), ((6271, 6310), 'lucid.optvis.objectives.channel', 'objectives.channel', (['self.layer_id', 'unit'], {}), '(self.layer_id, unit)\n', (6289, 6310), True, 'import lucid.optvis.objectives as objectives\n'), ((6336, 6370), 'lucid.optvis.render.render_vis', 'render.render_vis', (['self.model', 'obj'], {}), '(self.model, obj)\n', (6353, 6370), True, 'import lucid.optvis.render as render\n')] |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2018-2019 New Vector Ltd
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from collections import Counter as c_counter, OrderedDict, deque, namedtuple
from functools import wraps
from six import iteritems, text_type
from six.moves import range
from canonicaljson import json
from prometheus_client import Counter, Histogram
from twisted.internet import defer
import synapse.metrics
from synapse.api.constants import EventTypes
from synapse.api.errors import SynapseError
from synapse.events import EventBase # noqa: F401
from synapse.events.snapshot import EventContext # noqa: F401
from synapse.metrics import BucketCollector
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.state import StateResolutionStore
from synapse.storage.background_updates import BackgroundUpdateStore
from synapse.storage.event_federation import EventFederationStore
from synapse.storage.events_worker import EventsWorkerStore
from synapse.storage.state import StateGroupWorkerStore
from synapse.types import RoomStreamToken, get_domain_from_id
from synapse.util import batch_iter
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
from synapse.util.frozenutils import frozendict_json_encoder
from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable
from synapse.util.logutils import log_function
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
persist_event_counter = Counter("synapse_storage_events_persisted_events", "")
event_counter = Counter(
"synapse_storage_events_persisted_events_sep",
"",
["type", "origin_type", "origin_entity"],
)
# The number of times we are recalculating the current state
state_delta_counter = Counter("synapse_storage_events_state_delta", "")
# The number of times we are recalculating state when there is only a
# single forward extremity
state_delta_single_event_counter = Counter(
"synapse_storage_events_state_delta_single_event", ""
)
# The number of times we are recalculating state when we could have reasonably
# calculated the delta when we calculated the state for an event we were
# persisting.
state_delta_reuse_delta_counter = Counter(
"synapse_storage_events_state_delta_reuse_delta", ""
)
# The number of forward extremities for each new event.
forward_extremities_counter = Histogram(
"synapse_storage_events_forward_extremities_persisted",
"Number of forward extremities for each new event",
buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
# The number of stale forward extremities for each new event. Stale extremities
# are those that were in the previous set of extremities as well as the new.
stale_forward_extremities_counter = Histogram(
"synapse_storage_events_stale_forward_extremities_persisted",
"Number of unchanged forward extremities for each new event",
buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
)
def encode_json(json_object):
"""
Encode a Python object as JSON and return it in a Unicode string.
"""
out = frozendict_json_encoder.encode(json_object)
if isinstance(out, bytes):
out = out.decode("utf8")
return out
class _EventPeristenceQueue(object):
"""Queues up events so that they can be persisted in bulk with only one
concurrent transaction per room.
"""
_EventPersistQueueItem = namedtuple(
"_EventPersistQueueItem", ("events_and_contexts", "backfilled", "deferred")
)
def __init__(self):
self._event_persist_queues = {}
self._currently_persisting_rooms = set()
def add_to_queue(self, room_id, events_and_contexts, backfilled):
"""Add events to the queue, with the given persist_event options.
NB: due to the normal usage pattern of this method, it does *not*
follow the synapse logcontext rules, and leaves the logcontext in
place whether or not the returned deferred is ready.
Args:
room_id (str):
events_and_contexts (list[(EventBase, EventContext)]):
backfilled (bool):
Returns:
defer.Deferred: a deferred which will resolve once the events are
persisted. Runs its callbacks *without* a logcontext.
"""
queue = self._event_persist_queues.setdefault(room_id, deque())
if queue:
# if the last item in the queue has the same `backfilled` setting,
# we can just add these new events to that item.
end_item = queue[-1]
if end_item.backfilled == backfilled:
end_item.events_and_contexts.extend(events_and_contexts)
return end_item.deferred.observe()
deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
queue.append(
self._EventPersistQueueItem(
events_and_contexts=events_and_contexts,
backfilled=backfilled,
deferred=deferred,
)
)
return deferred.observe()
def handle_queue(self, room_id, per_item_callback):
"""Attempts to handle the queue for a room if not already being handled.
        The given callback will be invoked for each item in the queue,
        of type _EventPersistQueueItem. The per_item_callback will continuously
        be called with new items, unless the queue becomes empty. The return
        value of the function will be given to the deferreds waiting on the item;
        exceptions will be passed to the deferreds as well.
This function should therefore be called whenever anything is added
to the queue.
If another callback is currently handling the queue then it will not be
invoked.
"""
if room_id in self._currently_persisting_rooms:
return
self._currently_persisting_rooms.add(room_id)
@defer.inlineCallbacks
def handle_queue_loop():
try:
queue = self._get_drainining_queue(room_id)
for item in queue:
try:
ret = yield per_item_callback(item)
except Exception:
with PreserveLoggingContext():
item.deferred.errback()
else:
with PreserveLoggingContext():
item.deferred.callback(ret)
finally:
queue = self._event_persist_queues.pop(room_id, None)
if queue:
self._event_persist_queues[room_id] = queue
self._currently_persisting_rooms.discard(room_id)
# set handle_queue_loop off in the background
run_as_background_process("persist_events", handle_queue_loop)
def _get_drainining_queue(self, room_id):
queue = self._event_persist_queues.setdefault(room_id, deque())
try:
while True:
yield queue.popleft()
except IndexError:
# Queue has been drained.
pass
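# Rough sketch of the persistence flow implemented in this module (for orientation):
#
#   persist_event / persist_events
#     -> _EventPeristenceQueue.add_to_queue        (batched per room)
#     -> _maybe_start_persisting -> handle_queue   (one worker per room)
#     -> _persist_events -> runInteraction("persist_events", _persist_events_txn)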
_EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
def _retry_on_integrity_error(func):
"""Wraps a database function so that it gets retried on IntegrityError,
with `delete_existing=True` passed in.
Args:
func: function that returns a Deferred and accepts a `delete_existing` arg
"""
@wraps(func)
@defer.inlineCallbacks
def f(self, *args, **kwargs):
try:
res = yield func(self, *args, **kwargs)
except self.database_engine.module.IntegrityError:
logger.exception("IntegrityError, retrying.")
res = yield func(self, *args, delete_existing=True, **kwargs)
defer.returnValue(res)
return f
# inherits from EventFederationStore so that we can call _update_backward_extremities
# and _handle_mult_prev_events (though arguably those could both be moved in here)
class EventsStore(
StateGroupWorkerStore,
EventFederationStore,
EventsWorkerStore,
BackgroundUpdateStore,
):
def __init__(self, db_conn, hs):
super(EventsStore, self).__init__(db_conn, hs)
self._event_persist_queue = _EventPeristenceQueue()
self._state_resolution_handler = hs.get_state_resolution_handler()
# Collect metrics on the number of forward extremities that exist.
# Counter of number of extremities to count
self._current_forward_extremities_amount = c_counter()
BucketCollector(
"synapse_forward_extremities",
lambda: self._current_forward_extremities_amount,
buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"],
)
# Read the extrems every 60 minutes
def read_forward_extremities():
# run as a background process to make sure that the database transactions
# have a logcontext to report to
return run_as_background_process(
"read_forward_extremities", self._read_forward_extremities
)
hs.get_clock().looping_call(read_forward_extremities, 60 * 60 * 1000)
@defer.inlineCallbacks
def _read_forward_extremities(self):
def fetch(txn):
txn.execute(
"""
select count(*) c from event_forward_extremities
group by room_id
"""
)
return txn.fetchall()
res = yield self.runInteraction("read_forward_extremities", fetch)
self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
@defer.inlineCallbacks
def persist_events(self, events_and_contexts, backfilled=False):
"""
Write events to the database
Args:
events_and_contexts: list of tuples of (event, context)
backfilled (bool): Whether the results are retrieved from federation
via backfill or not. Used to determine if they're "new" events
which might update the current state etc.
Returns:
Deferred[int]: the stream ordering of the latest persisted event
"""
partitioned = {}
for event, ctx in events_and_contexts:
partitioned.setdefault(event.room_id, []).append((event, ctx))
deferreds = []
for room_id, evs_ctxs in iteritems(partitioned):
d = self._event_persist_queue.add_to_queue(
room_id, evs_ctxs, backfilled=backfilled
)
deferreds.append(d)
for room_id in partitioned:
self._maybe_start_persisting(room_id)
yield make_deferred_yieldable(
defer.gatherResults(deferreds, consumeErrors=True)
)
max_persisted_id = yield self._stream_id_gen.get_current_token()
defer.returnValue(max_persisted_id)
@defer.inlineCallbacks
@log_function
def persist_event(self, event, context, backfilled=False):
"""
Args:
event (EventBase):
context (EventContext):
backfilled (bool):
Returns:
Deferred: resolves to (int, int): the stream ordering of ``event``,
and the stream ordering of the latest persisted event
"""
deferred = self._event_persist_queue.add_to_queue(
event.room_id, [(event, context)], backfilled=backfilled
)
self._maybe_start_persisting(event.room_id)
yield make_deferred_yieldable(deferred)
max_persisted_id = yield self._stream_id_gen.get_current_token()
defer.returnValue((event.internal_metadata.stream_ordering, max_persisted_id))
def _maybe_start_persisting(self, room_id):
@defer.inlineCallbacks
def persisting_queue(item):
with Measure(self._clock, "persist_events"):
yield self._persist_events(
item.events_and_contexts, backfilled=item.backfilled
)
self._event_persist_queue.handle_queue(room_id, persisting_queue)
@_retry_on_integrity_error
@defer.inlineCallbacks
def _persist_events(
self, events_and_contexts, backfilled=False, delete_existing=False
):
"""Persist events to db
Args:
events_and_contexts (list[(EventBase, EventContext)]):
backfilled (bool):
delete_existing (bool):
Returns:
Deferred: resolves when the events have been persisted
"""
if not events_and_contexts:
return
if backfilled:
stream_ordering_manager = self._backfill_id_gen.get_next_mult(
len(events_and_contexts)
)
else:
stream_ordering_manager = self._stream_id_gen.get_next_mult(
len(events_and_contexts)
)
with stream_ordering_manager as stream_orderings:
for (event, context), stream in zip(events_and_contexts, stream_orderings):
event.internal_metadata.stream_ordering = stream
chunks = [
events_and_contexts[x : x + 100]
for x in range(0, len(events_and_contexts), 100)
]
for chunk in chunks:
# We can't easily parallelize these since different chunks
# might contain the same event. :(
# NB: Assumes that we are only persisting events for one room
# at a time.
# map room_id->list[event_ids] giving the new forward
# extremities in each room
new_forward_extremeties = {}
# map room_id->(type,state_key)->event_id tracking the full
# state in each room after adding these events.
# This is simply used to prefill the get_current_state_ids
# cache
current_state_for_room = {}
# map room_id->(to_delete, to_insert) where to_delete is a list
# of type/state keys to remove from current state, and to_insert
# is a map (type,key)->event_id giving the state delta in each
# room
state_delta_for_room = {}
if not backfilled:
with Measure(self._clock, "_calculate_state_and_extrem"):
# Work out the new "current state" for each room.
# We do this by working out what the new extremities are and then
# calculating the state from that.
events_by_room = {}
for event, context in chunk:
events_by_room.setdefault(event.room_id, []).append(
(event, context)
)
for room_id, ev_ctx_rm in iteritems(events_by_room):
latest_event_ids = yield self.get_latest_event_ids_in_room(
room_id
)
new_latest_event_ids = yield self._calculate_new_extremities(
room_id, ev_ctx_rm, latest_event_ids
)
latest_event_ids = set(latest_event_ids)
if new_latest_event_ids == latest_event_ids:
# No change in extremities, so no change in state
continue
# there should always be at least one forward extremity.
# (except during the initial persistence of the send_join
# results, in which case there will be no existing
# extremities, so we'll `continue` above and skip this bit.)
assert new_latest_event_ids, "No forward extremities left!"
new_forward_extremeties[room_id] = new_latest_event_ids
len_1 = (
len(latest_event_ids) == 1
and len(new_latest_event_ids) == 1
)
if len_1:
all_single_prev_not_state = all(
len(event.prev_event_ids()) == 1
and not event.is_state()
for event, ctx in ev_ctx_rm
)
# Don't bother calculating state if they're just
# a long chain of single ancestor non-state events.
if all_single_prev_not_state:
continue
state_delta_counter.inc()
if len(new_latest_event_ids) == 1:
state_delta_single_event_counter.inc()
# This is a fairly handwavey check to see if we could
# have guessed what the delta would have been when
# processing one of these events.
# What we're interested in is if the latest extremities
# were the same when we created the event as they are
# now. When this server creates a new event (as opposed
# to receiving it over federation) it will use the
# forward extremities as the prev_events, so we can
# guess this by looking at the prev_events and checking
# if they match the current forward extremities.
for ev, _ in ev_ctx_rm:
prev_event_ids = set(ev.prev_event_ids())
if latest_event_ids == prev_event_ids:
state_delta_reuse_delta_counter.inc()
break
logger.info("Calculating state delta for room %s", room_id)
with Measure(
self._clock, "persist_events.get_new_state_after_events"
):
res = yield self._get_new_state_after_events(
room_id,
ev_ctx_rm,
latest_event_ids,
new_latest_event_ids,
)
current_state, delta_ids = res
# If either are not None then there has been a change,
# and we need to work out the delta (or use that
# given)
if delta_ids is not None:
# If there is a delta we know that we've
# only added or replaced state, never
# removed keys entirely.
state_delta_for_room[room_id] = ([], delta_ids)
elif current_state is not None:
with Measure(
self._clock, "persist_events.calculate_state_delta"
):
delta = yield self._calculate_state_delta(
room_id, current_state
)
state_delta_for_room[room_id] = delta
# If we have the current_state then lets prefill
# the cache with it.
if current_state is not None:
current_state_for_room[room_id] = current_state
yield self.runInteraction(
"persist_events",
self._persist_events_txn,
events_and_contexts=chunk,
backfilled=backfilled,
delete_existing=delete_existing,
state_delta_for_room=state_delta_for_room,
new_forward_extremeties=new_forward_extremeties,
)
persist_event_counter.inc(len(chunk))
if not backfilled:
# backfilled events have negative stream orderings, so we don't
# want to set the event_persisted_position to that.
synapse.metrics.event_persisted_position.set(
chunk[-1][0].internal_metadata.stream_ordering
)
for event, context in chunk:
if context.app_service:
origin_type = "local"
origin_entity = context.app_service.id
elif self.hs.is_mine_id(event.sender):
origin_type = "local"
origin_entity = "*client*"
else:
origin_type = "remote"
origin_entity = get_domain_from_id(event.sender)
event_counter.labels(event.type, origin_type, origin_entity).inc()
for room_id, new_state in iteritems(current_state_for_room):
self.get_current_state_ids.prefill((room_id,), new_state)
for room_id, latest_event_ids in iteritems(new_forward_extremeties):
self.get_latest_event_ids_in_room.prefill(
(room_id,), list(latest_event_ids)
)
@defer.inlineCallbacks
def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
"""Calculates the new forward extremities for a room given events to
persist.
Assumes that we are only persisting events for one room at a time.
"""
# we're only interested in new events which aren't outliers and which aren't
# being rejected.
new_events = [
event
for event, ctx in event_contexts
if not event.internal_metadata.is_outlier()
and not ctx.rejected
and not event.internal_metadata.is_soft_failed()
]
latest_event_ids = set(latest_event_ids)
# start with the existing forward extremities
result = set(latest_event_ids)
# add all the new events to the list
result.update(event.event_id for event in new_events)
# Now remove all events which are prev_events of any of the new events
result.difference_update(
e_id for event in new_events for e_id in event.prev_event_ids()
)
# Remove any events which are prev_events of any existing events.
existing_prevs = yield self._get_events_which_are_prevs(result)
result.difference_update(existing_prevs)
# Finally handle the case where the new events have soft-failed prev
# events. If they do we need to remove them and their prev events,
# otherwise we end up with dangling extremities.
existing_prevs = yield self._get_prevs_before_rejected(
e_id for event in new_events for e_id in event.prev_event_ids()
)
result.difference_update(existing_prevs)
# We only update metrics for events that change forward extremities
# (e.g. we ignore backfill/outliers/etc)
if result != latest_event_ids:
forward_extremities_counter.observe(len(result))
stale = latest_event_ids & result
stale_forward_extremities_counter.observe(len(stale))
defer.returnValue(result)
@defer.inlineCallbacks
def _get_events_which_are_prevs(self, event_ids):
"""Filter the supplied list of event_ids to get those which are prev_events of
existing (non-outlier/rejected) events.
Args:
event_ids (Iterable[str]): event ids to filter
Returns:
Deferred[List[str]]: filtered event ids
"""
results = []
def _get_events_which_are_prevs_txn(txn, batch):
sql = """
SELECT prev_event_id, internal_metadata
FROM event_edges
INNER JOIN events USING (event_id)
LEFT JOIN rejections USING (event_id)
LEFT JOIN event_json USING (event_id)
WHERE
prev_event_id IN (%s)
AND NOT events.outlier
AND rejections.event_id IS NULL
""" % (
",".join("?" for _ in batch),
)
txn.execute(sql, batch)
results.extend(r[0] for r in txn if not json.loads(r[1]).get("soft_failed"))
for chunk in batch_iter(event_ids, 100):
yield self.runInteraction(
"_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk
)
defer.returnValue(results)
@defer.inlineCallbacks
def _get_prevs_before_rejected(self, event_ids):
"""Get soft-failed ancestors to remove from the extremities.
Given a set of events, find all those that have been soft-failed or
rejected. Returns those soft failed/rejected events and their prev
events (whether soft-failed/rejected or not), and recurses up the
prev-event graph until it finds no more soft-failed/rejected events.
This is used to find extremities that are ancestors of new events, but
are separated by soft failed events.
Args:
event_ids (Iterable[str]): Events to find prev events for. Note
that these must have already been persisted.
Returns:
Deferred[set[str]]
"""
# The set of event_ids to return. This includes all soft-failed events
# and their prev events.
existing_prevs = set()
def _get_prevs_before_rejected_txn(txn, batch):
to_recursively_check = batch
while to_recursively_check:
sql = """
SELECT
event_id, prev_event_id, internal_metadata,
rejections.event_id IS NOT NULL
FROM event_edges
INNER JOIN events USING (event_id)
LEFT JOIN rejections USING (event_id)
LEFT JOIN event_json USING (event_id)
WHERE
event_id IN (%s)
AND NOT events.outlier
""" % (
",".join("?" for _ in to_recursively_check),
)
txn.execute(sql, to_recursively_check)
to_recursively_check = []
for event_id, prev_event_id, metadata, rejected in txn:
if prev_event_id in existing_prevs:
continue
soft_failed = json.loads(metadata).get("soft_failed")
if soft_failed or rejected:
to_recursively_check.append(prev_event_id)
existing_prevs.add(prev_event_id)
for chunk in batch_iter(event_ids, 100):
yield self.runInteraction(
"_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk
)
defer.returnValue(existing_prevs)
@defer.inlineCallbacks
def _get_new_state_after_events(
self, room_id, events_context, old_latest_event_ids, new_latest_event_ids
):
"""Calculate the current state dict after adding some new events to
a room
Args:
room_id (str):
room to which the events are being added. Used for logging etc
events_context (list[(EventBase, EventContext)]):
events and contexts which are being added to the room
old_latest_event_ids (iterable[str]):
the old forward extremities for the room.
new_latest_event_ids (iterable[str]):
the new forward extremities for the room.
Returns:
Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
Returns a tuple of two state maps, the first being the full new current
state and the second being the delta to the existing current state.
If both are None then there has been no change.
If there has been a change then we only return the delta if its
already been calculated. Conversely if we do know the delta then
the new current state is only returned if we've already calculated
it.
"""
# map from state_group to ((type, key) -> event_id) state map
state_groups_map = {}
# Map from (prev state group, new state group) -> delta state dict
state_group_deltas = {}
for ev, ctx in events_context:
if ctx.state_group is None:
# This should only happen for outlier events.
if not ev.internal_metadata.is_outlier():
raise Exception(
"Context for new event %s has no state "
"group" % (ev.event_id,)
)
continue
if ctx.state_group in state_groups_map:
continue
# We're only interested in pulling out state that has already
# been cached in the context. We'll pull stuff out of the DB later
# if necessary.
current_state_ids = ctx.get_cached_current_state_ids()
if current_state_ids is not None:
state_groups_map[ctx.state_group] = current_state_ids
if ctx.prev_group:
state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids
# We need to map the event_ids to their state groups. First, let's
# check if the event is one we're persisting, in which case we can
# pull the state group from its context.
# Otherwise we need to pull the state group from the database.
# Set of events we need to fetch groups for. (We know none of the old
# extremities are going to be in events_context).
missing_event_ids = set(old_latest_event_ids)
event_id_to_state_group = {}
for event_id in new_latest_event_ids:
# First search in the list of new events we're adding.
for ev, ctx in events_context:
if event_id == ev.event_id and ctx.state_group is not None:
event_id_to_state_group[event_id] = ctx.state_group
break
else:
# If we couldn't find it, then we'll need to pull
# the state from the database
missing_event_ids.add(event_id)
if missing_event_ids:
# Now pull out the state groups for any missing events from DB
event_to_groups = yield self._get_state_group_for_events(missing_event_ids)
event_id_to_state_group.update(event_to_groups)
# State groups of old_latest_event_ids
old_state_groups = set(
event_id_to_state_group[evid] for evid in old_latest_event_ids
)
# State groups of new_latest_event_ids
new_state_groups = set(
event_id_to_state_group[evid] for evid in new_latest_event_ids
)
        # If the old and new groups are the same then we don't need to do
# anything.
if old_state_groups == new_state_groups:
defer.returnValue((None, None))
if len(new_state_groups) == 1 and len(old_state_groups) == 1:
# If we're going from one state group to another, lets check if
# we have a delta for that transition. If we do then we can just
# return that.
new_state_group = next(iter(new_state_groups))
old_state_group = next(iter(old_state_groups))
delta_ids = state_group_deltas.get((old_state_group, new_state_group), None)
if delta_ids is not None:
# We have a delta from the existing to new current state,
# so lets just return that. If we happen to already have
# the current state in memory then lets also return that,
# but it doesn't matter if we don't.
new_state = state_groups_map.get(new_state_group)
defer.returnValue((new_state, delta_ids))
# Now that we have calculated new_state_groups we need to get
# their state IDs so we can resolve to a single state set.
missing_state = new_state_groups - set(state_groups_map)
if missing_state:
group_to_state = yield self._get_state_for_groups(missing_state)
state_groups_map.update(group_to_state)
if len(new_state_groups) == 1:
# If there is only one state group, then we know what the current
# state is.
defer.returnValue((state_groups_map[new_state_groups.pop()], None))
# Ok, we need to defer to the state handler to resolve our state sets.
state_groups = {sg: state_groups_map[sg] for sg in new_state_groups}
events_map = {ev.event_id: ev for ev, _ in events_context}
# We need to get the room version, which is in the create event.
# Normally that'd be in the database, but its also possible that we're
# currently trying to persist it.
room_version = None
for ev, _ in events_context:
if ev.type == EventTypes.Create and ev.state_key == "":
room_version = ev.content.get("room_version", "1")
break
if not room_version:
room_version = yield self.get_room_version(room_id)
logger.debug("calling resolve_state_groups from preserve_events")
res = yield self._state_resolution_handler.resolve_state_groups(
room_id,
room_version,
state_groups,
events_map,
state_res_store=StateResolutionStore(self),
)
defer.returnValue((res.state, None))
@defer.inlineCallbacks
def _calculate_state_delta(self, room_id, current_state):
"""Calculate the new state deltas for a room.
Assumes that we are only persisting events for one room at a time.
Returns:
tuple[list, dict] (to_delete, to_insert): where to_delete are the
type/state_keys to remove from current_state_events and `to_insert`
are the updates to current_state_events.
"""
existing_state = yield self.get_current_state_ids(room_id)
to_delete = [key for key in existing_state if key not in current_state]
to_insert = {
key: ev_id
for key, ev_id in iteritems(current_state)
if ev_id != existing_state.get(key)
}
defer.returnValue((to_delete, to_insert))
@log_function
def _persist_events_txn(
self,
txn,
events_and_contexts,
backfilled,
delete_existing=False,
state_delta_for_room={},
new_forward_extremeties={},
):
"""Insert some number of room events into the necessary database tables.
Rejected events are only inserted into the events table, the events_json table,
and the rejections table. Things reading from those table will need to check
whether the event was rejected.
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]):
events to persist
backfilled (bool): True if the events were backfilled
delete_existing (bool): True to purge existing table rows for the
events from the database. This is useful when retrying due to
IntegrityError.
state_delta_for_room (dict[str, (list, dict)]):
The current-state delta for each room. For each room, a tuple
(to_delete, to_insert), being a list of type/state keys to be
removed from the current state, and a state set to be added to
the current state.
new_forward_extremeties (dict[str, list[str]]):
The new forward extremities for each room. For each room, a
list of the event ids which are the forward extremities.
"""
all_events_and_contexts = events_and_contexts
min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering
max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering
self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)
self._update_forward_extremities_txn(
txn,
new_forward_extremities=new_forward_extremeties,
max_stream_order=max_stream_order,
)
# Ensure that we don't have the same event twice.
events_and_contexts = self._filter_events_and_contexts_for_duplicates(
events_and_contexts
)
self._update_room_depths_txn(
txn, events_and_contexts=events_and_contexts, backfilled=backfilled
)
# _update_outliers_txn filters out any events which have already been
# persisted, and returns the filtered list.
events_and_contexts = self._update_outliers_txn(
txn, events_and_contexts=events_and_contexts
)
# From this point onwards the events are only events that we haven't
# seen before.
if delete_existing:
# For paranoia reasons, we go and delete all the existing entries
# for these events so we can reinsert them.
# This gets around any problems with some tables already having
# entries.
self._delete_existing_rows_txn(txn, events_and_contexts=events_and_contexts)
self._store_event_txn(txn, events_and_contexts=events_and_contexts)
# Insert into event_to_state_groups.
self._store_event_state_mappings_txn(txn, events_and_contexts)
# We want to store event_auth mappings for rejected events, as they're
# used in state res v2.
# This is only necessary if the rejected event appears in an accepted
# event's auth chain, but its easier for now just to store them (and
# it doesn't take much storage compared to storing the entire event
# anyway).
self._simple_insert_many_txn(
txn,
table="event_auth",
values=[
{
"event_id": event.event_id,
"room_id": event.room_id,
"auth_id": auth_id,
}
for event, _ in events_and_contexts
for auth_id in event.auth_event_ids()
if event.is_state()
],
)
# _store_rejected_events_txn filters out any events which were
# rejected, and returns the filtered list.
events_and_contexts = self._store_rejected_events_txn(
txn, events_and_contexts=events_and_contexts
)
# From this point onwards the events are only ones that weren't
# rejected.
self._update_metadata_tables_txn(
txn,
events_and_contexts=events_and_contexts,
all_events_and_contexts=all_events_and_contexts,
backfilled=backfilled,
)
def _update_current_state_txn(self, txn, state_delta_by_room, stream_id):
for room_id, current_state_tuple in iteritems(state_delta_by_room):
to_delete, to_insert = current_state_tuple
# First we add entries to the current_state_delta_stream. We
# do this before updating the current_state_events table so
# that we can use it to calculate the `prev_event_id`. (This
# allows us to not have to pull out the existing state
# unnecessarily).
#
# The stream_id for the update is chosen to be the minimum of the stream_ids
# for the batch of the events that we are persisting; that means we do not
# end up in a situation where workers see events before the
# current_state_delta updates.
#
sql = """
INSERT INTO current_state_delta_stream
(stream_id, room_id, type, state_key, event_id, prev_event_id)
SELECT ?, ?, ?, ?, ?, (
SELECT event_id FROM current_state_events
WHERE room_id = ? AND type = ? AND state_key = ?
)
"""
txn.executemany(
sql,
(
(
stream_id,
room_id,
etype,
state_key,
None,
room_id,
etype,
state_key,
)
for etype, state_key in to_delete
# We sanity check that we're deleting rather than updating
if (etype, state_key) not in to_insert
),
)
txn.executemany(
sql,
(
(
stream_id,
room_id,
etype,
state_key,
ev_id,
room_id,
etype,
state_key,
)
for (etype, state_key), ev_id in iteritems(to_insert)
),
)
# Now we actually update the current_state_events table
txn.executemany(
"DELETE FROM current_state_events"
" WHERE room_id = ? AND type = ? AND state_key = ?",
(
(room_id, etype, state_key)
for etype, state_key in itertools.chain(to_delete, to_insert)
),
)
self._simple_insert_many_txn(
txn,
table="current_state_events",
values=[
{
"event_id": ev_id,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
}
for key, ev_id in iteritems(to_insert)
],
)
txn.call_after(
self._curr_state_delta_stream_cache.entity_has_changed,
room_id,
stream_id,
)
# Invalidate the various caches
# Figure out the changes of membership to invalidate the
# `get_rooms_for_user` cache.
# We find out which membership events we may have deleted
            # and which we have added, then we invalidate the caches for all
# those users.
members_changed = set(
state_key
for ev_type, state_key in itertools.chain(to_delete, to_insert)
if ev_type == EventTypes.Member
)
for member in members_changed:
txn.call_after(
self.get_rooms_for_user_with_stream_ordering.invalidate, (member,)
)
self._invalidate_state_caches_and_stream(txn, room_id, members_changed)
def _update_forward_extremities_txn(
self, txn, new_forward_extremities, max_stream_order
):
for room_id, new_extrem in iteritems(new_forward_extremities):
self._simple_delete_txn(
txn, table="event_forward_extremities", keyvalues={"room_id": room_id}
)
txn.call_after(self.get_latest_event_ids_in_room.invalidate, (room_id,))
self._simple_insert_many_txn(
txn,
table="event_forward_extremities",
values=[
{"event_id": ev_id, "room_id": room_id}
for room_id, new_extrem in iteritems(new_forward_extremities)
for ev_id in new_extrem
],
)
# We now insert into stream_ordering_to_exterm a mapping from room_id,
        # new stream_ordering to new forward extremities in the room.
        # This allows us to later efficiently look up the forward extremities
# for a room before a given stream_ordering
self._simple_insert_many_txn(
txn,
table="stream_ordering_to_exterm",
values=[
{
"room_id": room_id,
"event_id": event_id,
"stream_ordering": max_stream_order,
}
for room_id, new_extrem in iteritems(new_forward_extremities)
for event_id in new_extrem
],
)
@classmethod
def _filter_events_and_contexts_for_duplicates(cls, events_and_contexts):
"""Ensure that we don't have the same event twice.
Pick the earliest non-outlier if there is one, else the earliest one.
Args:
events_and_contexts (list[(EventBase, EventContext)]):
Returns:
list[(EventBase, EventContext)]: filtered list
"""
new_events_and_contexts = OrderedDict()
for event, context in events_and_contexts:
prev_event_context = new_events_and_contexts.get(event.event_id)
if prev_event_context:
if not event.internal_metadata.is_outlier():
if prev_event_context[0].internal_metadata.is_outlier():
# To ensure correct ordering we pop, as OrderedDict is
# ordered by first insertion.
new_events_and_contexts.pop(event.event_id, None)
new_events_and_contexts[event.event_id] = (event, context)
else:
new_events_and_contexts[event.event_id] = (event, context)
return list(new_events_and_contexts.values())
def _update_room_depths_txn(self, txn, events_and_contexts, backfilled):
"""Update min_depth for each room
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
backfilled (bool): True if the events were backfilled
"""
depth_updates = {}
for event, context in events_and_contexts:
            # Remove any existing cache entries for the event_ids
txn.call_after(self._invalidate_get_event_cache, event.event_id)
if not backfilled:
txn.call_after(
self._events_stream_cache.entity_has_changed,
event.room_id,
event.internal_metadata.stream_ordering,
)
if not event.internal_metadata.is_outlier() and not context.rejected:
depth_updates[event.room_id] = max(
event.depth, depth_updates.get(event.room_id, event.depth)
)
for room_id, depth in iteritems(depth_updates):
self._update_min_depth_for_room_txn(txn, room_id, depth)
def _update_outliers_txn(self, txn, events_and_contexts):
"""Update any outliers with new event info.
This turns outliers into ex-outliers (unless the new event was
rejected).
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
Returns:
list[(EventBase, EventContext)] new list, without events which
are already in the events table.
"""
txn.execute(
"SELECT event_id, outlier FROM events WHERE event_id in (%s)"
% (",".join(["?"] * len(events_and_contexts)),),
[event.event_id for event, _ in events_and_contexts],
)
have_persisted = {event_id: outlier for event_id, outlier in txn}
to_remove = set()
for event, context in events_and_contexts:
if event.event_id not in have_persisted:
continue
to_remove.add(event)
if context.rejected:
# If the event is rejected then we don't care if the event
# was an outlier or not.
continue
outlier_persisted = have_persisted[event.event_id]
if not event.internal_metadata.is_outlier() and outlier_persisted:
# We received a copy of an event that we had already stored as
                # an outlier in the database. We now have some state at that event
# so we need to update the state_groups table with that state.
# insert into event_to_state_groups.
try:
self._store_event_state_mappings_txn(txn, ((event, context),))
except Exception:
logger.exception("")
raise
metadata_json = encode_json(event.internal_metadata.get_dict())
sql = (
"UPDATE event_json SET internal_metadata = ?" " WHERE event_id = ?"
)
txn.execute(sql, (metadata_json, event.event_id))
# Add an entry to the ex_outlier_stream table to replicate the
# change in outlier status to our workers.
stream_order = event.internal_metadata.stream_ordering
state_group_id = context.state_group
self._simple_insert_txn(
txn,
table="ex_outlier_stream",
values={
"event_stream_ordering": stream_order,
"event_id": event.event_id,
"state_group": state_group_id,
},
)
sql = "UPDATE events SET outlier = ?" " WHERE event_id = ?"
txn.execute(sql, (False, event.event_id))
# Update the event_backward_extremities table now that this
# event isn't an outlier any more.
self._update_backward_extremeties(txn, [event])
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
@classmethod
def _delete_existing_rows_txn(cls, txn, events_and_contexts):
if not events_and_contexts:
# nothing to do here
return
logger.info("Deleting existing")
for table in (
"events",
"event_auth",
"event_json",
"event_edges",
"event_forward_extremities",
"event_reference_hashes",
"event_search",
"event_to_state_groups",
"guest_access",
"history_visibility",
"local_invites",
"room_names",
"state_events",
"rejections",
"redactions",
"room_memberships",
"topics",
):
txn.executemany(
"DELETE FROM %s WHERE event_id = ?" % (table,),
[(ev.event_id,) for ev, _ in events_and_contexts],
)
for table in ("event_push_actions",):
txn.executemany(
"DELETE FROM %s WHERE room_id = ? AND event_id = ?" % (table,),
[(ev.room_id, ev.event_id) for ev, _ in events_and_contexts],
)
def _store_event_txn(self, txn, events_and_contexts):
"""Insert new events into the event and event_json tables
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
"""
if not events_and_contexts:
# nothing to do here
return
def event_dict(event):
d = event.get_dict()
d.pop("redacted", None)
d.pop("redacted_because", None)
return d
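        # event_dict() above drops the "redacted"/"redacted_because" keys so
        # they are not serialised into event_json below.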
self._simple_insert_many_txn(
txn,
table="event_json",
values=[
{
"event_id": event.event_id,
"room_id": event.room_id,
"internal_metadata": encode_json(
event.internal_metadata.get_dict()
),
"json": encode_json(event_dict(event)),
"format_version": event.format_version,
}
for event, _ in events_and_contexts
],
)
self._simple_insert_many_txn(
txn,
table="events",
values=[
{
"stream_ordering": event.internal_metadata.stream_ordering,
"topological_ordering": event.depth,
"depth": event.depth,
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"processed": True,
"outlier": event.internal_metadata.is_outlier(),
"origin_server_ts": int(event.origin_server_ts),
"received_ts": self._clock.time_msec(),
"sender": event.sender,
"contains_url": (
"url" in event.content
and isinstance(event.content["url"], text_type)
),
}
for event, _ in events_and_contexts
],
)
def _store_rejected_events_txn(self, txn, events_and_contexts):
"""Add rows to the 'rejections' table for received events which were
rejected
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
Returns:
list[(EventBase, EventContext)] new list, without the rejected
events.
"""
# Remove the rejected events from the list now that we've added them
# to the events table and the events_json table.
to_remove = set()
for event, context in events_and_contexts:
if context.rejected:
# Insert the event_id into the rejections table
self._store_rejections_txn(txn, event.event_id, context.rejected)
to_remove.add(event)
return [ec for ec in events_and_contexts if ec[0] not in to_remove]
def _update_metadata_tables_txn(
self, txn, events_and_contexts, all_events_and_contexts, backfilled
):
"""Update all the miscellaneous tables for new events
Args:
txn (twisted.enterprise.adbapi.Connection): db connection
events_and_contexts (list[(EventBase, EventContext)]): events
we are persisting
all_events_and_contexts (list[(EventBase, EventContext)]): all
events that we were going to persist. This includes events
we've already persisted, etc, that wouldn't appear in
events_and_context.
backfilled (bool): True if the events were backfilled
"""
# Insert all the push actions into the event_push_actions table.
self._set_push_actions_for_event_and_users_txn(
txn,
events_and_contexts=events_and_contexts,
all_events_and_contexts=all_events_and_contexts,
)
if not events_and_contexts:
# nothing to do here
return
for event, context in events_and_contexts:
if event.type == EventTypes.Redaction and event.redacts is not None:
# Remove the entries in the event_push_actions table for the
# redacted event.
self._remove_push_actions_for_event_id_txn(
txn, event.room_id, event.redacts
)
# Remove from relations table.
self._handle_redaction(txn, event.redacts)
# Update the event_forward_extremities, event_backward_extremities and
# event_edges tables.
self._handle_mult_prev_events(
txn, events=[event for event, _ in events_and_contexts]
)
for event, _ in events_and_contexts:
if event.type == EventTypes.Name:
# Insert into the room_names and event_search tables.
self._store_room_name_txn(txn, event)
elif event.type == EventTypes.Topic:
# Insert into the topics table and event_search table.
self._store_room_topic_txn(txn, event)
elif event.type == EventTypes.Message:
# Insert into the event_search table.
self._store_room_message_txn(txn, event)
elif event.type == EventTypes.Redaction:
# Insert into the redactions table.
self._store_redaction(txn, event)
elif event.type == EventTypes.RoomHistoryVisibility:
# Insert into the event_search table.
self._store_history_visibility_txn(txn, event)
elif event.type == EventTypes.GuestAccess:
# Insert into the event_search table.
self._store_guest_access_txn(txn, event)
self._handle_event_relations(txn, event)
# Insert into the room_memberships table.
self._store_room_members_txn(
txn,
[
event
for event, _ in events_and_contexts
if event.type == EventTypes.Member
],
backfilled=backfilled,
)
# Insert event_reference_hashes table.
self._store_event_reference_hashes_txn(
txn, [event for event, _ in events_and_contexts]
)
state_events_and_contexts = [
ec for ec in events_and_contexts if ec[0].is_state()
]
state_values = []
for event, context in state_events_and_contexts:
vals = {
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"state_key": event.state_key,
}
# TODO: How does this work with backfilling?
if hasattr(event, "replaces_state"):
vals["prev_state"] = event.replaces_state
state_values.append(vals)
self._simple_insert_many_txn(txn, table="state_events", values=state_values)
# Prefill the event cache
self._add_to_cache(txn, events_and_contexts)
def _add_to_cache(self, txn, events_and_contexts):
to_prefill = []
rows = []
N = 200
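        # Prefill in batches of N events so that the IN (...) clause in the
        # query below stays a reasonable size.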
for i in range(0, len(events_and_contexts), N):
ev_map = {e[0].event_id: e[0] for e in events_and_contexts[i : i + N]}
if not ev_map:
break
sql = (
"SELECT "
" e.event_id as event_id, "
" r.redacts as redacts,"
" rej.event_id as rejects "
" FROM events as e"
" LEFT JOIN rejections as rej USING (event_id)"
" LEFT JOIN redactions as r ON e.event_id = r.redacts"
" WHERE e.event_id IN (%s)"
) % (",".join(["?"] * len(ev_map)),)
txn.execute(sql, list(ev_map))
rows = self.cursor_to_dict(txn)
for row in rows:
event = ev_map[row["event_id"]]
if not row["rejects"] and not row["redacts"]:
to_prefill.append(
_EventCacheEntry(event=event, redacted_event=None)
)
def prefill():
for cache_entry in to_prefill:
self._get_event_cache.prefill((cache_entry[0].event_id,), cache_entry)
txn.call_after(prefill)
def _store_redaction(self, txn, event):
# invalidate the cache for the redacted event
txn.call_after(self._invalidate_get_event_cache, event.redacts)
txn.execute(
"INSERT INTO redactions (event_id, redacts) VALUES (?,?)",
(event.event_id, event.redacts),
)
@defer.inlineCallbacks
def count_daily_messages(self):
"""
Returns an estimate of the number of messages sent in the last day.
If it has been significantly less or more than one day since the last
call to this function, it will return None.
"""
def _count_messages(txn):
sql = """
SELECT COALESCE(COUNT(*), 0) FROM events
WHERE type = 'm.room.message'
AND stream_ordering > ?
"""
txn.execute(sql, (self.stream_ordering_day_ago,))
count, = txn.fetchone()
return count
ret = yield self.runInteraction("count_messages", _count_messages)
defer.returnValue(ret)
@defer.inlineCallbacks
def count_daily_sent_messages(self):
def _count_messages(txn):
# This is good enough as if you have silly characters in your own
            # hostname then that's your own fault.
like_clause = "%:" + self.hs.hostname
sql = """
SELECT COALESCE(COUNT(*), 0) FROM events
WHERE type = 'm.room.message'
AND sender LIKE ?
AND stream_ordering > ?
"""
txn.execute(sql, (like_clause, self.stream_ordering_day_ago))
count, = txn.fetchone()
return count
ret = yield self.runInteraction("count_daily_sent_messages", _count_messages)
defer.returnValue(ret)
@defer.inlineCallbacks
def count_daily_active_rooms(self):
def _count(txn):
sql = """
SELECT COALESCE(COUNT(DISTINCT room_id), 0) FROM events
WHERE type = 'm.room.message'
AND stream_ordering > ?
"""
txn.execute(sql, (self.stream_ordering_day_ago,))
count, = txn.fetchone()
return count
ret = yield self.runInteraction("count_daily_active_rooms", _count)
defer.returnValue(ret)
def get_current_backfill_token(self):
"""The current minimum token that backfilled events have reached"""
return -self._backfill_id_gen.get_current_token()
def get_current_events_token(self):
"""The current maximum token that events have reached"""
return self._stream_id_gen.get_current_token()
def get_all_new_forward_event_rows(self, last_id, current_id, limit):
if last_id == current_id:
return defer.succeed([])
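        # The interaction below runs two queries over the (last_id, current_id]
        # range: one against `events` for new events and one against
        # `ex_outlier_stream` for events that stopped being outliers. If the
        # first query hits the limit, both are capped at the same upper bound.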
def get_all_new_forward_event_rows(txn):
sql = (
"SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? < stream_ordering AND stream_ordering <= ?"
" ORDER BY stream_ordering ASC"
" LIMIT ?"
)
txn.execute(sql, (last_id, current_id, limit))
new_event_updates = txn.fetchall()
if len(new_event_updates) == limit:
upper_bound = new_event_updates[-1][0]
else:
upper_bound = current_id
sql = (
"SELECT event_stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" INNER JOIN ex_outlier_stream USING (event_id)"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? < event_stream_ordering"
" AND event_stream_ordering <= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (last_id, upper_bound))
new_event_updates.extend(txn)
return new_event_updates
return self.runInteraction(
"get_all_new_forward_event_rows", get_all_new_forward_event_rows
)
def get_all_new_backfill_event_rows(self, last_id, current_id, limit):
if last_id == current_id:
return defer.succeed([])
def get_all_new_backfill_event_rows(txn):
sql = (
"SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? > stream_ordering AND stream_ordering >= ?"
" ORDER BY stream_ordering ASC"
" LIMIT ?"
)
txn.execute(sql, (-last_id, -current_id, limit))
new_event_updates = txn.fetchall()
if len(new_event_updates) == limit:
upper_bound = new_event_updates[-1][0]
else:
upper_bound = current_id
sql = (
"SELECT -event_stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts, relates_to_id"
" FROM events AS e"
" INNER JOIN ex_outlier_stream USING (event_id)"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" LEFT JOIN event_relations USING (event_id)"
" WHERE ? > event_stream_ordering"
" AND event_stream_ordering >= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (-last_id, -upper_bound))
new_event_updates.extend(txn.fetchall())
return new_event_updates
return self.runInteraction(
"get_all_new_backfill_event_rows", get_all_new_backfill_event_rows
)
@cached(num_args=5, max_entries=10)
def get_all_new_events(
self,
last_backfill_id,
last_forward_id,
current_backfill_id,
current_forward_id,
limit,
):
"""Get all the new events that have arrived at the server either as
new events or as backfilled events"""
have_backfill_events = last_backfill_id != current_backfill_id
have_forward_events = last_forward_id != current_forward_id
if not have_backfill_events and not have_forward_events:
return defer.succeed(AllNewEventsResult([], [], [], [], []))
def get_all_new_events_txn(txn):
sql = (
"SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" WHERE ? < stream_ordering AND stream_ordering <= ?"
" ORDER BY stream_ordering ASC"
" LIMIT ?"
)
if have_forward_events:
txn.execute(sql, (last_forward_id, current_forward_id, limit))
new_forward_events = txn.fetchall()
if len(new_forward_events) == limit:
upper_bound = new_forward_events[-1][0]
else:
upper_bound = current_forward_id
sql = (
"SELECT event_stream_ordering, event_id, state_group"
" FROM ex_outlier_stream"
" WHERE ? > event_stream_ordering"
" AND event_stream_ordering >= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (last_forward_id, upper_bound))
forward_ex_outliers = txn.fetchall()
else:
new_forward_events = []
forward_ex_outliers = []
sql = (
"SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
" state_key, redacts"
" FROM events AS e"
" LEFT JOIN redactions USING (event_id)"
" LEFT JOIN state_events USING (event_id)"
" WHERE ? > stream_ordering AND stream_ordering >= ?"
" ORDER BY stream_ordering DESC"
" LIMIT ?"
)
if have_backfill_events:
txn.execute(sql, (-last_backfill_id, -current_backfill_id, limit))
new_backfill_events = txn.fetchall()
if len(new_backfill_events) == limit:
upper_bound = new_backfill_events[-1][0]
else:
upper_bound = current_backfill_id
sql = (
"SELECT -event_stream_ordering, event_id, state_group"
" FROM ex_outlier_stream"
" WHERE ? > event_stream_ordering"
" AND event_stream_ordering >= ?"
" ORDER BY event_stream_ordering DESC"
)
txn.execute(sql, (-last_backfill_id, -upper_bound))
backward_ex_outliers = txn.fetchall()
else:
new_backfill_events = []
backward_ex_outliers = []
return AllNewEventsResult(
new_forward_events,
new_backfill_events,
forward_ex_outliers,
backward_ex_outliers,
)
return self.runInteraction("get_all_new_events", get_all_new_events_txn)
def purge_history(self, room_id, token, delete_local_events):
"""Deletes room history before a certain point
Args:
room_id (str):
token (str): A topological token to delete events before
delete_local_events (bool):
if True, we will delete local events as well as remote ones
(instead of just marking them as outliers and deleting their
state groups).
"""
return self.runInteraction(
"purge_history",
self._purge_history_txn,
room_id,
token,
delete_local_events,
)
def _purge_history_txn(self, txn, room_id, token_str, delete_local_events):
token = RoomStreamToken.parse(token_str)
# Tables that should be pruned:
# event_auth
# event_backward_extremities
# event_edges
# event_forward_extremities
# event_json
# event_push_actions
# event_reference_hashes
# event_search
# event_to_state_groups
# events
# rejections
# room_depth
# state_groups
# state_groups_state
# we will build a temporary table listing the events so that we don't
# have to keep shovelling the list back and forth across the
# connection. Annoyingly the python sqlite driver commits the
# transaction on CREATE, so let's do this first.
#
# furthermore, we might already have the table from a previous (failed)
# purge attempt, so let's drop the table first.
txn.execute("DROP TABLE IF EXISTS events_to_purge")
txn.execute(
"CREATE TEMPORARY TABLE events_to_purge ("
" event_id TEXT NOT NULL,"
" should_delete BOOLEAN NOT NULL"
")"
)
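        # events_to_purge will end up holding one row per event before the
        # purge point; should_delete marks whether the event's rows are
        # actually deleted or the event is merely turned into an outlier
        # further down.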
        # First ensure that we're not about to delete all the forward extremities
txn.execute(
"SELECT e.event_id, e.depth FROM events as e "
"INNER JOIN event_forward_extremities as f "
"ON e.event_id = f.event_id "
"AND e.room_id = f.room_id "
"WHERE f.room_id = ?",
(room_id,),
)
rows = txn.fetchall()
max_depth = max(row[1] for row in rows)
if max_depth < token.topological:
# We need to ensure we don't delete all the events from the database
# otherwise we wouldn't be able to send any events (due to not
            # having any backwards extremities)
raise SynapseError(
400, "topological_ordering is greater than forward extremeties"
)
logger.info("[purge] looking for events to delete")
should_delete_expr = "state_key IS NULL"
should_delete_params = ()
if not delete_local_events:
should_delete_expr += " AND event_id NOT LIKE ?"
# We include the parameter twice since we use the expression twice
should_delete_params += ("%:" + self.hs.hostname, "%:" + self.hs.hostname)
should_delete_params += (room_id, token.topological)
# Note that we insert events that are outliers and aren't going to be
# deleted, as nothing will happen to them.
txn.execute(
"INSERT INTO events_to_purge"
" SELECT event_id, %s"
" FROM events AS e LEFT JOIN state_events USING (event_id)"
" WHERE (NOT outlier OR (%s)) AND e.room_id = ? AND topological_ordering < ?"
% (should_delete_expr, should_delete_expr),
should_delete_params,
)
# We create the indices *after* insertion as that's a lot faster.
# create an index on should_delete because later we'll be looking for
# the should_delete / shouldn't_delete subsets
txn.execute(
"CREATE INDEX events_to_purge_should_delete"
" ON events_to_purge(should_delete)"
)
# We do joins against events_to_purge for e.g. calculating state
        # groups to purge, etc., so let's make an index.
txn.execute("CREATE INDEX events_to_purge_id" " ON events_to_purge(event_id)")
txn.execute("SELECT event_id, should_delete FROM events_to_purge")
event_rows = txn.fetchall()
logger.info(
"[purge] found %i events before cutoff, of which %i can be deleted",
len(event_rows),
sum(1 for e in event_rows if e[1]),
)
logger.info("[purge] Finding new backward extremities")
        # We calculate the new entries for the backward extremities by finding
# events to be purged that are pointed to by events we're not going to
# purge.
txn.execute(
"SELECT DISTINCT e.event_id FROM events_to_purge AS e"
" INNER JOIN event_edges AS ed ON e.event_id = ed.prev_event_id"
" LEFT JOIN events_to_purge AS ep2 ON ed.event_id = ep2.event_id"
" WHERE ep2.event_id IS NULL"
)
new_backwards_extrems = txn.fetchall()
logger.info("[purge] replacing backward extremities: %r", new_backwards_extrems)
txn.execute(
"DELETE FROM event_backward_extremities WHERE room_id = ?", (room_id,)
)
        # Update backward extremities
txn.executemany(
"INSERT INTO event_backward_extremities (room_id, event_id)"
" VALUES (?, ?)",
[(room_id, event_id) for event_id, in new_backwards_extrems],
)
logger.info("[purge] finding redundant state groups")
# Get all state groups that are referenced by events that are to be
# deleted. We then go and check if they are referenced by other events
# or state groups, and if not we delete them.
txn.execute(
"""
SELECT DISTINCT state_group FROM events_to_purge
INNER JOIN event_to_state_groups USING (event_id)
"""
)
referenced_state_groups = set(sg for sg, in txn)
logger.info(
"[purge] found %i referenced state groups", len(referenced_state_groups)
)
logger.info("[purge] finding state groups that can be deleted")
        state_groups_to_delete, remaining_state_groups = self._find_unreferenced_groups_during_purge(
            txn, referenced_state_groups
        )
logger.info(
"[purge] found %i state groups to delete", len(state_groups_to_delete)
)
logger.info(
"[purge] de-delta-ing %i remaining state groups",
len(remaining_state_groups),
)
# Now we turn the state groups that reference to-be-deleted state
# groups to non delta versions.
for sg in remaining_state_groups:
logger.info("[purge] de-delta-ing remaining state group %s", sg)
curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
curr_state = curr_state[sg]
self._simple_delete_txn(
txn, table="state_groups_state", keyvalues={"state_group": sg}
)
self._simple_delete_txn(
txn, table="state_group_edges", keyvalues={"state_group": sg}
)
self._simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": sg,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in iteritems(curr_state)
],
)
logger.info("[purge] removing redundant state groups")
txn.executemany(
"DELETE FROM state_groups_state WHERE state_group = ?",
((sg,) for sg in state_groups_to_delete),
)
txn.executemany(
"DELETE FROM state_groups WHERE id = ?",
((sg,) for sg in state_groups_to_delete),
)
logger.info("[purge] removing events from event_to_state_groups")
txn.execute(
"DELETE FROM event_to_state_groups "
"WHERE event_id IN (SELECT event_id from events_to_purge)"
)
for event_id, _ in event_rows:
txn.call_after(self._get_state_group_for_event.invalidate, (event_id,))
# Delete all remote non-state events
for table in (
"events",
"event_json",
"event_auth",
"event_edges",
"event_forward_extremities",
"event_reference_hashes",
"event_search",
"rejections",
):
logger.info("[purge] removing events from %s", table)
txn.execute(
"DELETE FROM %s WHERE event_id IN ("
" SELECT event_id FROM events_to_purge WHERE should_delete"
")" % (table,)
)
# event_push_actions lacks an index on event_id, and has one on
# (room_id, event_id) instead.
for table in ("event_push_actions",):
logger.info("[purge] removing events from %s", table)
txn.execute(
"DELETE FROM %s WHERE room_id = ? AND event_id IN ("
" SELECT event_id FROM events_to_purge WHERE should_delete"
")" % (table,),
(room_id,),
)
# Mark all state and own events as outliers
logger.info("[purge] marking remaining events as outliers")
txn.execute(
"UPDATE events SET outlier = ?"
" WHERE event_id IN ("
" SELECT event_id FROM events_to_purge "
" WHERE NOT should_delete"
")",
(True,),
)
# synapse tries to take out an exclusive lock on room_depth whenever it
# persists events (because upsert), and once we run this update, we
# will block that for the rest of our transaction.
#
# So, let's stick it at the end so that we don't block event
# persistence.
#
# We do this by calculating the minimum depth of the backwards
# extremities. However, the events in event_backward_extremities
# are ones we don't have yet so we need to look at the events that
        # point to them via the event_edges table.
txn.execute(
"""
SELECT COALESCE(MIN(depth), 0)
FROM event_backward_extremities AS eb
INNER JOIN event_edges AS eg ON eg.prev_event_id = eb.event_id
INNER JOIN events AS e ON e.event_id = eg.event_id
WHERE eb.room_id = ?
""",
(room_id,),
)
min_depth, = txn.fetchone()
logger.info("[purge] updating room_depth to %d", min_depth)
txn.execute(
"UPDATE room_depth SET min_depth = ? WHERE room_id = ?",
(min_depth, room_id),
)
# finally, drop the temp table. this will commit the txn in sqlite,
# so make sure to keep this actually last.
txn.execute("DROP TABLE events_to_purge")
logger.info("[purge] done")
def _find_unreferenced_groups_during_purge(self, txn, state_groups):
"""Used when purging history to figure out which state groups can be
deleted and which need to be de-delta'ed (due to one of its prev groups
being scheduled for deletion).
Args:
txn
state_groups (set[int]): Set of state groups referenced by events
that are going to be deleted.
Returns:
tuple[set[int], set[int]]: The set of state groups that can be
deleted and the set of state groups that need to be de-delta'ed
"""
# Graph of state group -> previous group
graph = {}
        # Set of state groups that we have found to be referenced by events
referenced_groups = set()
# Set of state groups we've already seen
state_groups_seen = set(state_groups)
# Set of state groups to handle next.
next_to_search = set(state_groups)
while next_to_search:
# We bound size of groups we're looking up at once, to stop the
# SQL query getting too big
if len(next_to_search) < 100:
current_search = next_to_search
next_to_search = set()
else:
current_search = set(itertools.islice(next_to_search, 100))
next_to_search -= current_search
# Check if state groups are referenced
sql = """
SELECT DISTINCT state_group FROM event_to_state_groups
LEFT JOIN events_to_purge AS ep USING (event_id)
WHERE state_group IN (%s) AND ep.event_id IS NULL
""" % (
",".join("?" for _ in current_search),
)
txn.execute(sql, list(current_search))
referenced = set(sg for sg, in txn)
referenced_groups |= referenced
# We don't continue iterating up the state group graphs for state
# groups that are referenced.
current_search -= referenced
rows = self._simple_select_many_txn(
txn,
table="state_group_edges",
column="prev_state_group",
iterable=current_search,
keyvalues={},
retcols=("prev_state_group", "state_group"),
)
prevs = set(row["state_group"] for row in rows)
# We don't bother re-handling groups we've already seen
prevs -= state_groups_seen
next_to_search |= prevs
state_groups_seen |= prevs
for row in rows:
# Note: Each state group can have at most one prev group
graph[row["state_group"]] = row["prev_state_group"]
to_delete = state_groups_seen - referenced_groups
to_dedelta = set()
for sg in referenced_groups:
prev_sg = graph.get(sg)
if prev_sg and prev_sg in to_delete:
to_dedelta.add(sg)
return to_delete, to_dedelta
@defer.inlineCallbacks
def is_event_after(self, event_id1, event_id2):
"""Returns True if event_id1 is after event_id2 in the stream
"""
to_1, so_1 = yield self._get_event_ordering(event_id1)
to_2, so_2 = yield self._get_event_ordering(event_id2)
defer.returnValue((to_1, so_1) > (to_2, so_2))
@cachedInlineCallbacks(max_entries=5000)
def _get_event_ordering(self, event_id):
res = yield self._simple_select_one(
table="events",
retcols=["topological_ordering", "stream_ordering"],
keyvalues={"event_id": event_id},
allow_none=True,
)
if not res:
raise SynapseError(404, "Could not find event %s" % (event_id,))
defer.returnValue(
(int(res["topological_ordering"]), int(res["stream_ordering"]))
)
def get_all_updated_current_state_deltas(self, from_token, to_token, limit):
def get_all_updated_current_state_deltas_txn(txn):
sql = """
SELECT stream_id, room_id, type, state_key, event_id
FROM current_state_delta_stream
WHERE ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC LIMIT ?
"""
txn.execute(sql, (from_token, to_token, limit))
return txn.fetchall()
return self.runInteraction(
"get_all_updated_current_state_deltas",
get_all_updated_current_state_deltas_txn,
)
AllNewEventsResult = namedtuple(
"AllNewEventsResult",
[
"new_forward_events",
"new_backfill_events",
"forward_ex_outliers",
"backward_ex_outliers",
],
)
| [
"logging.getLogger",
"itertools.chain",
"synapse.util.logcontext.PreserveLoggingContext",
"synapse.api.errors.SynapseError",
"synapse.util.frozenutils.frozendict_json_encoder.encode",
"synapse.util.metrics.Measure",
"canonicaljson.json.loads",
"collections.deque",
"prometheus_client.Histogram",
"twisted.internet.defer.returnValue",
"functools.wraps",
"synapse.types.get_domain_from_id",
"synapse.util.caches.descriptors.cached",
"prometheus_client.Counter",
"synapse.util.caches.descriptors.cachedInlineCallbacks",
"collections.OrderedDict",
"collections.namedtuple",
"twisted.internet.defer.gatherResults",
"synapse.metrics.BucketCollector",
"six.iteritems",
"twisted.internet.defer.succeed",
"itertools.islice",
"synapse.types.RoomStreamToken.parse",
"synapse.state.StateResolutionStore",
"synapse.util.batch_iter",
"collections.Counter",
"synapse.util.logcontext.make_deferred_yieldable",
"synapse.metrics.background_process_metrics.run_as_background_process",
"twisted.internet.defer.Deferred"
] | [((2138, 2165), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2155, 2165), False, 'import logging\n'), ((2191, 2245), 'prometheus_client.Counter', 'Counter', (['"""synapse_storage_events_persisted_events"""', '""""""'], {}), "('synapse_storage_events_persisted_events', '')\n", (2198, 2245), False, 'from prometheus_client import Counter, Histogram\n'), ((2262, 2366), 'prometheus_client.Counter', 'Counter', (['"""synapse_storage_events_persisted_events_sep"""', '""""""', "['type', 'origin_type', 'origin_entity']"], {}), "('synapse_storage_events_persisted_events_sep', '', ['type',\n 'origin_type', 'origin_entity'])\n", (2269, 2366), False, 'from prometheus_client import Counter, Histogram\n'), ((2462, 2511), 'prometheus_client.Counter', 'Counter', (['"""synapse_storage_events_state_delta"""', '""""""'], {}), "('synapse_storage_events_state_delta', '')\n", (2469, 2511), False, 'from prometheus_client import Counter, Histogram\n'), ((2645, 2707), 'prometheus_client.Counter', 'Counter', (['"""synapse_storage_events_state_delta_single_event"""', '""""""'], {}), "('synapse_storage_events_state_delta_single_event', '')\n", (2652, 2707), False, 'from prometheus_client import Counter, Histogram\n'), ((2911, 2972), 'prometheus_client.Counter', 'Counter', (['"""synapse_storage_events_state_delta_reuse_delta"""', '""""""'], {}), "('synapse_storage_events_state_delta_reuse_delta', '')\n", (2918, 2972), False, 'from prometheus_client import Counter, Histogram\n'), ((3066, 3255), 'prometheus_client.Histogram', 'Histogram', (['"""synapse_storage_events_forward_extremities_persisted"""', '"""Number of forward extremities for each new event"""'], {'buckets': "(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, '+Inf')"}), "('synapse_storage_events_forward_extremities_persisted',\n 'Number of forward extremities for each new event', buckets=(1, 2, 3, 5,\n 7, 10, 15, 20, 50, 100, 200, 500, '+Inf'))\n", (3075, 3255), False, 'from prometheus_client import Counter, Histogram\n'), ((3457, 3666), 'prometheus_client.Histogram', 'Histogram', (['"""synapse_storage_events_stale_forward_extremities_persisted"""', '"""Number of unchanged forward extremities for each new event"""'], {'buckets': "(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, '+Inf')"}), "('synapse_storage_events_stale_forward_extremities_persisted',\n 'Number of unchanged forward extremities for each new event', buckets=(\n 0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, '+Inf'))\n", (3466, 3666), False, 'from prometheus_client import Counter, Histogram\n'), ((7841, 7900), 'collections.namedtuple', 'namedtuple', (['"""_EventCacheEntry"""', "('event', 'redacted_event')"], {}), "('_EventCacheEntry', ('event', 'redacted_event'))\n", (7851, 7900), False, 'from collections import Counter as c_counter, OrderedDict, deque, namedtuple\n'), ((87472, 87602), 'collections.namedtuple', 'namedtuple', (['"""AllNewEventsResult"""', "['new_forward_events', 'new_backfill_events', 'forward_ex_outliers',\n 'backward_ex_outliers']"], {}), "('AllNewEventsResult', ['new_forward_events',\n 'new_backfill_events', 'forward_ex_outliers', 'backward_ex_outliers'])\n", (87482, 87602), False, 'from collections import Counter as c_counter, OrderedDict, deque, namedtuple\n'), ((3801, 3844), 'synapse.util.frozenutils.frozendict_json_encoder.encode', 'frozendict_json_encoder.encode', (['json_object'], {}), '(json_object)\n', (3831, 3844), False, 'from synapse.util.frozenutils import frozendict_json_encoder\n'), ((4114, 4205), 'collections.namedtuple', 
'namedtuple', (['"""_EventPersistQueueItem"""', "('events_and_contexts', 'backfilled', 'deferred')"], {}), "('_EventPersistQueueItem', ('events_and_contexts', 'backfilled',\n 'deferred'))\n", (4124, 4205), False, 'from collections import Counter as c_counter, OrderedDict, deque, namedtuple\n'), ((8167, 8178), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (8172, 8178), False, 'from functools import wraps\n'), ((67894, 67928), 'synapse.util.caches.descriptors.cached', 'cached', ([], {'num_args': '(5)', 'max_entries': '(10)'}), '(num_args=5, max_entries=10)\n', (67900, 67928), False, 'from synapse.util.caches.descriptors import cached, cachedInlineCallbacks\n'), ((86284, 86323), 'synapse.util.caches.descriptors.cachedInlineCallbacks', 'cachedInlineCallbacks', ([], {'max_entries': '(5000)'}), '(max_entries=5000)\n', (86305, 86323), False, 'from synapse.util.caches.descriptors import cached, cachedInlineCallbacks\n'), ((7480, 7542), 'synapse.metrics.background_process_metrics.run_as_background_process', 'run_as_background_process', (['"""persist_events"""', 'handle_queue_loop'], {}), "('persist_events', handle_queue_loop)\n", (7505, 7542), False, 'from synapse.metrics.background_process_metrics import run_as_background_process\n'), ((8504, 8526), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['res'], {}), '(res)\n', (8521, 8526), False, 'from twisted.internet import defer\n'), ((9244, 9255), 'collections.Counter', 'c_counter', ([], {}), '()\n', (9253, 9255), True, 'from collections import Counter as c_counter, OrderedDict, deque, namedtuple\n'), ((9265, 9435), 'synapse.metrics.BucketCollector', 'BucketCollector', (['"""synapse_forward_extremities"""', '(lambda : self._current_forward_extremities_amount)'], {'buckets': "[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, '+Inf']"}), "('synapse_forward_extremities', lambda : self.\n _current_forward_extremities_amount, buckets=[1, 2, 3, 5, 7, 10, 15, 20,\n 50, 100, 200, 500, '+Inf'])\n", (9280, 9435), False, 'from synapse.metrics import BucketCollector\n'), ((11126, 11148), 'six.iteritems', 'iteritems', (['partitioned'], {}), '(partitioned)\n', (11135, 11148), False, 'from six import iteritems, text_type\n'), ((11592, 11627), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['max_persisted_id'], {}), '(max_persisted_id)\n', (11609, 11627), False, 'from twisted.internet import defer\n'), ((12360, 12438), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['(event.internal_metadata.stream_ordering, max_persisted_id)'], {}), '((event.internal_metadata.stream_ordering, max_persisted_id))\n', (12377, 12438), False, 'from twisted.internet import defer\n'), ((24714, 24739), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['result'], {}), '(result)\n', (24731, 24739), False, 'from twisted.internet import defer\n'), ((25825, 25851), 'synapse.util.batch_iter', 'batch_iter', (['event_ids', '(100)'], {}), '(event_ids, 100)\n', (25835, 25851), False, 'from synapse.util import batch_iter\n'), ((26001, 26027), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['results'], {}), '(results)\n', (26018, 26027), False, 'from twisted.internet import defer\n'), ((28210, 28236), 'synapse.util.batch_iter', 'batch_iter', (['event_ids', '(100)'], {}), '(event_ids, 100)\n', (28220, 28236), False, 'from synapse.util import batch_iter\n'), ((28384, 28417), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['existing_prevs'], {}), '(existing_prevs)\n', (28401, 28417), False, 'from twisted.internet import 
defer\n'), ((35202, 35238), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['(res.state, None)'], {}), '((res.state, None))\n', (35219, 35238), False, 'from twisted.internet import defer\n'), ((36016, 36057), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['(to_delete, to_insert)'], {}), '((to_delete, to_insert))\n', (36033, 36057), False, 'from twisted.internet import defer\n'), ((40778, 40808), 'six.iteritems', 'iteritems', (['state_delta_by_room'], {}), '(state_delta_by_room)\n', (40787, 40808), False, 'from six import iteritems, text_type\n'), ((44898, 44932), 'six.iteritems', 'iteritems', (['new_forward_extremities'], {}), '(new_forward_extremities)\n', (44907, 44932), False, 'from six import iteritems, text_type\n'), ((46641, 46654), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (46652, 46654), False, 'from collections import Counter as c_counter, OrderedDict, deque, namedtuple\n'), ((48515, 48539), 'six.iteritems', 'iteritems', (['depth_updates'], {}), '(depth_updates)\n', (48524, 48539), False, 'from six import iteritems, text_type\n'), ((62541, 62563), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['ret'], {}), '(ret)\n', (62558, 62563), False, 'from twisted.internet import defer\n'), ((63296, 63318), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['ret'], {}), '(ret)\n', (63313, 63318), False, 'from twisted.internet import defer\n'), ((63816, 63838), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['ret'], {}), '(ret)\n', (63833, 63838), False, 'from twisted.internet import defer\n'), ((72296, 72328), 'synapse.types.RoomStreamToken.parse', 'RoomStreamToken.parse', (['token_str'], {}), '(token_str)\n', (72317, 72328), False, 'from synapse.types import RoomStreamToken, get_domain_from_id\n'), ((86231, 86277), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['((to_1, so_1) > (to_2, so_2))'], {}), '((to_1, so_1) > (to_2, so_2))\n', (86248, 86277), False, 'from twisted.internet import defer\n'), ((5066, 5073), 'collections.deque', 'deque', ([], {}), '()\n', (5071, 5073), False, 'from collections import Counter as c_counter, OrderedDict, deque, namedtuple\n'), ((5479, 5495), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (5493, 5495), False, 'from twisted.internet import defer\n'), ((7653, 7660), 'collections.deque', 'deque', ([], {}), '()\n', (7658, 7660), False, 'from collections import Counter as c_counter, OrderedDict, deque, namedtuple\n'), ((9708, 9798), 'synapse.metrics.background_process_metrics.run_as_background_process', 'run_as_background_process', (['"""read_forward_extremities"""', 'self._read_forward_extremities'], {}), "('read_forward_extremities', self.\n _read_forward_extremities)\n", (9733, 9798), False, 'from synapse.metrics.background_process_metrics import run_as_background_process\n'), ((12244, 12277), 'synapse.util.logcontext.make_deferred_yieldable', 'make_deferred_yieldable', (['deferred'], {}), '(deferred)\n', (12267, 12277), False, 'from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable\n'), ((32637, 32668), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['(None, None)'], {}), '((None, None))\n', (32654, 32668), False, 'from twisted.internet import defer\n'), ((64305, 64322), 'twisted.internet.defer.succeed', 'defer.succeed', (['[]'], {}), '([])\n', (64318, 64322), False, 'from twisted.internet import defer\n'), ((66151, 66168), 'twisted.internet.defer.succeed', 'defer.succeed', (['[]'], {}), '([])\n', 
(66164, 66168), False, 'from twisted.internet import defer\n'), ((74182, 74259), 'synapse.api.errors.SynapseError', 'SynapseError', (['(400)', '"""topological_ordering is greater than forward extremeties"""'], {}), "(400, 'topological_ordering is greater than forward extremeties')\n", (74194, 74259), False, 'from synapse.api.errors import SynapseError\n'), ((86631, 86689), 'synapse.api.errors.SynapseError', 'SynapseError', (['(404)', "('Could not find event %s' % (event_id,))"], {}), "(404, 'Could not find event %s' % (event_id,))\n", (86643, 86689), False, 'from synapse.api.errors import SynapseError\n'), ((11448, 11498), 'twisted.internet.defer.gatherResults', 'defer.gatherResults', (['deferreds'], {'consumeErrors': '(True)'}), '(deferreds, consumeErrors=True)\n', (11467, 11498), False, 'from twisted.internet import defer\n'), ((12572, 12610), 'synapse.util.metrics.Measure', 'Measure', (['self._clock', '"""persist_events"""'], {}), "(self._clock, 'persist_events')\n", (12579, 12610), False, 'from synapse.util.metrics import Measure\n'), ((22314, 22347), 'six.iteritems', 'iteritems', (['current_state_for_room'], {}), '(current_state_for_room)\n', (22323, 22347), False, 'from six import iteritems, text_type\n'), ((22477, 22511), 'six.iteritems', 'iteritems', (['new_forward_extremeties'], {}), '(new_forward_extremeties)\n', (22486, 22511), False, 'from six import iteritems, text_type\n'), ((33523, 33564), 'twisted.internet.defer.returnValue', 'defer.returnValue', (['(new_state, delta_ids)'], {}), '((new_state, delta_ids))\n', (33540, 33564), False, 'from twisted.internet import defer\n'), ((35924, 35948), 'six.iteritems', 'iteritems', (['current_state'], {}), '(current_state)\n', (35933, 35948), False, 'from six import iteritems, text_type\n'), ((35155, 35181), 'synapse.state.StateResolutionStore', 'StateResolutionStore', (['self'], {}), '(self)\n', (35175, 35181), False, 'from synapse.state import StateResolutionStore\n'), ((84177, 84214), 'itertools.islice', 'itertools.islice', (['next_to_search', '(100)'], {}), '(next_to_search, 100)\n', (84193, 84214), False, 'import itertools\n'), ((15059, 15110), 'synapse.util.metrics.Measure', 'Measure', (['self._clock', '"""_calculate_state_and_extrem"""'], {}), "(self._clock, '_calculate_state_and_extrem')\n", (15066, 15110), False, 'from synapse.util.metrics import Measure\n'), ((15643, 15668), 'six.iteritems', 'iteritems', (['events_by_room'], {}), '(events_by_room)\n', (15652, 15668), False, 'from six import iteritems, text_type\n'), ((42890, 42910), 'six.iteritems', 'iteritems', (['to_insert'], {}), '(to_insert)\n', (42899, 42910), False, 'from six import iteritems, text_type\n'), ((43273, 43310), 'itertools.chain', 'itertools.chain', (['to_delete', 'to_insert'], {}), '(to_delete, to_insert)\n', (43288, 43310), False, 'import itertools\n'), ((44387, 44424), 'itertools.chain', 'itertools.chain', (['to_delete', 'to_insert'], {}), '(to_delete, to_insert)\n', (44402, 44424), False, 'import itertools\n'), ((45380, 45414), 'six.iteritems', 'iteritems', (['new_forward_extremities'], {}), '(new_forward_extremities)\n', (45389, 45414), False, 'from six import iteritems, text_type\n'), ((46100, 46134), 'six.iteritems', 'iteritems', (['new_forward_extremities'], {}), '(new_forward_extremities)\n', (46109, 46134), False, 'from six import iteritems, text_type\n'), ((7088, 7112), 'synapse.util.logcontext.PreserveLoggingContext', 'PreserveLoggingContext', ([], {}), '()\n', (7110, 7112), False, 'from synapse.util.logcontext import PreserveLoggingContext, 
make_deferred_yieldable\n'), ((22150, 22182), 'synapse.types.get_domain_from_id', 'get_domain_from_id', (['event.sender'], {}), '(event.sender)\n', (22168, 22182), False, 'from synapse.types import RoomStreamToken, get_domain_from_id\n'), ((27975, 27995), 'canonicaljson.json.loads', 'json.loads', (['metadata'], {}), '(metadata)\n', (27985, 27995), False, 'from canonicaljson import json\n'), ((43733, 43753), 'six.iteritems', 'iteritems', (['to_insert'], {}), '(to_insert)\n', (43742, 43753), False, 'from six import iteritems, text_type\n'), ((79298, 79319), 'six.iteritems', 'iteritems', (['curr_state'], {}), '(curr_state)\n', (79307, 79319), False, 'from six import iteritems, text_type\n'), ((6955, 6979), 'synapse.util.logcontext.PreserveLoggingContext', 'PreserveLoggingContext', ([], {}), '()\n', (6977, 6979), False, 'from synapse.util.logcontext import PreserveLoggingContext, make_deferred_yieldable\n'), ((19072, 19137), 'synapse.util.metrics.Measure', 'Measure', (['self._clock', '"""persist_events.get_new_state_after_events"""'], {}), "(self._clock, 'persist_events.get_new_state_after_events')\n", (19079, 19137), False, 'from synapse.util.metrics import Measure\n'), ((25766, 25782), 'canonicaljson.json.loads', 'json.loads', (['r[1]'], {}), '(r[1])\n', (25776, 25782), False, 'from canonicaljson import json\n'), ((20209, 20269), 'synapse.util.metrics.Measure', 'Measure', (['self._clock', '"""persist_events.calculate_state_delta"""'], {}), "(self._clock, 'persist_events.calculate_state_delta')\n", (20216, 20269), False, 'from synapse.util.metrics import Measure\n')] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a class which acts as a wrapper around the PPR algorithm.
This class has the following functionality:
1. Load the KB graph,
2. Given a list of seed entities, get the top-k entities from PPR.
3. Get unique facts between all extracted entities.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from fat.fat_bert_nq.ppr.apr_algo import csr_personalized_pagerank
from fat.fat_bert_nq.ppr.apr_algo import csr_topk_fact_extractor
from fat.fat_bert_nq.ppr.kb_csr_io import CsrData
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_bool(
'verbose_logging', False,
'If true, all of the warnings related to data processing will be printed. '
'A number of warnings are expected for a normal NQ evaluation.')
class ApproximatePageRank(object):
"""APR main lib which is used to wrap functions around ppr algo."""
def __init__(self):
self.data = CsrData()
self.data.load_csr_data(
full_wiki=FLAGS.full_wiki, files_dir=FLAGS.apr_files_dir)
def get_topk_extracted_ent(self, seeds, alpha, topk):
"""Extract topk entities given seeds.
Args:
seeds: An Ex1 vector with weight on every seed entity
alpha: probability for PPR
topk: max top entities to extract
Returns:
extracted_ents: list of selected entities
extracted_scores: list of scores of selected entities
"""
ppr_scores = csr_personalized_pagerank(seeds, self.data.adj_mat_t_csr,
alpha)
sorted_idx = np.argsort(ppr_scores)[::-1]
extracted_ents = sorted_idx[:topk]
extracted_scores = ppr_scores[sorted_idx[:topk]]
    # Check for very low scores: find the index of the first value below 1e-6
    # and truncate the extracted entities at that point.
zero_idx = np.where(ppr_scores[extracted_ents] < 1e-6)[0]
if zero_idx.shape[0] > 0:
extracted_ents = extracted_ents[:zero_idx[0]]
return extracted_ents, extracted_scores
def get_facts(self, entities, topk, alpha, seed_weighting=True):
"""Get subgraph describing a neighbourhood around given entities.
Args:
entities: A list of Wikidata entities
topk: Max entities to extract from PPR
alpha: Node probability for PPR
      seed_weighting: Boolean for weighting seeds by their frequency in the passage
Returns:
unique_facts: A list of unique facts around the seeds.
"""
if FLAGS.verbose_logging:
tf.logging.info('Getting subgraph')
entity_ids = [
int(self.data.ent2id[x]) for x in entities if x in self.data.ent2id
]
if FLAGS.verbose_logging:
tf.logging.info(
str([self.data.entity_names['e'][str(x)]['name'] for x in entity_ids
]))
freq_dict = {x: entity_ids.count(x) for x in entity_ids}
seed = np.zeros((self.data.adj_mat.shape[0], 1))
if not seed_weighting:
seed[entity_ids] = 1. / len(set(entity_ids))
else:
for x, y in freq_dict.items():
seed[x] = y
seed = seed / seed.sum()
extracted_ents, extracted_scores = self.get_topk_extracted_ent(
seed, alpha, topk)
if FLAGS.verbose_logging:
tf.logging.info('Extracted ents: ')
tf.logging.info(
str([
self.data.entity_names['e'][str(x)]['name']
for x in extracted_ents
]))
facts = csr_topk_fact_extractor(self.data.adj_mat_t_csr, self.data.rel_dict,
freq_dict, self.data.entity_names,
extracted_ents, extracted_scores)
if FLAGS.verbose_logging:
tf.logging.info('Extracted facts: ')
tf.logging.info(str(facts))
# Extract 1 unique fact per pair of entities (fact with highest score)
# Sort by scores
unique_facts = {}
for (sub, obj, rel, score) in facts:
fwd_dir = (sub, obj)
rev_dir = (obj, sub)
if fwd_dir in unique_facts and score > unique_facts[fwd_dir][1]:
unique_facts[fwd_dir] = (rel, score)
elif rev_dir in unique_facts and score > unique_facts[rev_dir][1]:
unique_facts[fwd_dir] = (rel, score)
del unique_facts[rev_dir] # Remove existing entity pair
else:
unique_facts[(sub, obj)] = (rel, score)
unique_facts = list(unique_facts.items())
return unique_facts
| [
"fat.fat_bert_nq.ppr.kb_csr_io.CsrData",
"numpy.where",
"tensorflow.logging.info",
"numpy.argsort",
"numpy.zeros",
"fat.fat_bert_nq.ppr.apr_algo.csr_personalized_pagerank",
"fat.fat_bert_nq.ppr.apr_algo.csr_topk_fact_extractor"
] | [((1590, 1599), 'fat.fat_bert_nq.ppr.kb_csr_io.CsrData', 'CsrData', ([], {}), '()\n', (1597, 1599), False, 'from fat.fat_bert_nq.ppr.kb_csr_io import CsrData\n'), ((2084, 2148), 'fat.fat_bert_nq.ppr.apr_algo.csr_personalized_pagerank', 'csr_personalized_pagerank', (['seeds', 'self.data.adj_mat_t_csr', 'alpha'], {}), '(seeds, self.data.adj_mat_t_csr, alpha)\n', (2109, 2148), False, 'from fat.fat_bert_nq.ppr.apr_algo import csr_personalized_pagerank\n'), ((3459, 3500), 'numpy.zeros', 'np.zeros', (['(self.data.adj_mat.shape[0], 1)'], {}), '((self.data.adj_mat.shape[0], 1))\n', (3467, 3500), True, 'import numpy as np\n'), ((4007, 4148), 'fat.fat_bert_nq.ppr.apr_algo.csr_topk_fact_extractor', 'csr_topk_fact_extractor', (['self.data.adj_mat_t_csr', 'self.data.rel_dict', 'freq_dict', 'self.data.entity_names', 'extracted_ents', 'extracted_scores'], {}), '(self.data.adj_mat_t_csr, self.data.rel_dict,\n freq_dict, self.data.entity_names, extracted_ents, extracted_scores)\n', (4030, 4148), False, 'from fat.fat_bert_nq.ppr.apr_algo import csr_topk_fact_extractor\n'), ((2209, 2231), 'numpy.argsort', 'np.argsort', (['ppr_scores'], {}), '(ppr_scores)\n', (2219, 2231), True, 'import numpy as np\n'), ((2449, 2493), 'numpy.where', 'np.where', (['(ppr_scores[extracted_ents] < 1e-06)'], {}), '(ppr_scores[extracted_ents] < 1e-06)\n', (2457, 2493), True, 'import numpy as np\n'), ((3099, 3134), 'tensorflow.logging.info', 'tf.logging.info', (['"""Getting subgraph"""'], {}), "('Getting subgraph')\n", (3114, 3134), True, 'import tensorflow as tf\n'), ((3809, 3844), 'tensorflow.logging.info', 'tf.logging.info', (['"""Extracted ents: """'], {}), "('Extracted ents: ')\n", (3824, 3844), True, 'import tensorflow as tf\n'), ((4253, 4289), 'tensorflow.logging.info', 'tf.logging.info', (['"""Extracted facts: """'], {}), "('Extracted facts: ')\n", (4268, 4289), True, 'import tensorflow as tf\n')] |
import warnings
from collections import OrderedDict
from distutils.version import LooseVersion
from functools import partial
from inspect import isclass
from typing import Callable, Optional, Dict, Union
import numpy as np
import torch
import tqdm
from torch import Tensor, nn
from torch.nn import functional as F
from adv_lib.distances.lp_norms import l0_distances, l1_distances, l2_distances, linf_distances
from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs
def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor:
"""
    Generates, for each label, one random target drawn from the (num_classes - 1) classes that differ from the
    original label.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random target for each label. Has the same shape as labels.
"""
random = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float)
random.scatter_(1, labels.unsqueeze(-1), 0)
return random.argmax(1)
def get_all_targets(labels: Tensor, num_classes: int):
"""
Generates all possible targets that are different from the original labels.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
        Number of classes to generate the targets from.
Returns
-------
targets: Tensor
        All possible targets for each label. Shape: (len(labels), num_classes - 1).
"""
all_possible_targets = torch.zeros(len(labels), num_classes - 1, dtype=torch.long)
all_classes = set(range(num_classes))
for i in range(len(labels)):
this_label = labels[i].item()
other_labels = list(all_classes.difference({this_label}))
all_possible_targets[i] = torch.tensor(other_labels)
return all_possible_targets
def run_attack(model: nn.Module,
inputs: Tensor,
labels: Tensor,
attack: Callable,
targets: Optional[Tensor] = None,
batch_size: Optional[int] = None) -> dict:
device = next(model.parameters()).device
to_device = lambda tensor: tensor.to(device)
targeted, adv_labels = False, labels
if targets is not None:
targeted, adv_labels = True, targets
batch_size = batch_size or len(inputs)
# run attack only on non already adversarial samples
already_adv = []
chunks = [tensor.split(batch_size) for tensor in [inputs, adv_labels]]
for (inputs_chunk, label_chunk) in zip(*chunks):
batch_chunk_d, label_chunk_d = [to_device(tensor) for tensor in [inputs_chunk, label_chunk]]
preds = model(batch_chunk_d).argmax(1)
is_adv = (preds == label_chunk_d) if targeted else (preds != label_chunk_d)
already_adv.append(is_adv.cpu())
not_adv = ~torch.cat(already_adv, 0)
start, end = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
forward_counter, backward_counter = ForwardCounter(), BackwardCounter()
model.register_forward_pre_hook(forward_counter)
if LooseVersion(torch.__version__) >= LooseVersion('1.8'):
model.register_full_backward_hook(backward_counter)
else:
model.register_backward_hook(backward_counter)
average_forwards, average_backwards = [], [] # number of forward and backward calls per sample
advs_chunks = []
chunks = [tensor.split(batch_size) for tensor in [inputs[not_adv], adv_labels[not_adv]]]
total_time = 0
for (inputs_chunk, label_chunk) in tqdm.tqdm(zip(*chunks), ncols=80, total=len(chunks[0])):
batch_chunk_d, label_chunk_d = [to_device(tensor.clone()) for tensor in [inputs_chunk, label_chunk]]
start.record()
advs_chunk_d = attack(model, batch_chunk_d, label_chunk_d, targeted=targeted)
# performance monitoring
end.record()
torch.cuda.synchronize()
total_time += (start.elapsed_time(end)) / 1000 # times for cuda Events are in milliseconds
average_forwards.append(forward_counter.num_samples_called / len(batch_chunk_d))
average_backwards.append(backward_counter.num_samples_called / len(batch_chunk_d))
forward_counter.reset(), backward_counter.reset()
advs_chunks.append(advs_chunk_d.cpu())
if isinstance(attack, partial) and (callback := attack.keywords.get('callback')) is not None:
callback.reset_windows()
adv_inputs = inputs.clone()
adv_inputs[not_adv] = torch.cat(advs_chunks, 0)
data = {
'inputs': inputs,
'labels': labels,
'targets': adv_labels if targeted else None,
'adv_inputs': adv_inputs,
'time': total_time,
'num_forwards': sum(average_forwards) / len(chunks[0]),
'num_backwards': sum(average_backwards) / len(chunks[0]),
}
return data
_default_metrics = OrderedDict([
('linf', linf_distances),
('l0', l0_distances),
('l1', l1_distances),
('l2', l2_distances),
])
def compute_attack_metrics(model: nn.Module,
attack_data: Dict[str, Union[Tensor, float]],
batch_size: Optional[int] = None,
metrics: Dict[str, Callable] = _default_metrics) -> Dict[str, Union[Tensor, float]]:
inputs, labels, targets, adv_inputs = map(attack_data.get, ['inputs', 'labels', 'targets', 'adv_inputs'])
if adv_inputs.min() < 0 or adv_inputs.max() > 1:
warnings.warn('Values of produced adversarials are not in the [0, 1] range -> Clipping to [0, 1].')
adv_inputs.clamp_(min=0, max=1)
device = next(model.parameters()).device
to_device = lambda tensor: tensor.to(device)
batch_size = batch_size or len(inputs)
chunks = [tensor.split(batch_size) for tensor in [inputs, labels, adv_inputs]]
all_predictions = [[] for _ in range(6)]
distances = {k: [] for k in metrics.keys()}
metrics = {k: v().to(device) if (isclass(v.func) if isinstance(v, partial) else False) else v for k, v in
metrics.items()}
append = lambda list, data: list.append(data.cpu())
for inputs_chunk, labels_chunk, adv_chunk in zip(*chunks):
inputs_chunk, adv_chunk = map(to_device, [inputs_chunk, adv_chunk])
clean_preds, adv_preds = [predict_inputs(model, chunk.to(device)) for chunk in [inputs_chunk, adv_chunk]]
list(map(append, all_predictions, [*clean_preds, *adv_preds]))
for metric, metric_func in metrics.items():
distances[metric].append(metric_func(adv_chunk, inputs_chunk).detach().cpu())
logits, probs, preds, logits_adv, probs_adv, preds_adv = [torch.cat(l) for l in all_predictions]
for metric in metrics.keys():
distances[metric] = torch.cat(distances[metric], 0)
accuracy_orig = (preds == labels).float().mean().item()
if targets is not None:
success = (preds_adv == targets)
labels = targets
else:
success = (preds_adv != labels)
prob_orig = probs.gather(1, labels.unsqueeze(1)).squeeze(1)
prob_adv = probs_adv.gather(1, labels.unsqueeze(1)).squeeze(1)
labels_infhot = torch.zeros_like(logits_adv).scatter_(1, labels.unsqueeze(1), float('inf'))
real = logits_adv.gather(1, labels.unsqueeze(1)).squeeze(1)
other = (logits_adv - labels_infhot).max(1).values
diff_vs_max_adv = (real - other)
nll = F.cross_entropy(logits, labels, reduction='none')
nll_adv = F.cross_entropy(logits_adv, labels, reduction='none')
data = {
'time': attack_data['time'],
'num_forwards': attack_data['num_forwards'],
'num_backwards': attack_data['num_backwards'],
'targeted': targets is not None,
'preds': preds,
'adv_preds': preds_adv,
'accuracy_orig': accuracy_orig,
'success': success,
'probs_orig': prob_orig,
'probs_adv': prob_adv,
'logit_diff_adv': diff_vs_max_adv,
'nll': nll,
'nll_adv': nll_adv,
'distances': distances,
}
return data
def print_metrics(metrics: dict) -> None:
np.set_printoptions(formatter={'float': '{:0.3f}'.format}, threshold=16, edgeitems=3,
linewidth=120) # To print arrays with less precision
print('Original accuracy: {:.2%}'.format(metrics['accuracy_orig']))
print('Attack done in: {:.2f}s with {:.4g} forwards and {:.4g} backwards.'.format(
metrics['time'], metrics['num_forwards'], metrics['num_backwards']))
success = metrics['success'].numpy()
fail = bool(success.mean() != 1)
print('Attack success: {:.2%}'.format(success.mean()) + fail * ' - {}'.format(success))
for distance, values in metrics['distances'].items():
data = values.numpy()
print('{}: {} - Average: {:.3f} - Median: {:.3f}'.format(distance, data, data.mean(), np.median(data)) +
fail * ' | Avg over success: {:.3f}'.format(data[success].mean()))
attack_type = 'targets' if metrics['targeted'] else 'correct'
print('Logit({} class) - max_Logit(other classes): {} - Average: {:.2f}'.format(
attack_type, metrics['logit_diff_adv'].numpy(), metrics['logit_diff_adv'].numpy().mean()))
print('NLL of target/pred class: {:.3f}'.format(metrics['nll_adv'].numpy().mean()))
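# Illustrative end-to-end usage (not part of the original module; `model`, `images`, `labels` and
# `my_attack` are placeholder names, and `my_attack` must follow the callable convention used in
# run_attack, i.e. attack(model, inputs, labels, targeted=...) -> adversarial inputs):
# attack_data = run_attack(model, images, labels, attack=my_attack, batch_size=128)
# metrics = compute_attack_metrics(model, attack_data, batch_size=128)
# print_metrics(metrics)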
| [
"torch.cuda.Event",
"collections.OrderedDict",
"numpy.median",
"inspect.isclass",
"torch.cuda.synchronize",
"torch.tensor",
"torch.zeros_like",
"adv_lib.utils.ForwardCounter",
"torch.nn.functional.cross_entropy",
"adv_lib.utils.BackwardCounter",
"distutils.version.LooseVersion",
"warnings.warn",
"torch.cat",
"numpy.set_printoptions"
] | [((5060, 5169), 'collections.OrderedDict', 'OrderedDict', (["[('linf', linf_distances), ('l0', l0_distances), ('l1', l1_distances), (\n 'l2', l2_distances)]"], {}), "([('linf', linf_distances), ('l0', l0_distances), ('l1',\n l1_distances), ('l2', l2_distances)])\n", (5071, 5169), False, 'from collections import OrderedDict\n'), ((4679, 4704), 'torch.cat', 'torch.cat', (['advs_chunks', '(0)'], {}), '(advs_chunks, 0)\n', (4688, 4704), False, 'import torch\n'), ((7563, 7612), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {'reduction': '"""none"""'}), "(logits, labels, reduction='none')\n", (7578, 7612), True, 'from torch.nn import functional as F\n'), ((7627, 7680), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_adv', 'labels'], {'reduction': '"""none"""'}), "(logits_adv, labels, reduction='none')\n", (7642, 7680), True, 'from torch.nn import functional as F\n'), ((8263, 8367), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'formatter': "{'float': '{:0.3f}'.format}", 'threshold': '(16)', 'edgeitems': '(3)', 'linewidth': '(120)'}), "(formatter={'float': '{:0.3f}'.format}, threshold=16,\n edgeitems=3, linewidth=120)\n", (8282, 8367), True, 'import numpy as np\n'), ((1980, 2006), 'torch.tensor', 'torch.tensor', (['other_labels'], {}), '(other_labels)\n', (1992, 2006), False, 'import torch\n'), ((3023, 3048), 'torch.cat', 'torch.cat', (['already_adv', '(0)'], {}), '(already_adv, 0)\n', (3032, 3048), False, 'import torch\n'), ((3067, 3103), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (3083, 3103), False, 'import torch\n'), ((3105, 3141), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (3121, 3141), False, 'import torch\n'), ((3182, 3198), 'adv_lib.utils.ForwardCounter', 'ForwardCounter', ([], {}), '()\n', (3196, 3198), False, 'from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs\n'), ((3200, 3217), 'adv_lib.utils.BackwardCounter', 'BackwardCounter', ([], {}), '()\n', (3215, 3217), False, 'from adv_lib.utils import ForwardCounter, BackwardCounter, predict_inputs\n'), ((3278, 3309), 'distutils.version.LooseVersion', 'LooseVersion', (['torch.__version__'], {}), '(torch.__version__)\n', (3290, 3309), False, 'from distutils.version import LooseVersion\n'), ((3313, 3332), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.8"""'], {}), "('1.8')\n", (3325, 3332), False, 'from distutils.version import LooseVersion\n'), ((4070, 4094), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4092, 4094), False, 'import torch\n'), ((5649, 5758), 'warnings.warn', 'warnings.warn', (['"""Values of produced adversarials are not in the [0, 1] range -> Clipping to [0, 1]."""'], {}), "(\n 'Values of produced adversarials are not in the [0, 1] range -> Clipping to [0, 1].'\n )\n", (5662, 5758), False, 'import warnings\n'), ((6831, 6843), 'torch.cat', 'torch.cat', (['l'], {}), '(l)\n', (6840, 6843), False, 'import torch\n'), ((6932, 6963), 'torch.cat', 'torch.cat', (['distances[metric]', '(0)'], {}), '(distances[metric], 0)\n', (6941, 6963), False, 'import torch\n'), ((7321, 7349), 'torch.zeros_like', 'torch.zeros_like', (['logits_adv'], {}), '(logits_adv)\n', (7337, 7349), False, 'import torch\n'), ((6140, 6155), 'inspect.isclass', 'isclass', (['v.func'], {}), '(v.func)\n', (6147, 6155), False, 'from inspect import isclass\n'), ((9015, 9030), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (9024, 
9030), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Filename: polygons_cd
"""
introduction: compare two groups of polygons stored in two shape files
authors: <NAME>
email:<EMAIL>
add time: 26 February, 2020
"""
import sys,os
from optparse import OptionParser
# added path of DeeplabforRS
sys.path.insert(0, os.path.expanduser('~/codes/PycharmProjects/DeeplabforRS'))
import basic_src.io_function as io_function
import basic_src.basic as basic
import basic_src.map_projection as map_projection
import parameters
import polygons_cd_multi
import polygons_cd
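# Example invocation (script and file names are hypothetical):
#   python <this_script>.py old_polygons.shp new_polygons.shp -o main.shp
# which writes the expanding parts to expand_main.shp and the shrinking parts to shrink_main.shp.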
def main(options, args):
old_shp_path = args[0]
new_shp_path = args[1]
# check files do exist
assert io_function.is_file_exist(new_shp_path)
assert io_function.is_file_exist(old_shp_path)
# check projection of the shape file, should be the same
old_shp_proj4 = map_projection.get_raster_or_vector_srs_info_proj4(old_shp_path)
new_shp_proj4 = map_projection.get_raster_or_vector_srs_info_proj4(new_shp_path)
if old_shp_proj4 != new_shp_proj4:
        raise ValueError('error, projection inconsistency between %s and %s' % (old_shp_proj4, new_shp_proj4))
main_shp_name = polygons_cd_multi.get_main_shp_name(old_shp_path,new_shp_path)
# conduct change detection
if options.output is not None:
main_shp_name = options.output
# get expanding and shrinking parts
output_path_expand = 'expand_' + main_shp_name
output_path_shrink = 'shrink_' + main_shp_name
polygons_cd.polygons_change_detection(old_shp_path, new_shp_path, output_path_expand,output_path_shrink)
if __name__ == "__main__":
usage = "usage: %prog [options] old_shape_file new_shape_file "
parser = OptionParser(usage=usage, version="1.0 2020-02-26")
parser.description = 'Introduction: compare two groups of polygons '
parser.add_option("-p", "--para",
action="store", dest="para_file",
help="the parameters file")
parser.add_option('-o', '--output',
action="store", dest = 'output',
help='the path to save the change detection results')
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.print_help()
sys.exit(2)
# # set parameters files
# if options.para_file is None:
# print('error, no parameters file')
# parser.print_help()
# sys.exit(2)
# else:
# parameters.set_saved_parafile_path(options.para_file)
basic.setlogfile('polygons_changeDetection.log')
main(options, args)
| [
"sys.exit",
"basic_src.io_function.is_file_exist",
"polygons_cd_multi.get_main_shp_name",
"optparse.OptionParser",
"basic_src.basic.setlogfile",
"polygons_cd.polygons_change_detection",
"basic_src.map_projection.get_raster_or_vector_srs_info_proj4",
"os.path.expanduser"
] | [((264, 322), 'os.path.expanduser', 'os.path.expanduser', (['"""~/codes/PycharmProjects/DeeplabforRS"""'], {}), "('~/codes/PycharmProjects/DeeplabforRS')\n", (282, 322), False, 'import sys, os\n'), ((633, 672), 'basic_src.io_function.is_file_exist', 'io_function.is_file_exist', (['new_shp_path'], {}), '(new_shp_path)\n', (658, 672), True, 'import basic_src.io_function as io_function\n'), ((684, 723), 'basic_src.io_function.is_file_exist', 'io_function.is_file_exist', (['old_shp_path'], {}), '(old_shp_path)\n', (709, 723), True, 'import basic_src.io_function as io_function\n'), ((806, 870), 'basic_src.map_projection.get_raster_or_vector_srs_info_proj4', 'map_projection.get_raster_or_vector_srs_info_proj4', (['old_shp_path'], {}), '(old_shp_path)\n', (856, 870), True, 'import basic_src.map_projection as map_projection\n'), ((891, 955), 'basic_src.map_projection.get_raster_or_vector_srs_info_proj4', 'map_projection.get_raster_or_vector_srs_info_proj4', (['new_shp_path'], {}), '(new_shp_path)\n', (941, 955), True, 'import basic_src.map_projection as map_projection\n'), ((1125, 1188), 'polygons_cd_multi.get_main_shp_name', 'polygons_cd_multi.get_main_shp_name', (['old_shp_path', 'new_shp_path'], {}), '(old_shp_path, new_shp_path)\n', (1160, 1188), False, 'import polygons_cd_multi\n'), ((1441, 1550), 'polygons_cd.polygons_change_detection', 'polygons_cd.polygons_change_detection', (['old_shp_path', 'new_shp_path', 'output_path_expand', 'output_path_shrink'], {}), '(old_shp_path, new_shp_path,\n output_path_expand, output_path_shrink)\n', (1478, 1550), False, 'import polygons_cd\n'), ((1657, 1708), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'version': '"""1.0 2020-02-26"""'}), "(usage=usage, version='1.0 2020-02-26')\n", (1669, 1708), False, 'from optparse import OptionParser\n'), ((2460, 2508), 'basic_src.basic.setlogfile', 'basic.setlogfile', (['"""polygons_changeDetection.log"""'], {}), "('polygons_changeDetection.log')\n", (2476, 2508), True, 'import basic_src.basic as basic\n'), ((2204, 2215), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2212, 2215), False, 'import sys, os\n')] |
from django_cron import CronJobBase, Schedule
from .models import Link
from django.utils import timezone
class MyCronJob(CronJobBase):
    RUN_EVERY_MINS = 1 # run every minute
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = 'basicapp.cron' # a unique code
def do(self):
current_time = timezone.now()
links = Link.objects.all()
for obj in links:
print("Checking last hit date for: ", obj.shortenURL)
delta = current_time - obj.last_hit
if delta.days > 2:
print('link is older than 2 days, DELETING!')
obj.delete()
else:
                print("link was recently hit, won't delete.")
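# To activate this job with the standard django-cron setup (paths assume this file is basicapp/cron.py):
# add "basicapp.cron.MyCronJob" to CRON_CLASSES in settings.py and run `python manage.py runcrons`
# periodically (e.g. from crontab).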
| [
"django.utils.timezone.now",
"django_cron.Schedule"
] | [((191, 230), 'django_cron.Schedule', 'Schedule', ([], {'run_every_mins': 'RUN_EVERY_MINS'}), '(run_every_mins=RUN_EVERY_MINS)\n', (199, 230), False, 'from django_cron import CronJobBase, Schedule\n'), ((319, 333), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (331, 333), False, 'from django.utils import timezone\n')] |
from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from sesame import utils
from django.core.mail import send_mail
def login_page(request):
if request.method == "POST":
email = request.POST.get("emailId")
user = User.objects.get(email=email)
login_token = utils.get_query_string(user)
login_link = "http://127.0.0.1:8000/customers/{}".format(login_token)
html_message = """
<p>Hi there,</p>
<p>Here is your <a href="{}">magic link</a> </p>
<p>Thanks,</p>
<p>Django Admin</p>
""".format(login_link)
send_mail(
'Django Magic Link',
html_message,
'<EMAIL>',
[email],
fail_silently=False,
html_message = html_message
)
return render(request, "login.html", context={"message":"Please check your email for magic link."})
return render(request, "login.html")
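# Note: for the emailed link to log the user in, django-sesame must be configured in settings.py,
# typically by adding "sesame.backends.ModelBackend" to AUTHENTICATION_BACKENDS and (optionally)
# "sesame.middleware.AuthenticationMiddleware" to MIDDLEWARE so the token in the query string
# authenticates the request before @login_required runs.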
@login_required
def customers_home_page(request):
return render(request, "customers/index.html") | [
"django.shortcuts.render",
"django.contrib.auth.models.User.objects.get",
"sesame.utils.get_query_string",
"django.core.mail.send_mail"
] | [((998, 1027), 'django.shortcuts.render', 'render', (['request', '"""login.html"""'], {}), "(request, 'login.html')\n", (1004, 1027), False, 'from django.shortcuts import render\n'), ((1090, 1129), 'django.shortcuts.render', 'render', (['request', '"""customers/index.html"""'], {}), "(request, 'customers/index.html')\n", (1096, 1129), False, 'from django.shortcuts import render\n'), ((321, 350), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'email': 'email'}), '(email=email)\n', (337, 350), False, 'from django.contrib.auth.models import User\n'), ((373, 401), 'sesame.utils.get_query_string', 'utils.get_query_string', (['user'], {}), '(user)\n', (395, 401), False, 'from sesame import utils\n'), ((681, 797), 'django.core.mail.send_mail', 'send_mail', (['"""Django Magic Link"""', 'html_message', '"""<EMAIL>"""', '[email]'], {'fail_silently': '(False)', 'html_message': 'html_message'}), "('Django Magic Link', html_message, '<EMAIL>', [email],\n fail_silently=False, html_message=html_message)\n", (690, 797), False, 'from django.core.mail import send_mail\n'), ((893, 990), 'django.shortcuts.render', 'render', (['request', '"""login.html"""'], {'context': "{'message': 'Please check your email for magic link.'}"}), "(request, 'login.html', context={'message':\n 'Please check your email for magic link.'})\n", (899, 990), False, 'from django.shortcuts import render\n')] |
from .custom_check import CustomCheck, CustomCheckError
from typing import Any, List
import logging
logger = logging.getLogger(__name__)
class DSSParameterError(Exception):
"""Exception raised when at least one CustomCheck fails."""
pass
class DSSParameter:
"""Object related to one parameter. It is mainly used for checks to run in backend for custom forms.
Attributes:
name(str): Name of the parameter
value(Any): Value of the parameter
checks(list[dict], optional): Checks to run on provided value
required(bool, optional): Whether the value can be None
"""
def __init__(
self, name: str, value: Any, checks: List[dict] = None, required: bool = False
):
"""Initialization method for the DSSParameter class
Args:
name(str): Name of the parameter
value(Any): Value of the parameter
checks(list[dict], optional): Checks to run on provided value
required(bool, optional): Whether the value can be None
"""
if checks is None:
checks = []
self.name = name
self.value = value
self.checks = [CustomCheck(**check) for check in checks]
if required:
self.checks.append(CustomCheck(type="exists"))
self.run_checks()
def run_checks(self):
"""Runs all checks provided for this parameter"""
errors = []
for check in self.checks:
try:
check.run(self.value)
except CustomCheckError as err:
errors.append(err)
if errors:
self.handle_failure(errors)
self.handle_success()
def handle_failure(self, errors: List[CustomCheckError]):
"""Is called when at least one test fails. It will raise an Exception with understandable text
Args:
errors(list[CustomCheckError]: Errors met when running checks
Raises:
DSSParameterError: Raises if at least on check fails
"""
raise DSSParameterError(self.format_failure_message(errors))
def format_failure_message(self, errors: List[CustomCheckError]) -> str:
"""Format failure text
Args:
errors(list[CustomCheckError]: Errors met when running checks
Returns:
str: Formatted error message
"""
return """
Error for parameter \"{name}\" :
{errors}
""".format(
name=self.name, errors="\n".join(["\t {}".format(e) for e in errors])
)
def handle_success(self):
"""Called if all checks are successful. Prints a success message"""
self.print_success_message()
    def print_success_message(self):
        """Formats the success message"""
logger.info("All checks have been successfully done for {}.".format(self.name))
def __repr__(self):
return "DSSParameter(name={}, value={})".format(self.name, self.value)
def __str__(self):
return "DSSParameter(name={}, value={})".format(self.name, self.value)
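# Minimal usage sketch (illustrative names and values; available check types depend on CustomCheck):
# DSSParameter(name="connection", value=connection_name, required=True)
# DSSParameter(name="columns", value=selected_columns, checks=[{"type": "exists"}])
# Each constructor call runs its checks immediately and raises DSSParameterError if any of them fail.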
| [
"logging.getLogger"
] | [((111, 138), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (128, 138), False, 'import logging\n')] |
#!/usr/bin/env python3
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import uuid
import mgm_utils
def main():
(root_dir, input_file, json_file) = sys.argv[1:4]
tmpName = str(uuid.uuid4())
tmpdir = "/tmp"
temp_input_file = f"{tmpdir}/{tmpName}.dat"
temp_output_file = f"{tmpdir}/{tmpName}.json"
shutil.copy(input_file, temp_input_file)
sif = mgm_utils.get_sif_dir(root_dir) + "/ina_segmentation.sif"
r = subprocess.run(["singularity", "run", sif, temp_input_file, temp_output_file])
shutil.copy(temp_output_file, json_file)
if os.path.exists(temp_input_file):
os.remove(temp_input_file)
if os.path.exists(temp_output_file):
os.remove(temp_output_file)
exit(r.returncode)
if __name__ == "__main__":
main()
| [
"os.path.exists",
"subprocess.run",
"uuid.uuid4",
"os.remove",
"shutil.copy",
"mgm_utils.get_sif_dir"
] | [((341, 381), 'shutil.copy', 'shutil.copy', (['input_file', 'temp_input_file'], {}), '(input_file, temp_input_file)\n', (352, 381), False, 'import shutil\n'), ((453, 531), 'subprocess.run', 'subprocess.run', (["['singularity', 'run', sif, temp_input_file, temp_output_file]"], {}), "(['singularity', 'run', sif, temp_input_file, temp_output_file])\n", (467, 531), False, 'import subprocess\n'), ((535, 575), 'shutil.copy', 'shutil.copy', (['temp_output_file', 'json_file'], {}), '(temp_output_file, json_file)\n', (546, 575), False, 'import shutil\n'), ((581, 612), 'os.path.exists', 'os.path.exists', (['temp_input_file'], {}), '(temp_input_file)\n', (595, 612), False, 'import os\n'), ((648, 680), 'os.path.exists', 'os.path.exists', (['temp_output_file'], {}), '(temp_output_file)\n', (662, 680), False, 'import os\n'), ((217, 229), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (227, 229), False, 'import uuid\n'), ((390, 421), 'mgm_utils.get_sif_dir', 'mgm_utils.get_sif_dir', (['root_dir'], {}), '(root_dir)\n', (411, 421), False, 'import mgm_utils\n'), ((616, 642), 'os.remove', 'os.remove', (['temp_input_file'], {}), '(temp_input_file)\n', (625, 642), False, 'import os\n'), ((684, 711), 'os.remove', 'os.remove', (['temp_output_file'], {}), '(temp_output_file)\n', (693, 711), False, 'import os\n')] |
from concurrent.futures import TimeoutError
from google.cloud import pubsub_v1
project_id = "pubsub-testing-331300"
subscription_id = "test-sub"
# Number of seconds the subscriber should listen for messages
timeout = 5.0
subscriber = pubsub_v1.SubscriberClient()
# The `subscription_path` method creates a fully qualified identifier
# in the form `projects/{project_id}/subscriptions/{subscription_id}`
subscription_path = subscriber.subscription_path(project_id, subscription_id)
def callback(message: pubsub_v1.subscriber.message.Message) -> None:
print(f"Received {message}.")
message.ack()
streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
print(f"Listening for messages on {subscription_path}..\n")
# Wrap subscriber in a 'with' block to automatically call close() when done.
with subscriber:
try:
# When `timeout` is not set, result() will block indefinitely,
# unless an exception is encountered first.
streaming_pull_future.result(timeout=timeout)
except TimeoutError:
streaming_pull_future.cancel() # Trigger the shutdown.
streaming_pull_future.result() # Block until the shutdown is complete.
| [
"google.cloud.pubsub_v1.SubscriberClient"
] | [((236, 264), 'google.cloud.pubsub_v1.SubscriberClient', 'pubsub_v1.SubscriberClient', ([], {}), '()\n', (262, 264), False, 'from google.cloud import pubsub_v1\n')] |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class ResConfigSettings(models.TransientModel):
_inherit = "res.config.settings"
google_drive_authorization_code = fields.Char(string='Authorization Code', config_parameter='google_drive_authorization_code')
google_drive_uri = fields.Char(compute='_compute_drive_uri', string='URI', help="The URL to generate the authorization code from Google")
is_google_drive_token_generated = fields.Boolean(string='Refresh Token Generated')
@api.depends('google_drive_authorization_code')
def _compute_drive_uri(self):
google_drive_uri = self.env['google.service']._get_google_token_uri('drive', scope=self.env['google.drive.config'].get_google_scope())
for config in self:
config.google_drive_uri = google_drive_uri
def get_values(self):
res = super(ResConfigSettings, self).get_values()
refresh_token = self.env['ir.config_parameter'].sudo().get_param('google_drive_refresh_token', False)
res.update(is_google_drive_token_generated=bool(refresh_token))
return res
def confirm_setup_token(self):
params = self.env['ir.config_parameter'].sudo()
authorization_code_before = params.get_param('google_drive_authorization_code')
authorization_code = self.google_drive_authorization_code
if authorization_code != authorization_code_before:
refresh_token = (
self.env['google.service'].generate_refresh_token('drive', authorization_code)
if authorization_code else False
)
params.set_param('google_drive_refresh_token', refresh_token)
def action_setup_token(self):
self.ensure_one()
template = self.env.ref('google_drive.google_drive_auth_code_wizard')
return {
'name': _('Set up refresh token'),
'type': 'ir.actions.act_window',
'res_model': 'res.config.settings',
'views': [(template.id, 'form')],
'target': 'new',
}
| [
"odoo.api.depends",
"odoo._",
"odoo.fields.Char",
"odoo.fields.Boolean"
] | [((266, 363), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Authorization Code"""', 'config_parameter': '"""google_drive_authorization_code"""'}), "(string='Authorization Code', config_parameter=\n 'google_drive_authorization_code')\n", (277, 363), False, 'from odoo import api, fields, models, _\n'), ((382, 505), 'odoo.fields.Char', 'fields.Char', ([], {'compute': '"""_compute_drive_uri"""', 'string': '"""URI"""', 'help': '"""The URL to generate the authorization code from Google"""'}), "(compute='_compute_drive_uri', string='URI', help=\n 'The URL to generate the authorization code from Google')\n", (393, 505), False, 'from odoo import api, fields, models, _\n'), ((539, 587), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Refresh Token Generated"""'}), "(string='Refresh Token Generated')\n", (553, 587), False, 'from odoo import api, fields, models, _\n'), ((594, 640), 'odoo.api.depends', 'api.depends', (['"""google_drive_authorization_code"""'], {}), "('google_drive_authorization_code')\n", (605, 640), False, 'from odoo import api, fields, models, _\n'), ((1931, 1956), 'odoo._', '_', (['"""Set up refresh token"""'], {}), "('Set up refresh token')\n", (1932, 1956), False, 'from odoo import api, fields, models, _\n')] |
from dataclasses import dataclass
from typing import List
from greendoge.types.condition_opcodes import ConditionOpcode
from greendoge.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class ConditionWithArgs(Streamable):
"""
This structure is used to store parsed CLVM conditions
Conditions in CLVM have either format of (opcode, var1) or (opcode, var1, var2)
"""
opcode: ConditionOpcode
vars: List[bytes]
| [
"dataclasses.dataclass"
] | [((185, 207), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (194, 207), False, 'from dataclasses import dataclass\n')] |
"""The nexia integration base entity."""
from aiopvapi.resources.shade import ATTR_TYPE
from homeassistant.const import ATTR_MODEL, ATTR_SW_VERSION
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
DEVICE_FIRMWARE,
DEVICE_MAC_ADDRESS,
DEVICE_MODEL,
DEVICE_NAME,
DEVICE_SERIAL_NUMBER,
DOMAIN,
FIRMWARE,
FIRMWARE_BUILD,
FIRMWARE_REVISION,
FIRMWARE_SUB_REVISION,
MANUFACTURER,
)
class HDEntity(CoordinatorEntity):
"""Base class for hunter douglas entities."""
def __init__(self, coordinator, device_info, room_name, unique_id):
"""Initialize the entity."""
super().__init__(coordinator)
self._room_name = room_name
self._unique_id = unique_id
self._device_info = device_info
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def device_info(self) -> DeviceInfo:
"""Return the device_info of the device."""
firmware = self._device_info[DEVICE_FIRMWARE]
sw_version = f"{firmware[FIRMWARE_REVISION]}.{firmware[FIRMWARE_SUB_REVISION]}.{firmware[FIRMWARE_BUILD]}"
return DeviceInfo(
identifiers={(DOMAIN, self._device_info[DEVICE_SERIAL_NUMBER])},
connections={
(dr.CONNECTION_NETWORK_MAC, self._device_info[DEVICE_MAC_ADDRESS])
},
name=self._device_info[DEVICE_NAME],
suggested_area=self._room_name,
model=self._device_info[DEVICE_MODEL],
sw_version=sw_version,
manufacturer=MANUFACTURER,
)
class ShadeEntity(HDEntity):
"""Base class for hunter douglas shade entities."""
def __init__(self, coordinator, device_info, room_name, shade, shade_name):
"""Initialize the shade."""
super().__init__(coordinator, device_info, room_name, shade.id)
self._shade_name = shade_name
self._shade = shade
@property
def device_info(self) -> DeviceInfo:
"""Return the device_info of the device."""
device_info = DeviceInfo(
identifiers={(DOMAIN, self._shade.id)},
name=self._shade_name,
suggested_area=self._room_name,
manufacturer=MANUFACTURER,
model=str(self._shade.raw_data[ATTR_TYPE]),
via_device=(DOMAIN, self._device_info[DEVICE_SERIAL_NUMBER]),
)
for shade in self._shade.shade_types:
if shade.shade_type == device_info[ATTR_MODEL]:
device_info[ATTR_MODEL] = shade.description
break
if FIRMWARE not in self._shade.raw_data:
return device_info
firmware = self._shade.raw_data[FIRMWARE]
sw_version = f"{firmware[FIRMWARE_REVISION]}.{firmware[FIRMWARE_SUB_REVISION]}.{firmware[FIRMWARE_BUILD]}"
device_info[ATTR_SW_VERSION] = sw_version
return device_info
| [
"homeassistant.helpers.entity.DeviceInfo"
] | [((1314, 1646), 'homeassistant.helpers.entity.DeviceInfo', 'DeviceInfo', ([], {'identifiers': '{(DOMAIN, self._device_info[DEVICE_SERIAL_NUMBER])}', 'connections': '{(dr.CONNECTION_NETWORK_MAC, self._device_info[DEVICE_MAC_ADDRESS])}', 'name': 'self._device_info[DEVICE_NAME]', 'suggested_area': 'self._room_name', 'model': 'self._device_info[DEVICE_MODEL]', 'sw_version': 'sw_version', 'manufacturer': 'MANUFACTURER'}), '(identifiers={(DOMAIN, self._device_info[DEVICE_SERIAL_NUMBER])},\n connections={(dr.CONNECTION_NETWORK_MAC, self._device_info[\n DEVICE_MAC_ADDRESS])}, name=self._device_info[DEVICE_NAME],\n suggested_area=self._room_name, model=self._device_info[DEVICE_MODEL],\n sw_version=sw_version, manufacturer=MANUFACTURER)\n', (1324, 1646), False, 'from homeassistant.helpers.entity import DeviceInfo\n')] |
# Process the unix command line of the pipeline.
import argparse
from version import rubra_version
def get_cmdline_args():
return parser.parse_args()
parser = argparse.ArgumentParser(
description='A bioinformatics pipeline system.')
parser.add_argument(
'pipeline',
metavar='PIPELINE_FILE',
type=str,
help='Your Ruffus pipeline stages (a Python module)')
parser.add_argument(
'--config',
metavar='CONFIG_FILE',
type=str,
nargs='+',
required=True,
help='One or more configuration files (Python modules)')
parser.add_argument(
'--verbose',
type=int,
choices=(0, 1, 2),
required=False,
default=1,
help='Output verbosity level: 0 = quiet; 1 = normal; \
2 = chatty (default is 1)')
parser.add_argument(
'--style',
type=str,
choices=('print', 'run', 'flowchart', 'touchfiles'),
required=False,
default='print',
help='Pipeline behaviour: print; run; touchfiles; flowchart (default is print)')
parser.add_argument(
'--force',
metavar='TASKNAME',
type=str,
required=False,
default=[],
nargs='+',
help='tasks which are forced to be out of date regardless of timestamps')
parser.add_argument(
'--end',
metavar='TASKNAME',
type=str,
required=False,
help='end points (tasks) for the pipeline')
parser.add_argument(
'--rebuild',
type=str,
choices=('fromstart', 'fromend'),
required=False,
default='fromstart',
help='rebuild outputs by working back from end tasks or forwards \
from start tasks (default is fromstart)')
parser.add_argument(
'--version', action='version', version='%(prog)s ' + rubra_version)
| [
"argparse.ArgumentParser"
] | [((166, 238), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""A bioinformatics pipeline system."""'}), "(description='A bioinformatics pipeline system.')\n", (189, 238), False, 'import argparse\n')] |
import webbrowser
import config
from Generator import Generator
def main():
generator = Generator()
latitude, longitude = generator.getCoordinates()
webbrowser.open(config.api_request.format(latitude, longitude))
if __name__ == '__main__':
main()
| [
"Generator.Generator",
"config.api_request.format"
] | [((94, 105), 'Generator.Generator', 'Generator', ([], {}), '()\n', (103, 105), False, 'from Generator import Generator\n'), ((180, 226), 'config.api_request.format', 'config.api_request.format', (['latitude', 'longitude'], {}), '(latitude, longitude)\n', (205, 226), False, 'import config\n')] |
hiddenimports = ['sip', 'PyQt4.QtGui', 'PyQt4._qt']
from PyInstaller.hooks.hookutils import qt4_plugins_binaries
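# PyInstaller calls hook(mod) for the hooked module; extending mod.binaries with the Qt4
# 'phonon_backend' plugin binaries ensures Phonon's backend plugins are bundled into the build.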
def hook(mod):
mod.binaries.extend(qt4_plugins_binaries('phonon_backend'))
return mod
| [
"PyInstaller.hooks.hookutils.qt4_plugins_binaries"
] | [((155, 193), 'PyInstaller.hooks.hookutils.qt4_plugins_binaries', 'qt4_plugins_binaries', (['"""phonon_backend"""'], {}), "('phonon_backend')\n", (175, 193), False, 'from PyInstaller.hooks.hookutils import qt4_plugins_binaries\n')] |
from PyTradier.base import BasePyTradier
from typing import Union
from datetime import datetime
class MarketData(BasePyTradier):
"""All Methods currently only support string API calls, no datetime, bools, etc
"""
    def quotes(self, symbols: Union[str, list], greeks: bool = False) -> dict:
        """Get quotes for one or more symbols (equity or option) in a single request.
:param symbols: Comma-delimited list of symbols (equity or option)
:type symbols: Union[str, list]
:param greeks: Add greeks and volatility information (option only), defaults to False
:type greeks: bool, optional
:return: quotes for requested symbols
:rtype: dict
"""
symbols = self._symbol_prep(symbols)
return self._get(
"/v1/markets/quotes",
params=self.create_params(locals()),
dict_args=("quotes", "quotes"),
)
def option_chain(
self,
symbol: str,
expiration: Union[str, datetime],
greeks: Union[str, bool] = "false",
) -> dict:
"""Get all quotes in an option chain. Greek and IV data is included courtesy of ORATS. Please check out their APIs for more in-depth options data.
:param symbol: Underlying symbol of the chain
:type symbol: str
:param expiration: Expiration for the chain
:type expiration: Union[str, datetime]
:param greeks: Add greeks and volatility information, defaults to "false"
:type greeks: Union[str, bool], optional
:return: Get all quotes in an option chain
:rtype: dict
"""
return self._get(
"/v1/markets/options/chains",
params=self.create_params(locals()),
dict_args=("options", "option"),
)
def option_strike(self, symbol: str, expiration: Union[str, datetime]) -> list:
"""Get an options strike prices for a specified expiration date.
:param symbol: Underlying symbol of the chain
:type symbol: str
:param expiration: Expiration for the chain
:type expiration: Union[str, datetime]
        :return: list of strike prices for the given expiration date
:rtype: list
"""
return self._get(
"/v1/markets/options/strikes", params=self.create_params(locals())
)
def option_lookup(self, underlying: str) -> dict:
"""Get all options symbols for the given underlying. This will include additional option roots (ex. SPXW, RUTW) if applicable.
:param underlying: Underlying symbol of the chain
:type underlying: str
:return: dict {"rootSymbol": underlying, "options": [list of option symbols]}
:rtype: dict
"""
return self._get(
"/v1/markets/options/lookup", params=self.create_params(locals())
)
def option_expirations(
self,
symbol: str,
includeAllRoots: Union[str, bool] = "",
strikes: Union[str, bool] = "",
) -> list:
"""Get expiration dates for a particular underlying.
Note that some underlying securities use a different symbol for their weekly options (RUT/RUTW, SPX/SPXW). To make sure you see all expirations, make sure to send the includeAllRoots parameter. This will also ensure any unique options due to corporate actions (AAPL1) are returned.
:param symbol: Underlying symbol of the chain
:type symbol: str
:param includeAllRoots: Send expirations related to all option roots, defaults to ''
:type includeAllRoots: Union[str, bool], optional
:param strikes: Add strike prices to each expiration, defaults to ''
:type strikes: Union[str, bool], optional
:return: list of expiration dates as str %Y-%m-%d
:rtype: list
"""
response = self._get(
"/v1/markets/options/expirations", params=self.create_params(locals())
)
return response
def historic_quotes(
self, symbol: str, interval: str = "daily", start: str = None, end: str = None
) -> list:
"""Get historical pricing for a security. This data will usually cover the entire lifetime of the company if sending reasonable start/end times. You can fetch historical pricing for options by passing the OCC option symbol (ex. AAPL220617C00270000) as the symbol.
:param symbol: Symbol to query
:type symbol: str
:param interval: Interval of time per timesale. One of: daily, weekly, monthly, defaults to "daily"
:type interval: str, optional
:param start: Start date represented as YYYY-MM-DD, defaults to None
:type start: str, optional
:param end: End date represented as YYYY-MM-DD, defaults to None
:type end: str, optional
        :return: list of historical price bars (date, open, high, low, close, volume) at the requested interval
:rtype: list
"""
return self._get(
"/v1/markets/history",
params=self.create_params(locals()),
dict_args=("history", "day"),
)
def time_and_sales(
self, symbol: str, start: str, end: str, interval: str = "1min"
) -> list:
"""Time and Sales (timesales) is typically used for charting purposes. It captures pricing across a time slice at predefined intervals.
Tick data is also available through this endpoint. This results in a very large data set for high-volume symbols, so the time slice needs to be much smaller to keep downloads time reasonable.`
:param symbol: A single security symbol.
:type symbol: str
:param start: Start date/time for timesales range represented as YYYY-MM-DD HH:MM
:type start: str
:param end: Start date/time for timesales range represented as YYYY-MM-DD HH:MM
:type end: str
:param interval: Interval of time per timesale. One of: tick, 1min, 5min, 15min, defaults to "1min"
:type interval: str, optional
:return: list of dictionaries containing keys of ['time', 'timestamp', 'price', 'open', 'high', 'close', low', 'volume', 'vwap']
:rtype: list
"""
return self._get(
"/v1/markets/timesales",
params=self.create_params(locals()),
dict_args=("series", "data"),
)
if __name__ == "__main__":
from utils import printer
data = MarketData()
symbol = "AAPL"
response = data.option_lookup(symbol)
# response = data.option_strike(symbol, dates[0])
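    # Other illustrative calls against the same client (kept commented out, like the line above):
    # expirations = data.option_expirations(symbol, includeAllRoots="true")
    # chain = data.option_chain(symbol, expiration=expirations[0], greeks="false")
    # quotes = data.quotes([symbol, "MSFT"])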
printer(response)
| [
"utils.printer"
] | [((6561, 6578), 'utils.printer', 'printer', (['response'], {}), '(response)\n', (6568, 6578), False, 'from utils import printer\n')] |
"""
Services are the heart of RPyC: each side of the connection exposes a *service*,
which define the capabilities available to the other side.
Note that the services by both parties need not be symmetric, e.g., one side may
exposed *service A*, while the other may expose *service B*. As long as the two
can interoperate, you're good to go.
"""
from functools import partial
from rpyc.lib import hybridmethod
from rpyc.lib.compat import execute, is_py3k
from rpyc.core.protocol import Connection
class Service(object):
"""The service base-class. Derive from this class to implement custom RPyC
services:
* The name of the class implementing the ``Foo`` service should match the
pattern ``FooService`` (suffixed by the word 'Service') ::
class FooService(Service):
pass
FooService.get_service_name() # 'FOO'
FooService.get_service_aliases() # ['FOO']
* To supply a different name or aliases, use the ``ALIASES`` class attribute ::
class Foobar(Service):
ALIASES = ["foo", "bar", "lalaland"]
Foobar.get_service_name() # 'FOO'
Foobar.get_service_aliases() # ['FOO', 'BAR', 'LALALAND']
* Override :func:`on_connect` to perform custom initialization
* Override :func:`on_disconnect` to perform custom finalization
* To add exposed methods or attributes, simply define them normally,
but prefix their name by ``exposed_``, e.g. ::
class FooService(Service):
def exposed_add(self, x, y):
return x + y
* All other names (not prefixed by ``exposed_``) are local (not accessible
to the other party)
.. note::
You can override ``_rpyc_getattr``, ``_rpyc_setattr`` and ``_rpyc_delattr``
to change attribute lookup -- but beware of possible **security implications!**
"""
__slots__ = ()
ALIASES = ()
_protocol = Connection
def on_connect(self, conn):
"""called when the connection is established"""
pass
def on_disconnect(self, conn):
"""called when the connection had already terminated for cleanup
(must not perform any IO on the connection)"""
pass
# Using default defined in 'protocol.Connection._access_attr' for:
# def _rpyc_getattr(self, name):
def _rpyc_delattr(self, name):
raise AttributeError("access denied")
def _rpyc_setattr(self, name, value):
raise AttributeError("access denied")
@classmethod
def get_service_aliases(cls):
"""returns a list of the aliases of this service"""
if cls.ALIASES:
return tuple(str(n).upper() for n in cls.ALIASES)
name = cls.__name__.upper()
if name.endswith("SERVICE"):
name = name[:-7]
return (name,)
@classmethod
def get_service_name(cls):
"""returns the canonical name of the service (which is its first
alias)"""
return cls.get_service_aliases()[0]
exposed_get_service_aliases = get_service_aliases
exposed_get_service_name = get_service_name
@hybridmethod
def _connect(self, channel, config={}):
"""Setup a connection via the given channel."""
if isinstance(self, type): # autovivify if accessed as class method
self = self()
# Note that we are here passing in `self` as root object for backward
# compatibility and convenience. You could pass in a different root if
# you wanted:
conn = self._protocol(self, channel, config)
self.on_connect(conn)
return conn
class VoidService(Service):
"""void service - an do-nothing service"""
__slots__ = ()
class ModuleNamespace(object):
"""used by the :class:`SlaveService` to implement the magical
'module namespace'"""
__slots__ = ["__getmodule", "__cache", "__weakref__"]
def __init__(self, getmodule):
self.__getmodule = getmodule
self.__cache = {}
def __contains__(self, name):
try:
self[name]
except ImportError:
return False
else:
return True
def __getitem__(self, name):
if type(name) is tuple:
name = ".".join(name)
if name not in self.__cache:
self.__cache[name] = self.__getmodule(name)
return self.__cache[name]
def __getattr__(self, name):
return self[name]
class Slave(object):
__slots__ = ["_conn", "namespace"]
def __init__(self):
self._conn = None
self.namespace = {}
def execute(self, text):
"""execute arbitrary code (using ``exec``)"""
execute(text, self.namespace)
def eval(self, text):
"""evaluate arbitrary code (using ``eval``)"""
return eval(text, self.namespace)
def getmodule(self, name):
"""imports an arbitrary module"""
return __import__(name, None, None, "*")
def getconn(self):
"""returns the local connection instance to the other side"""
return self._conn
class SlaveService(Slave, Service):
"""The SlaveService allows the other side to perform arbitrary imports and
execution arbitrary code on the server. This is provided for compatibility
with the classic RPyC (2.6) modus operandi.
This service is very useful in local, secure networks, but it exposes
a **major security risk** otherwise."""
__slots__ = ()
def on_connect(self, conn):
self._conn = conn
self._conn._config.update(dict(
allow_all_attrs = True,
allow_pickle = True,
allow_getattr = True,
allow_setattr = True,
allow_delattr = True,
allow_exposed_attrs = False,
import_custom_exceptions = True,
instantiate_custom_exceptions = True,
instantiate_oldstyle_exceptions = True,
))
super(SlaveService, self).on_connect(conn)
class FakeSlaveService(VoidService):
"""VoidService that can be used for connecting to peers that operate a
:class:`MasterService`, :class:`ClassicService`, or the old
``SlaveService`` (pre v3.5) without exposing any functionality to them."""
__slots__ = ()
exposed_namespace = None
exposed_execute = None
exposed_eval = None
exposed_getmodule = None
exposed_getconn = None
class MasterService(Service):
"""Peer for a new-style (>=v3.5) :class:`SlaveService`. Use this service
if you want to connect to a ``SlaveService`` without exposing any
functionality to them."""
__slots__ = ()
def on_connect(self, conn):
super(MasterService, self).on_connect(conn)
self._install(conn, conn.root)
@staticmethod
def _install(conn, slave):
modules = ModuleNamespace(slave.getmodule)
builtin = modules.builtins if is_py3k else modules.__builtin__
conn.modules = modules
conn.eval = slave.eval
conn.execute = slave.execute
conn.namespace = slave.namespace
conn.builtin = builtin
conn.builtins = builtin
from rpyc.utils.classic import teleport_function
conn.teleport = partial(teleport_function, conn)
class ClassicService(MasterService, SlaveService):
"""Full duplex master/slave service, i.e. both parties have full control
over the other. Must be used by both parties."""
__slots__ = ()
class ClassicClient(MasterService, FakeSlaveService):
"""MasterService that can be used for connecting to peers that operate a
:class:`MasterService`, :class:`ClassicService` without exposing any
functionality to them."""
__slots__ = ()
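# Illustrative pairing using rpyc's public helpers (an example sketch, not prescribed usage):
#   server side: rpyc.utils.server.ThreadedServer(ClassicService, port=18812).start()
#   client side: conn = rpyc.connect("localhost", 18812, service=ClassicClient)
# giving the client full control over the server while exposing nothing in return.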
| [
"functools.partial",
"rpyc.lib.compat.execute"
] | [((4665, 4694), 'rpyc.lib.compat.execute', 'execute', (['text', 'self.namespace'], {}), '(text, self.namespace)\n', (4672, 4694), False, 'from rpyc.lib.compat import execute, is_py3k\n'), ((7188, 7220), 'functools.partial', 'partial', (['teleport_function', 'conn'], {}), '(teleport_function, conn)\n', (7195, 7220), False, 'from functools import partial\n')] |
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.safestring import SafeString
import markdown
import urllib
register = template.Library()
@register.filter
def strip_space(value):
return value.replace(' ', '')
@register.filter
@stringfilter
def commonmark(value):
return markdown.Markdown().convert(value)
@register.filter(name="getID")
def get_ID(value):
if not type(value) is str:
return value
return value.split('/')[-1]
@register.filter(name="getNav")
def get_nav(value):
return value.split('/')[-2]
@register.filter(name="encode_url")
def encode_url(value):
return urllib.parse.quote(value)
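# Template usage sketch ("my_filters" stands for whatever this module is named under templatetags/):
#   {% load my_filters %}
#   {{ post.author|getID }}  {{ body_text|commonmark }}  {{ next_url|encode_url }}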
@register.filter
def get_post_id(url):
"""
gets the post id from the comment page url
"""
return urllib.parse.urlparse(url.get_full_path()).path.rsplit('/', 1)[0] | [
"markdown.Markdown",
"urllib.parse.quote",
"django.template.Library"
] | [((173, 191), 'django.template.Library', 'template.Library', ([], {}), '()\n', (189, 191), False, 'from django import template\n'), ((660, 685), 'urllib.parse.quote', 'urllib.parse.quote', (['value'], {}), '(value)\n', (678, 685), False, 'import urllib\n'), ((334, 353), 'markdown.Markdown', 'markdown.Markdown', ([], {}), '()\n', (351, 353), False, 'import markdown\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.CloudbusUserInfo import CloudbusUserInfo
class MetroOdItem(object):
def __init__(self):
self._dest_geo = None
self._od = None
self._time = None
self._user_info = None
self._week_od = None
self._work_od = None
@property
def dest_geo(self):
return self._dest_geo
@dest_geo.setter
def dest_geo(self, value):
self._dest_geo = value
@property
def od(self):
return self._od
@od.setter
def od(self, value):
self._od = value
@property
def time(self):
return self._time
@time.setter
def time(self, value):
self._time = value
@property
def user_info(self):
return self._user_info
@user_info.setter
def user_info(self, value):
if isinstance(value, CloudbusUserInfo):
self._user_info = value
else:
self._user_info = CloudbusUserInfo.from_alipay_dict(value)
@property
def week_od(self):
return self._week_od
@week_od.setter
def week_od(self, value):
self._week_od = value
@property
def work_od(self):
return self._work_od
@work_od.setter
def work_od(self, value):
self._work_od = value
def to_alipay_dict(self):
params = dict()
if self.dest_geo:
if hasattr(self.dest_geo, 'to_alipay_dict'):
params['dest_geo'] = self.dest_geo.to_alipay_dict()
else:
params['dest_geo'] = self.dest_geo
if self.od:
if hasattr(self.od, 'to_alipay_dict'):
params['od'] = self.od.to_alipay_dict()
else:
params['od'] = self.od
if self.time:
if hasattr(self.time, 'to_alipay_dict'):
params['time'] = self.time.to_alipay_dict()
else:
params['time'] = self.time
if self.user_info:
if hasattr(self.user_info, 'to_alipay_dict'):
params['user_info'] = self.user_info.to_alipay_dict()
else:
params['user_info'] = self.user_info
if self.week_od:
if hasattr(self.week_od, 'to_alipay_dict'):
params['week_od'] = self.week_od.to_alipay_dict()
else:
params['week_od'] = self.week_od
if self.work_od:
if hasattr(self.work_od, 'to_alipay_dict'):
params['work_od'] = self.work_od.to_alipay_dict()
else:
params['work_od'] = self.work_od
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MetroOdItem()
if 'dest_geo' in d:
o.dest_geo = d['dest_geo']
if 'od' in d:
o.od = d['od']
if 'time' in d:
o.time = d['time']
if 'user_info' in d:
o.user_info = d['user_info']
if 'week_od' in d:
o.week_od = d['week_od']
if 'work_od' in d:
o.work_od = d['work_od']
return o
| [
"alipay.aop.api.domain.CloudbusUserInfo.CloudbusUserInfo.from_alipay_dict"
] | [((1063, 1103), 'alipay.aop.api.domain.CloudbusUserInfo.CloudbusUserInfo.from_alipay_dict', 'CloudbusUserInfo.from_alipay_dict', (['value'], {}), '(value)\n', (1096, 1103), False, 'from alipay.aop.api.domain.CloudbusUserInfo import CloudbusUserInfo\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" fogbench -- a Python script used to test FogLAMP.
The objective is to simulate payloads for input, REST and other requests against one or
more FogLAMP instances. This version of fogbench is meant to test the CoAP and HTTP plugins
interface of FogLAMP southbound services.
fogbench
[IN] -h --help Print this help
-i --interval The interval in seconds between each iteration (default: 0)
[IN] -k --keep Do not delete (keep) the running sample (default: no)
[IN] -o --output Set the output file for statistics
[IN] -p --payload Type of payload and protocol (default: coap)
[IN] -t --template Set the template to use
[IN] -v --version Display the version and exit
[IN] -H --host The FogLAMP host (default: localhost)
-I --iterations The number of iterations of the test (default: 1)
[IN] -O --occurrences The number of occurrences of the template (default: 1)
[IN] -P --port The FogLAMP port. Default depends on payload and protocol
[IN] -S --statistic The type of statistics to collect
Example:
$ cd $FOGLAMP_ROOT/bin
$ ./fogbench
Help:
$ ./fogbench -h
* Create reading objects from given template, as per the json file name specified with -t
* Save those objects to the file, as per the file name specified with -o
* Read those objects
* Send those to CoAP or HTTP south plugin server, on specific host and port
.. todo::
* Try generators
"""
import sys
import os
import random
import json
from datetime import datetime, timezone
import argparse
import collections
import asyncio
import aiohttp
from .exceptions import *
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_FOGBENCH_VERSION = u"0.1.1"
_start_time = []
_end_time = []
_tot_msgs_transferred = []
_tot_byte_transferred = []
_num_iterated = 0
"""Statistics to be collected"""
# _logger = logger.setup(__name__)
def local_timestamp():
"""
:return: str - current time stamp with microseconds and machine timezone info
:example '2018-05-08 14:06:40.517313+05:30'
"""
return str(datetime.now(timezone.utc).astimezone())
def read_templates():
templates = []
return templates
def parse_template_and_prepare_json(_template_file,
_write_to_file=None, _occurrences=1):
# template_file = os.path.join(os.path.dirname(__file__), "templates/" + _template_file)
with open(_template_file) as data_file:
data = json.load(data_file)
supported_format_types = ["number", "enum"]
for _ in range(_occurrences):
readings_ = _prepare_sensor_reading(data, supported_format_types)
for r in readings_:
_write_readings_to_file(_write_to_file, r)
def _write_readings_to_file(to_file, r):
with open(to_file, 'a') as the_file:
json.dump(r, the_file)
the_file.write(os.linesep)
def _prepare_sensor_reading(data, supported_format_types):
readings = []
for d in data:
x_sensor_values = dict()
_sensor_value_object_formats = d["sensor_values"]
for fmt in _sensor_value_object_formats:
if fmt["type"] not in supported_format_types:
raise InvalidSensorValueObjectTemplateFormat(u"Invalid format, "
u"Can not parse type {}".format(fmt["type"]))
if fmt["type"] == "number":
# check float precision if any
precision = fmt.get("precision", None)
min_val = fmt.get("min", None)
max_val = fmt.get("max", None)
if min_val is None or max_val is None:
raise InvalidSensorValueObjectTemplateFormat(u"Invalid format, "
u"Min and Max values must be defined for type number.")
# print(precision)
# print(min_val)
# print(max_val)
reading = round(random.uniform(min_val, max_val), precision)
elif fmt["type"] == "enum":
reading = random.choice(fmt["list"])
# print(fmt["name"], reading)
x_sensor_values[fmt["name"]] = reading
# print(d["name"])
sensor_value_object = dict()
sensor_value_object["asset"] = d['name']
sensor_value_object["readings"] = x_sensor_values
sensor_value_object["timestamp"] = "{!s}".format(local_timestamp())
# print(json.dumps(sensor_value_object))
ord_dict = collections.OrderedDict(sorted(sensor_value_object.items()))
readings.append(ord_dict)
return readings
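# Shape of a template entry consumed above, inferred from the keys accessed in this function
# (asset and sensor names are illustrative):
# [{"name": "fogbench/temperature",
#   "sensor_values": [{"name": "temperature", "type": "number", "min": 10, "max": 40, "precision": 2},
#                     {"name": "state", "type": "enum", "list": ["on", "off"]}]}]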
def read_out_file(_file=None, _keep=False, _iterations=1, _interval=0, send_to='coap'):
global _start_time
global _end_time
global _tot_msgs_transferred
global _tot_byte_transferred
global _num_iterated
# from pprint import pprint
import time
# _file = os.path.join(os.path.dirname(__file__), "out/{}".format(outfile))
with open(_file) as f:
readings_list = [json.loads(line) for line in f]
loop = asyncio.get_event_loop()
while _iterations > 0:
# Pre-calculate the messages and size
msg_transferred_itr = 0 # Messages transferred in every iteration
byte_transferred_itr = 0 # Bytes transferred in every iteration
for r in readings_list:
msg_transferred_itr += 1
byte_transferred_itr += sys.getsizeof(r)
if send_to == 'coap':
_start_time.append(datetime.now())
for r in readings_list:
is_sent = loop.run_until_complete(send_to_coap(r))
if not is_sent:
break
elif send_to == 'http':
_start_time.append(datetime.now())
loop.run_until_complete(send_to_http(readings_list))
_end_time.append(datetime.now()) # End time of every iteration
_tot_msgs_transferred.append(msg_transferred_itr)
_tot_byte_transferred.append(byte_transferred_itr)
_iterations -= 1
_num_iterated += 1
if _iterations != 0:
# print(u"Iteration {} completed, waiting for {} seconds".format(_iterations, _interval))
time.sleep(_interval)
if not _keep:
os.remove(_file)
async def send_to_coap(payload):
"""
POST request to:
localhost
port 5683 (official IANA assigned CoAP port),
URI "/other/sensor-values".
"""
from aiocoap import Context, Message
from aiocoap.numbers.codes import Code
from cbor2 import dumps
context = await Context.create_client_context()
request = Message(payload=dumps(payload), code=Code.POST)
request.opt.uri_host = arg_host
request.opt.uri_port = arg_port
request.opt.uri_path = ("other", "sensor-values")
response = await context.request(request).response
str_res = str(response.code)
status_code = str_res[:4] # or str_res.split()[0]
if status_code == "4.00" or status_code == "5.00":
print("Error: ", str_res)
return False
return True
async def send_to_http(payload):
"""
POST request to:
host localhost
port 6683 (default HTTP south plugin port),
uri sensor-reading
"""
headers = {'content-type': 'application/json'}
url = 'http://{}:{}/sensor-reading'.format(arg_host, arg_port)
async with aiohttp.ClientSession() as session:
async with session.post(url, data=json.dumps(payload), headers=headers) as resp:
await resp.text()
status_code = resp.status
if status_code in range(400, 500):
print("Bad request error | code:{}, reason: {}".format(status_code, resp.reason))
return False
if status_code in range(500, 600):
print("Server error | code:{}, reason: {}".format(status_code, resp.reason))
return False
return True
def get_statistics(_stats_type=None, _out_file=None):
stat = ''
global _start_time
global _end_time
global _tot_msgs_transferred
global _tot_byte_transferred
global _num_iterated
if _stats_type == 'total':
stat += u"Total Statistics:\n"
stat += (u"\nStart Time: {}".format(datetime.strftime(_start_time[0], "%Y-%m-%d %H:%M:%S.%f")))
stat += (u"\nEnd Time: {}\n".format(datetime.strftime(_end_time[-1], "%Y-%m-%d %H:%M:%S.%f")))
stat += (u"\nTotal Messages Transferred: {}".format(sum(_tot_msgs_transferred)))
stat += (u"\nTotal Bytes Transferred: {}\n".format(sum(_tot_byte_transferred)))
stat += (u"\nTotal Iterations: {}".format(_num_iterated))
stat += (u"\nTotal Messages per Iteration: {}".format(sum(_tot_msgs_transferred)/_num_iterated))
stat += (u"\nTotal Bytes per Iteration: {}\n".format(sum(_tot_byte_transferred)/_num_iterated))
_msg_rate = []
_byte_rate = []
for itr in range(_num_iterated):
time_taken = _end_time[itr] - _start_time[itr]
_msg_rate.append(_tot_msgs_transferred[itr]/(time_taken.seconds+time_taken.microseconds/1E6))
_byte_rate.append(_tot_byte_transferred[itr] / (time_taken.seconds+time_taken.microseconds/1E6))
stat += (u"\nMin messages/second: {}".format(min(_msg_rate)))
stat += (u"\nMax messages/second: {}".format(max(_msg_rate)))
stat += (u"\nAvg messages/second: {}\n".format(sum(_msg_rate)/_num_iterated))
stat += (u"\nMin Bytes/second: {}".format(min(_byte_rate)))
stat += (u"\nMax Bytes/second: {}".format(max(_byte_rate)))
stat += (u"\nAvg Bytes/second: {}".format(sum(_byte_rate)/_num_iterated))
if _out_file:
with open(_out_file, 'w') as f:
f.write(stat)
else:
print(stat)
# should we also show total time diff? end_time - start_time
def check_server(payload_type='coap'):
template_str = ">>> Make sure south {} plugin service is running \n & listening on specified host and port \n"
if payload_type == 'coap':
print(template_str.format("CoAP"))
elif payload_type == 'http':
print(template_str.format("HTTP"))
parser = argparse.ArgumentParser(prog='fogbench')
parser.description = '%(prog)s -- a Python script used to test FogLAMP (simulate payloads)'
parser.epilog = 'The initial version of %(prog)s is meant to test the south plugin interface of ' \
'FogLAMP using CoAP or HTTP'
parser.add_argument('-v', '--version', action='version', version='%(prog)s {0!s}'.format(_FOGBENCH_VERSION))
parser.add_argument('-k', '--keep', default=False, choices=['y', 'yes', 'n', 'no'],
help='Do not delete the running sample (default: no)')
parser.add_argument('-t', '--template', required=True, help='Set the template file, json extension')
parser.add_argument('-o', '--output', default=None, help='Set the statistics output file')
parser.add_argument('-p', '--payload', default='coap', choices=['coap', 'http'], help='Type of payload '
'and protocol (default: coap)')
parser.add_argument('-I', '--iterations', help='The number of iterations of the test (default: 1)')
parser.add_argument('-O', '--occurrences', help='The number of occurrences of the template (default: 1)')
parser.add_argument('-H', '--host', help='Server host address (default: localhost)')
parser.add_argument('-P', '--port', help='The FogLAMP port. (default: 5683)')
parser.add_argument('-i', '--interval', default=0, help='The interval in seconds for each iteration (default: 0)')
parser.add_argument('-S', '--statistics', default='total', choices=['total'], help='The type of statistics to collect '
'(default: total)')
namespace = parser.parse_args(sys.argv[1:])
infile = '{0}'.format(namespace.template if namespace.template else '')
statistics_file = os.path.join(os.path.dirname(__file__), "out/{}".format(namespace.output)) if namespace.output else None
keep_the_file = True if namespace.keep in ['y', 'yes'] else False
# iterations and occurrences
arg_iterations = int(namespace.iterations) if namespace.iterations else 1
arg_occurrences = int(namespace.occurrences) if namespace.occurrences else 1
# interval between each iteration
arg_interval = int(namespace.interval) if namespace.interval else 0
arg_stats_type = '{0}'.format(namespace.statistics) if namespace.statistics else 'total'
if namespace.payload:
arg_payload_protocol = namespace.payload
arg_host = '{0}'.format(namespace.host) if namespace.host else 'localhost'
default_port = 6683 if arg_payload_protocol == 'http' else 5683
arg_port = int(namespace.port) if namespace.port else default_port
check_server(arg_payload_protocol)
sample_file = os.path.join("/tmp", "foglamp_running_sample.{}".format(os.getpid()))
parse_template_and_prepare_json(_template_file=infile, _write_to_file=sample_file, _occurrences=arg_occurrences)
read_out_file(_file=sample_file, _keep=keep_the_file, _iterations=arg_iterations, _interval=arg_interval,
send_to=arg_payload_protocol)
get_statistics(_stats_type=arg_stats_type, _out_file=statistics_file)
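# Example invocation (hypothetical file name; flags as defined above):
#
#   python3 fogbench.py -t <template.json> -p coap -I 10 -O 2
#
# -O 2 writes two occurrences of the template to the sample file, and -I 10
# replays that sample ten times against the CoAP south service on localhost:5683.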
# TODO: Change below per local_timestamp() values
""" Expected output from given template
{
"timestamp" : "2017-08-04T06:59:57.503Z",
"asset" : "TI sensorTag/luxometer",
"sensor_values" : { "lux" : 49 }
}
{
"timestamp" : "2017-08-04T06:59:57.863Z",
"asset" : "TI sensorTag/pressure",
"sensor_values" : { "pressure" : 1021.2 }
}
{
"timestamp" : "2017-08-04T06:59:58.863Z",
"asset" : "TI sensorTag/humidity",
"sensor_values" : { "humidity" : 71.2, "temperature" : 18.6 }
}
{
"timestamp" : "2017-08-04T06:59:59.863Z",
"asset" : "TI sensorTag/temperature",
"sensor_values" : { "object" : 18.2, "ambient" : 21.6 }
}
{
"timestamp" : "2017-08-04T07:00:00.863Z",
"asset" : "TI sensorTag/accelerometer",
"sensor_values" : { "x" : 1.2, "y" : 0.0, "z" : -0.6 }
}
{
"timestamp" : "2017-08-04T07:00:01.863Z",
"asset" : "TI sensorTag/gyroscope",
"sensor_values" : { "x" : 101.2, "y" : 46.2, "z" : -12.6 }
}
{
"timestamp" : "2017-08-04T07:00:02.863Z",
"asset" : "TI sensorTag/magnetometer",
"sensor_values" : { "x" : 101.2, "y" : 46.2, "z" : -12.6 }
}
{
"timestamp" : "2017-08-04T07:00:03.863Z",
"asset" : "mouse",
"sensor_values" : { "button" : "down" }
}
{
"timestamp" : "2017-08-04T07:00:04.863Z",
"asset" : "wall clock",
"sensor_values" : { "tick" : "tock" }
}
"""
| [
"aiohttp.ClientSession",
"json.loads",
"random.uniform",
"random.choice",
"aiocoap.Context.create_client_context",
"argparse.ArgumentParser",
"cbor2.dumps",
"sys.getsizeof",
"datetime.datetime.strftime",
"json.dumps",
"time.sleep",
"os.path.dirname",
"datetime.datetime.now",
"os.getpid",
"json.load",
"asyncio.get_event_loop",
"json.dump",
"os.remove"
] | [((10450, 10490), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""fogbench"""'}), "(prog='fogbench')\n", (10473, 10490), False, 'import argparse\n'), ((5345, 5369), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (5367, 5369), False, 'import asyncio\n'), ((2690, 2710), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (2699, 2710), False, 'import json\n'), ((3051, 3073), 'json.dump', 'json.dump', (['r', 'the_file'], {}), '(r, the_file)\n', (3060, 3073), False, 'import json\n'), ((6533, 6549), 'os.remove', 'os.remove', (['_file'], {}), '(_file)\n', (6542, 6549), False, 'import os\n'), ((6855, 6886), 'aiocoap.Context.create_client_context', 'Context.create_client_context', ([], {}), '()\n', (6884, 6886), False, 'from aiocoap import Context, Message\n'), ((7646, 7669), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (7667, 7669), False, 'import aiohttp\n'), ((12268, 12293), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (12283, 12293), False, 'import os\n'), ((13182, 13193), 'os.getpid', 'os.getpid', ([], {}), '()\n', (13191, 13193), False, 'import os\n'), ((5301, 5317), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (5311, 5317), False, 'import json\n'), ((5699, 5715), 'sys.getsizeof', 'sys.getsizeof', (['r'], {}), '(r)\n', (5712, 5715), False, 'import sys\n'), ((6125, 6139), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6137, 6139), False, 'from datetime import datetime, timezone\n'), ((6484, 6505), 'time.sleep', 'time.sleep', (['_interval'], {}), '(_interval)\n', (6494, 6505), False, 'import time\n'), ((6918, 6932), 'cbor2.dumps', 'dumps', (['payload'], {}), '(payload)\n', (6923, 6932), False, 'from cbor2 import dumps\n'), ((8525, 8582), 'datetime.datetime.strftime', 'datetime.strftime', (['_start_time[0]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(_start_time[0], '%Y-%m-%d %H:%M:%S.%f')\n", (8542, 8582), False, 'from datetime import datetime, timezone\n'), ((8631, 8687), 'datetime.datetime.strftime', 'datetime.strftime', (['_end_time[-1]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(_end_time[-1], '%Y-%m-%d %H:%M:%S.%f')\n", (8648, 8687), False, 'from datetime import datetime, timezone\n'), ((2303, 2329), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (2315, 2329), False, 'from datetime import datetime, timezone\n'), ((5778, 5792), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5790, 5792), False, 'from datetime import datetime, timezone\n'), ((4229, 4261), 'random.uniform', 'random.uniform', (['min_val', 'max_val'], {}), '(min_val, max_val)\n', (4243, 4261), False, 'import random\n'), ((4340, 4366), 'random.choice', 'random.choice', (["fmt['list']"], {}), "(fmt['list'])\n", (4353, 4366), False, 'import random\n'), ((6018, 6032), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6030, 6032), False, 'from datetime import datetime, timezone\n'), ((7724, 7743), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (7734, 7743), False, 'import json\n')] |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Module for graph representations of crystals.
"""
import copy
import logging
import os.path
import subprocess
import warnings
from collections import defaultdict, namedtuple
from itertools import combinations
from operator import itemgetter
import networkx as nx
import networkx.algorithms.isomorphism as iso
import numpy as np
from monty.json import MSONable
from monty.os.path import which
from networkx.drawing.nx_agraph import write_dot
from networkx.readwrite import json_graph
from scipy.spatial import KDTree
from scipy.stats import describe
from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure
from pymatgen.core.structure import FunctionalGroups
from pymatgen.util.coord import lattice_points_in_supercell
from pymatgen.vis.structure_vtk import EL_COLORS
try:
import igraph
IGRAPH_AVAILABLE = True
except ImportError:
IGRAPH_AVAILABLE = False
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
__author__ = "<NAME>, <NAME>, <NAME>"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "August 2017"
ConnectedSite = namedtuple("ConnectedSite", "site, jimage, index, weight, dist")
def _compare(g1, g2, i1, i2):
"""
Helper function called by isomorphic to ensure comparison of node identities.
"""
return g1.vs[i1]["species"] == g2.vs[i2]["species"]
def _igraph_from_nxgraph(graph):
"""
Helper function that converts a networkx graph object into an igraph graph object.
"""
nodes = graph.nodes(data=True)
new_igraph = igraph.Graph()
for node in nodes:
new_igraph.add_vertex(name=str(node[0]), species=node[1]["specie"], coords=node[1]["coords"])
new_igraph.add_edges([(str(edge[0]), str(edge[1])) for edge in graph.edges()])
return new_igraph
def _isomorphic(frag1, frag2):
"""
Internal function to check if two graph objects are isomorphic, using igraph if
    it is available and networkx if it is not.
"""
f1_nodes = frag1.nodes(data=True)
f2_nodes = frag2.nodes(data=True)
if len(f1_nodes) != len(f2_nodes):
return False
    f1_edges = frag1.edges()
    f2_edges = frag2.edges()
    if len(f1_edges) != len(f2_edges):
return False
f1_comp_dict = {}
f2_comp_dict = {}
for node in f1_nodes:
if node[1]["specie"] not in f1_comp_dict:
f1_comp_dict[node[1]["specie"]] = 1
else:
f1_comp_dict[node[1]["specie"]] += 1
for node in f2_nodes:
if node[1]["specie"] not in f2_comp_dict:
f2_comp_dict[node[1]["specie"]] = 1
else:
f2_comp_dict[node[1]["specie"]] += 1
if f1_comp_dict != f2_comp_dict:
return False
if IGRAPH_AVAILABLE:
ifrag1 = _igraph_from_nxgraph(frag1)
ifrag2 = _igraph_from_nxgraph(frag2)
return ifrag1.isomorphic_vf2(ifrag2, node_compat_fn=_compare)
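    # igraph unavailable: fall back to networkx isomorphism, matching nodes on
    # their "specie" attribute ("ERROR" is only the default for a missing attribute).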
nm = iso.categorical_node_match("specie", "ERROR")
return nx.is_isomorphic(frag1.to_undirected(), frag2.to_undirected(), node_match=nm)
class StructureGraph(MSONable):
"""
This is a class for annotating a Structure with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, structure, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
        This class contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given crystallographic
structure easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
For periodic graphs, class stores information on the graph
edges of what lattice image the edge belongs to.
:param structure: a Structure object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(structure, StructureGraph):
# just make a copy from input
graph_data = structure.as_dict()["graphs"]
self.structure = structure
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
@classmethod
def with_empty_graph(cls, structure, name="bonds", edge_weight_name=None, edge_weight_units=None):
"""
Constructor for StructureGraph, returns a StructureGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Structure).
:param structure (Structure):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (StructureGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError(
"Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless."
)
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(
edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name,
)
graph.add_nodes_from(range(len(structure)))
graph_data = json_graph.adjacency_data(graph)
return cls(structure, graph_data=graph_data)
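    # Illustrative usage sketch (not executed here; `structure` is a placeholder
    # pymatgen Structure object):
    #
    #     sg = StructureGraph.with_empty_graph(
    #         structure, edge_weight_name="bond_length", edge_weight_units="Å"
    #     )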
@staticmethod
def with_edges(structure, edges):
"""
        Constructor for StructureGraph, using pre-existing or pre-defined edges
        with optional edge parameters.
        :param structure: Structure object
:param edges: dict representing the bonds of the functional
group (format: {(from_index, to_index, from_image, to_image): props},
where props is a dictionary of properties, including weight.
Props should be None if no additional properties are to be
specified.
:return: sg, a StructureGraph
"""
sg = StructureGraph.with_empty_graph(structure, name="bonds", edge_weight_name="weight", edge_weight_units="")
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
from_image = edge[2]
to_image = edge[3]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index," " from_image, to_image) tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = sg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError(
"Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
)
sg.add_edge(
from_index,
to_index,
from_jimage=from_image,
to_jimage=to_image,
weight=weight,
edge_properties=props,
)
sg.set_node_attributes()
return sg
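    # Illustrative usage sketch (placeholder indices and images; props may be
    # None or carry an optional "weight"):
    #
    #     edges = {(0, 1, (0, 0, 0), (0, 0, 0)): {"weight": 2.0},
    #              (0, 1, (0, 0, 0), (1, 0, 0)): None}
    #     sg = StructureGraph.with_edges(structure, edges)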
@staticmethod
def with_local_env_strategy(structure, strategy, weights=False):
"""
Constructor for StructureGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param structure: Structure object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:param weights: if True, use weights from local_env class
(consult relevant class for their meaning)
:return:
"""
if not strategy.structures_allowed:
raise ValueError(
"Chosen strategy is not designed for use with structures! " "Please choose another strategy."
)
sg = StructureGraph.with_empty_graph(structure, name="bonds")
for n, neighbors in enumerate(strategy.get_all_nn_info(structure)):
for neighbor in neighbors:
# local_env will always try to add two edges
# for any one bond, one from site u to site v
# and another form site v to site u: this is
# harmless, so warn_duplicates=False
sg.add_edge(
from_index=n,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"] if weights else None,
warn_duplicates=False,
)
return sg
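    # Illustrative usage sketch (assumes a near-neighbour strategy such as
    # pymatgen.analysis.local_env.CrystalNN; `structure` is a placeholder):
    #
    #     from pymatgen.analysis.local_env import CrystalNN
    #     sg = StructureGraph.with_local_env_strategy(structure, CrystalNN())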
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph["name"]
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph["edge_weight_name"]
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph["edge_weight_units"]
def add_edge(
self,
from_index,
to_index,
from_jimage=(0, 0, 0),
to_jimage=None,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
        However, images will always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param from_jimage (tuple of ints): lattice vector of periodic
image, e.g. (1, 0, 0) for periodic image in +x direction
:param to_jimage (tuple of ints): lattice vector of image
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
to_jimage, from_jimage = from_jimage, to_jimage
# constrain all from_jimages to be (0, 0, 0),
# initial version of this class worked even if
# from_jimage != (0, 0, 0), but making this
# assumption simplifies logic later
if not np.array_equal(from_jimage, (0, 0, 0)):
shift = from_jimage
from_jimage = np.subtract(from_jimage, shift)
to_jimage = np.subtract(to_jimage, shift)
# automatic detection of to_jimage if user doesn't specify
# will try and detect all equivalent images and add multiple
# edges if appropriate
if to_jimage is None:
# assume we want the closest site
warnings.warn("Please specify to_jimage to be unambiguous, " "trying to automatically detect.")
dist, to_jimage = self.structure[from_index].distance_and_image(self.structure[to_index])
if dist == 0:
# this will happen when from_index == to_index,
# typically in primitive single-atom lattices
images = [1, 0, 0], [0, 1, 0], [0, 0, 1]
dists = []
for image in images:
dists.append(
self.structure[from_index].distance_and_image(self.structure[from_index], jimage=image)[0]
)
dist = min(dists)
equiv_sites = self.structure.get_neighbors_in_shell(
self.structure[from_index].coords, dist, dist * 0.01, include_index=True
)
for nnsite in equiv_sites:
to_jimage = np.subtract(nnsite.frac_coords, self.structure[from_index].frac_coords)
to_jimage = np.round(to_jimage).astype(int)
self.add_edge(
from_index=from_index,
from_jimage=(0, 0, 0),
to_jimage=to_jimage,
to_index=nnsite.index,
)
return
# sanitize types
from_jimage, to_jimage = (
tuple(map(int, from_jimage)),
tuple(map(int, to_jimage)),
)
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between a given (site, jimage) pair and another
# (site, jimage) pair
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data:
for key, d in existing_edge_data.items():
if d["to_jimage"] == to_jimage:
if warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from "
"site {} to site {} in {}.".format(from_index, to_index, to_jimage)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, to_jimage=to_jimage, **edge_properties)
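    # Illustrative usage sketch (placeholder site indices; this edge crosses the
    # periodic boundary in the +x direction):
    #
    #     sg.add_edge(0, 1, to_jimage=(1, 0, 0), weight=2.4)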
def insert_node(
self,
i,
species,
coords,
coords_are_cartesian=False,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
        A wrapper around Structure.insert(), which also incorporates the new
        site into the StructureGraph.
        :param i: Index at which to insert the new site
        :param species: Species for the new site
        :param coords: 3x1 array representing coordinates of the new site
        :param coords_are_cartesian: Whether coordinates are cartesian.
            Defaults to False.
        :param validate_proximity: For Structure.insert(); if True (default
            False), distance will be checked to ensure that site can be safely
            added.
        :param site_properties: Site properties for Structure
        :param edges: List of dicts representing edges to be added to the
            StructureGraph. These edges must include the index of the new site i,
            and all indices used for these edges should reflect the
            StructureGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.structure.insert(
i,
species,
coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=site_properties,
)
mapping = {}
for j in range(len(self.structure) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
from_jimage=(0, 0, 0),
to_jimage=edge["to_jimage"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Gives each node a "specie" and a "coords" attribute, updated with the
current species and coordinates.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.structure[node].specie.symbol
coords[node] = self.structure[node].coords
properties[node] = self.structure[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(
self,
from_index,
to_index,
to_jimage=None,
new_weight=None,
new_edge_properties=None,
):
"""
Alters either the weight or the edge_properties of
an edge in the StructureGraph.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edges = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edges:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
if to_jimage is None:
edge_index = 0
else:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
if new_weight is not None:
self.graph[from_index][to_index][edge_index]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):
"""
Remove an edge from the StructureGraph. If no image is given, this method will fail.
:param from_index: int
:param to_index: int
:param to_jimage: tuple
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edges = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if to_jimage is None:
raise ValueError("Image must be supplied, to avoid ambiguity.")
if existing_edges:
for i, properties in existing_edges.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(from_index, to_index, edge_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
for i, properties in existing_reverse.items():
if properties["to_jimage"] == to_jimage:
edge_index = i
self.graph.remove_edge(to_index, from_index, edge_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
def remove_nodes(self, indices):
"""
        A wrapper for Structure.remove_sites().
        :param indices: list of indices in the current Structure (and graph) to
be removed.
:return:
"""
self.structure.remove_sites(indices)
self.graph.remove_nodes_from(indices)
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Structure.substitute to replace an atom in self.structure
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: Care must be taken to ensure that the functional group that is
        substituted will not place atoms too close to each other, or violate the
dimensions of the Lattice.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are two options:
1. Providing an actual Molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.structure) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.structure.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "to_jimage" in edge_props.keys():
to_jimage = edge_props["to_jimage"]
del edge_props["to_jimage"]
else:
# By default, assume that all edges should stay remain
# inside the initial image
to_jimage = (0, 0, 0)
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
to_jimage=to_jimage,
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
for site in mapping.values():
neighbors = strat.get_nn_info(self.structure, site)
for neighbor in neighbors:
self.add_edge(
from_index=site,
from_jimage=(0, 0, 0),
to_index=neighbor["site_index"],
to_jimage=neighbor["image"],
weight=neighbor["weight"],
warn_duplicates=False,
)
def get_connected_sites(self, n, jimage=(0, 0, 0)):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Structure
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
connected_site_images = set()
out_edges = [(u, v, d, "out") for u, v, d in self.graph.out_edges(n, data=True)]
in_edges = [(u, v, d, "in") for u, v, d in self.graph.in_edges(n, data=True)]
for u, v, d, dir in out_edges + in_edges:
to_jimage = d["to_jimage"]
if dir == "in":
u, v = v, u
to_jimage = np.multiply(-1, to_jimage)
to_jimage = tuple(map(int, np.add(to_jimage, jimage)))
site_d = self.structure[v].as_dict()
site_d["abc"] = np.add(site_d["abc"], to_jimage).tolist()
site = PeriodicSite.from_dict(site_d)
# from_site if jimage arg != (0, 0, 0)
relative_jimage = np.subtract(to_jimage, jimage)
dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)
weight = d.get("weight", None)
if (v, to_jimage) not in connected_site_images:
connected_site = ConnectedSite(site=site, jimage=to_jimage, index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
connected_site_images.add((v, to_jimage))
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
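    # Illustrative usage sketch (placeholder index; ConnectedSite fields are
    # site, jimage, index, weight, dist):
    #
    #     for cs in sg.get_connected_sites(0):
    #         print(cs.index, cs.jimage, cs.dist)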
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
# mutli-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.structure[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])
# get contrasting font color
# magic numbers account for perceived luminescence
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
to_image = d["to_jimage"]
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
color_u = g.nodes[u]["fillcolor"]
color_v = g.nodes[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
            g = g.subgraph([n for n, deg in g.degree() if deg != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
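    # Illustrative usage sketch (requires the GraphViz binaries named above to be
    # on the PATH; the filename extension selects the output format):
    #
    #     sg.draw_graph_to_file("bonding.pdf", hide_image_edges=False, algo="neato")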
@property
def types_and_weights_of_connections(self):
"""
Extract a dictionary summarizing the types and weights
of edges in the graph.
:return: A dictionary with keys specifying the
species involved in a connection in alphabetical order
(e.g. string 'Fe-O') and values which are a list of
weights for those connections (e.g. bond lengths).
"""
def get_label(u, v):
u_label = self.structure[u].species_string
v_label = self.structure[v].species_string
return "-".join(sorted((u_label, v_label)))
types = defaultdict(list)
for u, v, d in self.graph.edges(data=True):
label = get_label(u, v)
types[label].append(d["weight"])
return dict(types)
@property
def weight_statistics(self):
"""
Extract a statistical summary of edge weights present in
the graph.
        :return: A dict with an 'all_weights' list and
        'min', 'max', 'mean' and 'variance' keys
"""
all_weights = [d.get("weight", None) for u, v, d in self.graph.edges(data=True)]
stats = describe(all_weights, nan_policy="omit")
return {
"all_weights": all_weights,
"min": stats.minmax[0],
"max": stats.minmax[1],
"mean": stats.mean,
"variance": stats.variance,
}
def types_of_coordination_environments(self, anonymous=False):
"""
Extract information on the different co-ordination environments
present in the graph.
:param anonymous: if anonymous, will replace specie names
with A, B, C, etc.
:return: a list of co-ordination environments,
e.g. ['Mo-S(6)', 'S-Mo(3)']
"""
motifs = set()
for idx, site in enumerate(self.structure):
centre_sp = site.species_string
connected_sites = self.get_connected_sites(idx)
connected_species = [connected_site.site.species_string for connected_site in connected_sites]
labels = []
for sp in set(connected_species):
count = connected_species.count(sp)
labels.append((count, sp))
labels = sorted(labels, reverse=True)
if anonymous:
mapping = {centre_sp: "A"}
available_letters = [chr(66 + i) for i in range(25)]
for label in labels:
sp = label[1]
if sp not in mapping:
mapping[sp] = available_letters.pop(0)
centre_sp = "A"
labels = [(label[0], mapping[label[1]]) for label in labels]
labels = ["{}({})".format(label[1], label[0]) for label in labels]
motif = "{}-{}".format(centre_sp, ",".join(labels))
motifs.add(motif)
return sorted(list(motifs))
def as_dict(self):
"""
As in :Class: `pymatgen.core.Structure` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Structure` except
restoring graphs using `from_dict_of_dicts`
from NetworkX to restore graph information.
"""
s = Structure.from_dict(d["structure"])
return cls(s, d["graphs"])
def __mul__(self, scaling_matrix):
"""
Replicates the graph, creating a supercell,
intelligently joining together
edges that lie on periodic boundaries.
In principle, any operations on the expanded
graph could also be done on the original
graph, but a larger graph can be easier to
visualize and reason about.
:param scaling_matrix: same as Structure.__mul__
:return:
"""
# Developer note: a different approach was also trialed, using
# a simple Graph (instead of MultiDiGraph), with node indices
# representing both site index and periodic image. Here, the
# number of nodes != number of sites in the Structure. This
# approach has many benefits, but made it more difficult to
# keep the graph in sync with its corresponding Structure.
# Broadly, it would be easier to multiply the Structure
# *before* generating the StructureGraph, but this isn't
# possible when generating the graph using critic2 from
# charge density.
# Multiplication works by looking for the expected position
# of an image node, and seeing if that node exists in the
# supercell. If it does, the edge is updated. This is more
        # computationally expensive than just keeping track of
        # which new lattice images are present, but should hopefully be
# easier to extend to a general 3x3 scaling matrix.
# code adapted from Structure.__mul__
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
else:
# TODO: test __mul__ with full 3x3 scaling matrices
raise NotImplementedError("Not tested with 3x3 scaling matrices yet.")
new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
new_graphs = []
for v in c_lat:
# create a map of nodes from original graph to its image
mapping = {n: n + len(new_sites) for n in range(len(self.structure))}
for idx, site in enumerate(self.structure):
s = PeriodicSite(
site.species,
site.coords + v,
new_lattice,
properties=site.properties,
coords_are_cartesian=True,
to_unit_cell=False,
)
new_sites.append(s)
new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))
new_structure = Structure.from_sites(new_sites)
# merge all graphs into one big graph
new_g = nx.MultiDiGraph()
for new_graph in new_graphs:
new_g = nx.union(new_g, new_graph)
edges_to_remove = [] # tuple of (u, v, k)
edges_to_add = [] # tuple of (u, v, attr_dict)
# list of new edges inside supercell
# for duplicate checking
edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True) if d["to_jimage"] == (0, 0, 0)]
new_periodic_images = []
orig_lattice = self.structure.lattice
# use k-d tree to match given position to an
# existing Site in Structure
kd_tree = KDTree(new_structure.cart_coords)
# tolerance in Å for sites to be considered equal
# this could probably be a lot smaller
tol = 0.05
for u, v, k, d in new_g.edges(keys=True, data=True):
to_jimage = d["to_jimage"] # for node v
# reduce unnecessary checking
if to_jimage != (0, 0, 0):
# get index in original site
n_u = u % len(self.structure)
n_v = v % len(self.structure)
# get fractional co-ordinates of where atoms defined
# by edge are expected to be, relative to original
# lattice (keeping original lattice has
# significant benefits)
v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)
u_frac = self.structure[n_u].frac_coords
# using the position of node u as a reference,
# get relative Cartesian co-ordinates of where
# atoms defined by edge are expected to be
v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)
u_cart = orig_lattice.get_cartesian_coords(u_frac)
v_rel = np.subtract(v_image_cart, u_cart)
# now retrieve position of node v in
                # new supercell, and get absolute Cartesian
# co-ordinates of where atoms defined by edge
# are expected to be
v_expec = new_structure[u].coords + v_rel
# now search in new structure for these atoms
# query returns (distance, index)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
# check if image sites now present in supercell
# and if so, delete old edge that went through
# periodic boundary
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
# node now inside supercell
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
# make sure we don't try to add duplicate edges
# will remove two edges for everyone one we add
if {new_u, new_v} not in edges_inside_supercell:
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
edges_inside_supercell.append({new_u, new_v})
edges_to_add.append((new_u, new_v, new_d))
else:
# want to find new_v such that we have
# full periodic boundary conditions
# so that nodes on one side of supercell
# are connected to nodes on opposite side
v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)
# find new to_jimage
# use np.around to fix issues with finite precision leading to incorrect image
v_expec_image = np.around(v_expec_frac, decimals=3)
v_expec_image = v_expec_image - v_expec_image % 1
v_expec_frac = np.subtract(v_expec_frac, v_expec_image)
v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)
v_present = kd_tree.query(v_expec)
v_present = v_present[1] if v_present[0] <= tol else None
if v_present is not None:
new_u = u
new_v = v_present
new_d = d.copy()
new_to_jimage = tuple(map(int, v_expec_image))
# normalize direction
if new_v < new_u:
new_u, new_v = new_v, new_u
new_to_jimage = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
new_d["to_jimage"] = new_to_jimage
edges_to_remove.append((u, v, k))
if (new_u, new_v, new_to_jimage) not in new_periodic_images:
edges_to_add.append((new_u, new_v, new_d))
new_periodic_images.append((new_u, new_v, new_to_jimage))
logger.debug("Removing {} edges, adding {} new edges.".format(len(edges_to_remove), len(edges_to_add)))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            new_g.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
new_g.add_edge(u, v, **d)
# return new instance of StructureGraph with supercell
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": new_structure.as_dict(),
"graphs": json_graph.adjacency_data(new_g),
}
sg = StructureGraph.from_dict(d)
return sg
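    # Illustrative usage sketch: build a (2, 2, 2) supercell graph with edges
    # re-wired across the new periodic boundaries (placeholder variable names):
    #
    #     supercell_sg = sg * (2, 2, 2)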
def __rmul__(self, other):
return self.__mul__(other)
@classmethod
def _edges_to_string(cls, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-" * max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(
u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)
)
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Structure Graph"
s += "\nStructure: \n{}".format(self.structure.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Structure / number of nodes in graph
"""
return len(self.structure)
def sort(self, key=None, reverse=False):
"""
Same as Structure.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_structure = self.structure.copy()
# sort Structure
self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)
# apply Structure ordering to graph
mapping = {idx: self.structure.index(site) for idx, site in enumerate(old_structure)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = tuple(np.multiply(-1, d["to_jimage"]).astype(int))
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return StructureGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two StructureGraphs are equal if they have equal Structures,
and have the same edges between Sites. Edge weights can be
different and StructureGraphs can still be considered equal.
:param other: StructureGraph
:return (bool):
"""
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.structure == other_sorted.structure)
def diff(self, other, strict=True):
"""
Compares two StructureGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one StructureGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two StructureGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the StructureGraph this method is called
from, not the 'other' StructureGraph: there
is no guarantee the node indices will be the
same if the underlying Structures are ordered
differently.
:param other: StructureGraph
:param strict: if False, will compare bonds
from different Structures, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.structure != other.structure and strict:
            raise ValueError("Meaningless to compare StructureGraphs if corresponding Structures are different.")
if strict:
# sort for consistent node indices
# PeriodicSite should have a proper __hash__() value,
# using its frac_coords as a convenient key
mapping = {tuple(site.frac_coords): self.structure.index(site) for site in other.structure}
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])
edges = {(u, v, d["to_jimage"]) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v, d["to_jimage"]) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
else:
edges = {
(str(self.structure[u].specie), str(self.structure[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
(str(other.structure[u].specie), str(other.structure[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
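    # Illustrative usage sketch (placeholder graphs; 'dist' is the Jaccard
    # distance between the two edge sets):
    #
    #     delta = sg1.diff(sg2)
    #     print(delta["dist"], len(delta["both"]))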
def get_subgraphs_as_molecules(self, use_weights=False):
"""
Retrieve subgraphs as molecules, useful for extracting
molecules from periodic crystals.
Will only return unique molecules, not any duplicates
present in the crystal (a duplicate defined as an
isomorphic subgraph).
:param use_weights (bool): If True, only treat subgraphs
as isomorphic if edges have the same weights. Typically,
this means molecules will need to have the same bond
lengths to be defined as duplicates, otherwise bond
lengths can differ. This is a fairly robust approach,
but will treat e.g. enantiomers as being duplicates.
:return: list of unique Molecules in Structure
"""
# creating a supercell is an easy way to extract
# molecules (and not, e.g., layers of a 2D crystal)
# without adding extra logic
if getattr(self, "_supercell_sg", None) is None:
self._supercell_sg = supercell_sg = self * (3, 3, 3)
# make undirected to find connected subgraphs
supercell_sg.graph = nx.Graph(supercell_sg.graph)
# find subgraphs
all_subgraphs = [supercell_sg.graph.subgraph(c) for c in nx.connected_components(supercell_sg.graph)]
# discount subgraphs that lie across *supercell* boundaries
            # these will be subgraphs representing crystals
molecule_subgraphs = []
for subgraph in all_subgraphs:
intersects_boundary = any(d["to_jimage"] != (0, 0, 0) for u, v, d in subgraph.edges(data=True))
if not intersects_boundary:
molecule_subgraphs.append(nx.MultiDiGraph(subgraph))
# add specie names to graph to be able to test for isomorphism
for subgraph in molecule_subgraphs:
for n in subgraph:
subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))
# now define how we test for isomorphism
def node_match(n1, n2):
return n1["specie"] == n2["specie"]
def edge_match(e1, e2):
if use_weights:
return e1["weight"] == e2["weight"]
return True
# prune duplicate subgraphs
unique_subgraphs = []
for subgraph in molecule_subgraphs:
already_present = [
nx.is_isomorphic(subgraph, g, node_match=node_match, edge_match=edge_match) for g in unique_subgraphs
]
if not any(already_present):
unique_subgraphs.append(subgraph)
# get Molecule objects for each subgraph
molecules = []
for subgraph in unique_subgraphs:
coords = [supercell_sg.structure[n].coords for n in subgraph.nodes()]
species = [supercell_sg.structure[n].specie for n in subgraph.nodes()]
molecule = Molecule(species, coords)
# shift so origin is at center of mass
molecule = molecule.get_centered_molecule()
molecules.append(molecule)
return molecules
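    # Illustrative usage sketch (e.g. for a molecular crystal; returns unique
    # pymatgen Molecule objects):
    #
    #     molecules = sg.get_subgraphs_as_molecules()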
class MolGraphSplitError(Exception):
"""
Raised when a molecule graph is failed to split into two disconnected
subgraphs
"""
pass
class MoleculeGraph(MSONable):
"""
This is a class for annotating a Molecule with
bond information, stored in the form of a graph. A "bond" does
not necessarily have to be a chemical bond, but can store any
kind of information that connects two Sites.
"""
def __init__(self, molecule, graph_data=None):
"""
If constructing this class manually, use the `with_empty_graph`
method or `with_local_env_strategy` method (using an algorithm
provided by the `local_env` module, such as O'Keeffe).
        This class contains connection information:
relationships between sites represented by a Graph structure,
and an associated structure object.
This class uses the NetworkX package to store and operate
on the graph itself, but contains a lot of helper methods
to make associating a graph with a given molecule easier.
Use cases for this include storing bonding information,
NMR J-couplings, Heisenberg exchange parameters, etc.
:param molecule: Molecule object
:param graph_data: dict containing graph information in
dict format (not intended to be constructed manually,
see as_dict method for format)
"""
if isinstance(molecule, MoleculeGraph):
# just make a copy from input
graph_data = molecule.as_dict()["graphs"]
self.molecule = molecule
self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)
# tidy up edge attr dicts, reading to/from json duplicates
# information
for u, v, k, d in self.graph.edges(keys=True, data=True):
if "id" in d:
del d["id"]
if "key" in d:
del d["key"]
# ensure images are tuples (conversion to lists happens
# when serializing back from json), it's important images
# are hashable/immutable
if "to_jimage" in d:
d["to_jimage"] = tuple(d["to_jimage"])
if "from_jimage" in d:
d["from_jimage"] = tuple(d["from_jimage"])
self.set_node_attributes()
@classmethod
def with_empty_graph(cls, molecule, name="bonds", edge_weight_name=None, edge_weight_units=None):
"""
Constructor for MoleculeGraph, returns a MoleculeGraph
object with an empty graph (no edges, only nodes defined
that correspond to Sites in Molecule).
:param molecule (Molecule):
:param name (str): name of graph, e.g. "bonds"
:param edge_weight_name (str): name of edge weights,
e.g. "bond_length" or "exchange_constant"
:param edge_weight_units (str): name of edge weight units
e.g. "Å" or "eV"
:return (MoleculeGraph):
"""
if edge_weight_name and (edge_weight_units is None):
raise ValueError(
"Please specify units associated "
"with your edge weights. Can be "
"empty string if arbitrary or "
"dimensionless."
)
# construct graph with one node per site
# graph attributes don't change behavior of graph,
# they're just for book-keeping
graph = nx.MultiDiGraph(
edge_weight_name=edge_weight_name,
edge_weight_units=edge_weight_units,
name=name,
)
graph.add_nodes_from(range(len(molecule)))
graph_data = json_graph.adjacency_data(graph)
return cls(molecule, graph_data=graph_data)
@staticmethod
def with_edges(molecule, edges):
"""
Constructor for MoleculeGraph, using pre-existing or pre-defined edges
with optional edge parameters.
:param molecule: Molecule object
:param edges: dict representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. Props should be None if no
additional properties are to be specified.
:return: mg, a MoleculeGraph
"""
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
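        # add each user-supplied bond, treating "weight" specially and passing
        # any remaining entries through as edge properties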
for edge, props in edges.items():
try:
from_index = edge[0]
to_index = edge[1]
except TypeError:
raise ValueError("Edges must be given as (from_index, to_index)" "tuples")
if props is not None:
if "weight" in props.keys():
weight = props["weight"]
del props["weight"]
else:
weight = None
if len(props.items()) == 0:
props = None
else:
weight = None
nodes = mg.graph.nodes
if not (from_index in nodes and to_index in nodes):
raise ValueError(
"Edges cannot be added if nodes are not" " present in the graph. Please check your" " indices."
)
mg.add_edge(from_index, to_index, weight=weight, edge_properties=props)
mg.set_node_attributes()
return mg
@staticmethod
def with_local_env_strategy(molecule, strategy):
"""
Constructor for MoleculeGraph, using a strategy
from :Class: `pymatgen.analysis.local_env`.
:param molecule: Molecule object
:param strategy: an instance of a
:Class: `pymatgen.analysis.local_env.NearNeighbors` object
:return: mg, a MoleculeGraph
"""
if not strategy.molecules_allowed:
raise ValueError(
"Chosen strategy is not designed for use with molecules! " "Please choose another strategy."
)
extend_structure = strategy.extend_structure_molecules
mg = MoleculeGraph.with_empty_graph(molecule, name="bonds", edge_weight_name="weight", edge_weight_units="")
# NearNeighbor classes only (generally) work with structures
# molecules have to be boxed first
coords = molecule.cart_coords
if extend_structure:
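            # pad the box well beyond the molecular extent so periodic images
            # cannot produce spurious neighbors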
a = max(coords[:, 0]) - min(coords[:, 0]) + 100
b = max(coords[:, 1]) - min(coords[:, 1]) + 100
c = max(coords[:, 2]) - min(coords[:, 2]) + 100
structure = molecule.get_boxed_structure(a, b, c, no_cross=True, reorder=False)
else:
structure = None
for n in range(len(molecule)):
if structure is None:
neighbors = strategy.get_nn_info(molecule, n)
else:
neighbors = strategy.get_nn_info(structure, n)
for neighbor in neighbors:
# all bonds in molecules should not cross
# (artificial) periodic boundaries
if not np.array_equal(neighbor["image"], [0, 0, 0]):
continue
if n > neighbor["site_index"]:
from_index = neighbor["site_index"]
to_index = n
else:
from_index = n
to_index = neighbor["site_index"]
mg.add_edge(
from_index=from_index,
to_index=to_index,
weight=neighbor["weight"],
warn_duplicates=False,
)
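        # a MultiDiGraph can hold parallel edges (key != 0); drop them so that
        # at most one edge remains between any pair of sites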
duplicates = []
for edge in mg.graph.edges:
if edge[2] != 0:
duplicates.append(edge)
for duplicate in duplicates:
mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2])
mg.set_node_attributes()
return mg
@property
def name(self):
"""
:return: Name of graph
"""
return self.graph.graph["name"]
@property
def edge_weight_name(self):
"""
:return: Name of the edge weight property of graph
"""
return self.graph.graph["edge_weight_name"]
@property
def edge_weight_unit(self):
"""
:return: Units of the edge weight property of graph
"""
return self.graph.graph["edge_weight_units"]
def add_edge(
self,
from_index,
to_index,
weight=None,
warn_duplicates=True,
edge_properties=None,
):
"""
Add edge to graph.
Since physically a 'bond' (or other connection
between sites) doesn't have a direction, from_index,
from_jimage can be swapped with to_index, to_jimage.
        However, images will always be shifted so that
from_index < to_index and from_jimage becomes (0, 0, 0).
:param from_index: index of site connecting from
:param to_index: index of site connecting to
:param weight (float): e.g. bond length
:param warn_duplicates (bool): if True, will warn if
trying to add duplicate edges (duplicate edges will not
be added in either case)
:param edge_properties (dict): any other information to
store on graph edges, similar to Structure's site_properties
:return:
"""
# this is not necessary for the class to work, but
# just makes it neater
if to_index < from_index:
to_index, from_index = from_index, to_index
# sanitize types
from_index, to_index = int(from_index), int(to_index)
# check we're not trying to add a duplicate edge
# there should only ever be at most one edge
# between two sites
existing_edge_data = self.graph.get_edge_data(from_index, to_index)
if existing_edge_data and warn_duplicates:
warnings.warn(
"Trying to add an edge that already exists from " "site {} to site {}.".format(from_index, to_index)
)
return
# generic container for additional edge properties,
# similar to site properties
edge_properties = edge_properties or {}
if weight:
self.graph.add_edge(from_index, to_index, weight=weight, **edge_properties)
else:
self.graph.add_edge(from_index, to_index, **edge_properties)
def insert_node(
self,
i,
species,
coords,
validate_proximity=False,
site_properties=None,
edges=None,
):
"""
A wrapper around Molecule.insert(), which also incorporates the new
site into the MoleculeGraph.
:param i: Index at which to insert the new site
:param species: Species for the new site
:param coords: 3x1 array representing coordinates of the new site
:param validate_proximity: For Molecule.insert(); if True (default
False), distance will be checked to ensure that site can be safely
added.
:param site_properties: Site properties for Molecule
:param edges: List of dicts representing edges to be added to the
MoleculeGraph. These edges must include the index of the new site i,
and all indices used for these edges should reflect the
MoleculeGraph AFTER the insertion, NOT before. Each dict should at
least have a "to_index" and "from_index" key, and can also have a
"weight" and a "properties" key.
:return:
"""
self.molecule.insert(
i,
species,
coords,
validate_proximity=validate_proximity,
properties=site_properties,
)
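        # shift existing node labels at or above i up by one so label i is free
        # for the newly inserted site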
mapping = {}
for j in range(len(self.molecule) - 1):
if j < i:
mapping[j] = j
else:
mapping[j] = j + 1
nx.relabel_nodes(self.graph, mapping, copy=False)
self.graph.add_node(i)
self.set_node_attributes()
if edges is not None:
for edge in edges:
try:
self.add_edge(
edge["from_index"],
edge["to_index"],
weight=edge.get("weight", None),
edge_properties=edge.get("properties", None),
)
except KeyError:
raise RuntimeError("Some edges are invalid.")
def set_node_attributes(self):
"""
Replicates molecule site properties (specie, coords, etc.) in the
MoleculeGraph.
:return:
"""
species = {}
coords = {}
properties = {}
for node in self.graph.nodes():
species[node] = self.molecule[node].specie.symbol
coords[node] = self.molecule[node].coords
properties[node] = self.molecule[node].properties
nx.set_node_attributes(self.graph, species, "specie")
nx.set_node_attributes(self.graph, coords, "coords")
nx.set_node_attributes(self.graph, properties, "properties")
def alter_edge(self, from_index, to_index, new_weight=None, new_edge_properties=None):
"""
Alters either the weight or the edge_properties of
an edge in the MoleculeGraph.
:param from_index: int
:param to_index: int
:param new_weight: alter_edge does not require
that weight be altered. As such, by default, this
is None. If weight is to be changed, it should be a
float.
:param new_edge_properties: alter_edge does not require
that edge_properties be altered. As such, by default,
this is None. If any edge properties are to be changed,
it should be a dictionary of edge properties to be changed.
:return:
"""
existing_edge = self.graph.get_edge_data(from_index, to_index)
# ensure that edge exists before attempting to change it
if not existing_edge:
raise ValueError(
"Edge between {} and {} cannot be altered;\
no edge exists between those sites.".format(
from_index, to_index
)
)
# Third index should always be 0 because there should only be one edge between any two nodes
if new_weight is not None:
self.graph[from_index][to_index][0]["weight"] = new_weight
if new_edge_properties is not None:
for prop in list(new_edge_properties.keys()):
self.graph[from_index][to_index][0][prop] = new_edge_properties[prop]
def break_edge(self, from_index, to_index, allow_reverse=False):
"""
Remove an edge from the MoleculeGraph
:param from_index: int
:param to_index: int
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return:
"""
# ensure that edge exists before attempting to remove it
existing_edge = self.graph.get_edge_data(from_index, to_index)
existing_reverse = None
if existing_edge:
self.graph.remove_edge(from_index, to_index)
else:
if allow_reverse:
existing_reverse = self.graph.get_edge_data(to_index, from_index)
if existing_reverse:
self.graph.remove_edge(to_index, from_index)
else:
raise ValueError(
"Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
from_index, to_index
)
)
def remove_nodes(self, indices):
"""
A wrapper for Molecule.remove_sites().
:param indices: list of indices in the current Molecule (and graph) to
be removed.
:return:
"""
self.molecule.remove_sites(indices)
self.graph.remove_nodes_from(indices)
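        # compact the remaining node labels to 0..N-1 so they line up with the
        # updated Molecule indices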
mapping = {}
for correct, current in enumerate(sorted(self.graph.nodes)):
mapping[current] = correct
nx.relabel_nodes(self.graph, mapping, copy=False)
self.set_node_attributes()
def get_disconnected_fragments(self):
"""
Determine if the MoleculeGraph is connected. If it is not, separate the
MoleculeGraph into different MoleculeGraphs, where each resulting
MoleculeGraph is a disconnected subgraph of the original.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:return: list of MoleculeGraphs
"""
if nx.is_weakly_connected(self.graph):
return [copy.deepcopy(self)]
original = copy.deepcopy(self)
sub_mols = list()
# Had to use nx.weakly_connected_components because of deprecation
# of nx.weakly_connected_component_subgraphs
subgraphs = [original.graph.subgraph(c) for c in nx.weakly_connected_components(original.graph)]
for subg in subgraphs:
nodes = sorted(list(subg.nodes))
# Molecule indices are essentially list-based, so node indices
# must be remapped, incrementing from 0
mapping = {}
for i, n in enumerate(nodes):
mapping[n] = i
# just give charge to whatever subgraph has node with index 0
# TODO: actually figure out how to distribute charge
if 0 in nodes:
charge = self.molecule.charge
else:
charge = 0
# relabel nodes in graph to match mapping
new_graph = nx.relabel_nodes(subg, mapping)
species = nx.get_node_attributes(new_graph, "specie")
coords = nx.get_node_attributes(new_graph, "coords")
raw_props = nx.get_node_attributes(new_graph, "properties")
properties = {}
for prop_set in raw_props.values():
for prop in prop_set.keys():
if prop in properties:
properties[prop].append(prop_set[prop])
else:
properties[prop] = [prop_set[prop]]
# Site properties must be present for all atoms in the molecule
# in order to be used for Molecule instantiation
for k, v in properties.items():
if len(v) != len(species):
del properties[k]
new_mol = Molecule(species, coords, charge=charge, site_properties=properties)
graph_data = json_graph.adjacency_data(new_graph)
# create new MoleculeGraph
sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data))
return sub_mols
def split_molecule_subgraphs(self, bonds, allow_reverse=False, alterations=None):
"""
Split MoleculeGraph into two or more MoleculeGraphs by
breaking a set of bonds. This function uses
MoleculeGraph.break_edge repeatedly to create
disjoint graphs (two or more separate molecules).
This function does not only alter the graph
information, but also changes the underlying
Molecules.
If the bonds parameter does not include sufficient
bonds to separate two molecule fragments, then this
function will fail.
Currently, this function naively assigns the charge
of the total molecule to a single submolecule. A
later effort will be to actually accurately assign
charge.
NOTE: This function does not modify the original
MoleculeGraph. It creates a copy, modifies that, and
returns two or more new MoleculeGraph objects.
:param bonds: list of tuples (from_index, to_index)
representing bonds to be broken to split the MoleculeGraph.
:param alterations: a dict {(from_index, to_index): alt},
where alt is a dictionary including weight and/or edge
properties to be changed following the split.
:param allow_reverse: If allow_reverse is True, then break_edge will
attempt to break both (from_index, to_index) and, failing that,
will attempt to break (to_index, from_index).
:return: list of MoleculeGraphs
"""
self.set_node_attributes()
original = copy.deepcopy(self)
for bond in bonds:
original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse)
if nx.is_weakly_connected(original.graph):
raise MolGraphSplitError(
"Cannot split molecule; \
MoleculeGraph is still connected."
)
# alter any bonds before partition, to avoid remapping
if alterations is not None:
for (u, v) in alterations.keys():
if "weight" in alterations[(u, v)]:
weight = alterations[(u, v)]["weight"]
del alterations[(u, v)]["weight"]
edge_properties = alterations[(u, v)] if len(alterations[(u, v)]) != 0 else None
original.alter_edge(u, v, new_weight=weight, new_edge_properties=edge_properties)
else:
original.alter_edge(u, v, new_edge_properties=alterations[(u, v)])
return original.get_disconnected_fragments()
def build_unique_fragments(self):
"""
Find all possible fragment combinations of the MoleculeGraphs (in other
words, all connected induced subgraphs)
:return:
"""
self.set_node_attributes()
graph = self.graph.to_undirected()
# find all possible fragments, aka connected induced subgraphs
frag_dict = {}
for ii in range(1, len(self.molecule)):
for combination in combinations(graph.nodes, ii):
mycomp = []
for idx in combination:
mycomp.append(str(self.molecule[idx].specie))
mycomp = "".join(sorted(mycomp))
subgraph = nx.subgraph(graph, combination)
if nx.is_connected(subgraph):
mykey = mycomp + str(len(subgraph.edges()))
if mykey not in frag_dict:
frag_dict[mykey] = [copy.deepcopy(subgraph)]
else:
frag_dict[mykey].append(copy.deepcopy(subgraph))
# narrow to all unique fragments using graph isomorphism
unique_frag_dict = {}
for key in frag_dict:
unique_frags = []
for frag in frag_dict[key]:
found = False
for f in unique_frags:
if _isomorphic(frag, f):
found = True
break
if not found:
unique_frags.append(frag)
unique_frag_dict[key] = copy.deepcopy(unique_frags)
# convert back to molecule graphs
unique_mol_graph_dict = {}
for key in unique_frag_dict:
unique_mol_graph_list = []
for fragment in unique_frag_dict[key]:
mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))}
remapped = nx.relabel_nodes(fragment, mapping)
species = nx.get_node_attributes(remapped, "specie")
coords = nx.get_node_attributes(remapped, "coords")
edges = {}
for from_index, to_index, key in remapped.edges:
edge_props = fragment.get_edge_data(from_index, to_index, key=key)
edges[(from_index, to_index)] = edge_props
unique_mol_graph_list.append(
self.with_edges(
Molecule(species=species, coords=coords, charge=self.molecule.charge),
edges,
)
)
frag_key = (
str(unique_mol_graph_list[0].molecule.composition.alphabetical_formula)
+ " E"
+ str(len(unique_mol_graph_list[0].graph.edges()))
)
unique_mol_graph_dict[frag_key] = copy.deepcopy(unique_mol_graph_list)
return unique_mol_graph_dict
def substitute_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute to replace an atom in self.molecule
with a functional group. This method also amends self.graph to
incorporate the new functional group.
NOTE: using a MoleculeGraph will generally produce a different graph
compared with using a Molecule or str (when not using graph_dict).
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
def map_indices(grp):
grp_map = {}
# Get indices now occupied by functional group
# Subtracting 1 because the dummy atom X should not count
atoms = len(grp) - 1
offset = len(self.molecule) - atoms
for i in range(atoms):
grp_map[i] = i + offset
return grp_map
# Work is simplified if a graph is already in place
if isinstance(func_grp, MoleculeGraph):
self.molecule.substitute(index, func_grp.molecule, bond_order=bond_order)
mapping = map_indices(func_grp.molecule)
for (u, v) in list(func_grp.graph.edges()):
edge_props = func_grp.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props)
else:
if isinstance(func_grp, Molecule):
func_grp = copy.deepcopy(func_grp)
else:
try:
func_grp = copy.deepcopy(FunctionalGroups[func_grp])
except Exception:
raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
self.molecule.substitute(index, func_grp, bond_order=bond_order)
mapping = map_indices(func_grp)
# Remove dummy atom "X"
func_grp.remove_species("X")
if graph_dict is not None:
for (u, v) in graph_dict.keys():
edge_props = graph_dict[(u, v)]
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
else:
if strategy_params is None:
strategy_params = {}
strat = strategy(**strategy_params)
graph = self.with_local_env_strategy(func_grp, strat)
for (u, v) in list(graph.graph.edges()):
edge_props = graph.graph.get_edge_data(u, v)[0]
weight = None
if "weight" in edge_props.keys():
weight = edge_props["weight"]
del edge_props["weight"]
if 0 not in list(graph.graph.nodes()):
# If graph indices have different indexing
u, v = (u - 1), (v - 1)
self.add_edge(
mapping[u],
mapping[v],
weight=weight,
edge_properties=edge_props,
)
def replace_group(
self,
index,
func_grp,
strategy,
bond_order=1,
graph_dict=None,
strategy_params=None,
):
"""
Builds off of Molecule.substitute and MoleculeGraph.substitute_group
to replace a functional group in self.molecule with a functional group.
This method also amends self.graph to incorporate the new functional
group.
TODO: Figure out how to replace into a ring structure.
:param index: Index of atom to substitute.
:param func_grp: Substituent molecule. There are three options:
1. Providing an actual molecule as the input. The first atom
must be a DummySpecies X, indicating the position of
nearest neighbor. The second atom must be the next
nearest atom. For example, for a methyl group
substitution, func_grp should be X-CH3, where X is the
first site and C is the second site. What the code will
do is to remove the index site, and connect the nearest
neighbor to the C atom in CH3. The X-C bond indicates the
directionality to connect the atoms.
2. A string name. The molecule will be obtained from the
relevant template in func_groups.json.
3. A MoleculeGraph object.
:param strategy: Class from pymatgen.analysis.local_env.
:param bond_order: A specified bond order to calculate the bond
length between the attached functional group and the nearest
neighbor site. Defaults to 1.
:param graph_dict: Dictionary representing the bonds of the functional
group (format: {(u, v): props}, where props is a dictionary of
properties, including weight. If None, then the algorithm
will attempt to automatically determine bonds using one of
a list of strategies defined in pymatgen.analysis.local_env.
:param strategy_params: dictionary of keyword arguments for strategy.
If None, default parameters will be used.
:return:
"""
self.set_node_attributes()
neighbors = self.get_connected_sites(index)
# If the atom at index is terminal
if len(neighbors) == 1:
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
else:
rings = self.find_rings(including=[index])
if len(rings) != 0:
raise RuntimeError(
"Currently functional group replacement" "cannot occur at an atom within a ring" "structure."
)
to_remove = set()
sizes = dict()
disconnected = self.graph.to_undirected()
disconnected.remove_node(index)
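            # rank each neighbor by how many atoms are reachable from it once
            # the atom at index is removed; keep the neighbor heading the
            # largest branch and remove the rest before substituting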
for neighbor in neighbors:
sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2]))
keep = max(sizes, key=lambda x: sizes[x])
for i in sizes.keys():
if i != keep:
to_remove.add(i)
self.remove_nodes(list(to_remove))
self.substitute_group(
index,
func_grp,
strategy,
bond_order=bond_order,
graph_dict=graph_dict,
strategy_params=strategy_params,
)
def find_rings(self, including=None):
"""
Find ring structures in the MoleculeGraph.
:param including: list of site indices. If
including is not None, then find_rings will
only return those rings including the specified
sites. By default, this parameter is None, and
all rings will be returned.
:return: dict {index:cycle}. Each
entry will be a ring (cycle, in graph theory terms) including the index
found in the Molecule. If there is no cycle including an index, the
value will be an empty list.
"""
# Copies self.graph such that all edges (u, v) matched by edges (v, u)
undirected = self.graph.to_undirected()
directed = undirected.to_directed()
cycles_nodes = []
cycles_edges = []
# Remove all two-edge cycles
all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]
# Using to_directed() will mean that each cycle always appears twice
# So, we must also remove duplicates
unique_sorted = []
unique_cycles = []
for cycle in all_cycles:
if sorted(cycle) not in unique_sorted:
unique_sorted.append(sorted(cycle))
unique_cycles.append(cycle)
if including is None:
cycles_nodes = unique_cycles
else:
for i in including:
for cycle in unique_cycles:
if i in cycle and cycle not in cycles_nodes:
cycles_nodes.append(cycle)
for cycle in cycles_nodes:
edges = []
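            # pair each node with its predecessor; index -1 wraps around,
            # closing the ring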
for i, e in enumerate(cycle):
edges.append((cycle[i - 1], e))
cycles_edges.append(edges)
return cycles_edges
def get_connected_sites(self, n):
"""
Returns a named tuple of neighbors of site n:
periodic_site, jimage, index, weight.
Index is the index of the corresponding site
in the original structure, weight can be
None if not defined.
:param n: index of Site in Molecule
:param jimage: lattice vector of site
:return: list of ConnectedSite tuples,
sorted by closest first
"""
connected_sites = set()
out_edges = list(self.graph.out_edges(n, data=True))
in_edges = list(self.graph.in_edges(n, data=True))
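        # bonds are stored as directed edges, so gather both outgoing and
        # incoming edges of n to find every connection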
for u, v, d in out_edges + in_edges:
weight = d.get("weight", None)
if v == n:
site = self.molecule[u]
dist = self.molecule[v].distance(self.molecule[u])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=u, weight=weight, dist=dist)
else:
site = self.molecule[v]
dist = self.molecule[u].distance(self.molecule[v])
connected_site = ConnectedSite(site=site, jimage=(0, 0, 0), index=v, weight=weight, dist=dist)
connected_sites.add(connected_site)
# return list sorted by closest sites first
connected_sites = list(connected_sites)
connected_sites.sort(key=lambda x: x.dist)
return connected_sites
def get_coordination_of_site(self, n):
"""
Returns the number of neighbors of site n.
In graph terms, simply returns degree
of node corresponding to site n.
:param n: index of site
:return (int):
"""
number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])
return self.graph.degree(n) - number_of_self_loops
def draw_graph_to_file(
self,
filename="graph",
diff=None,
hide_unconnected_nodes=False,
hide_image_edges=True,
edge_colors=False,
node_labels=False,
weight_labels=False,
image_labels=False,
color_scheme="VESTA",
keep_dot=False,
algo="fdp",
):
"""
Draws graph using GraphViz.
The networkx graph object itself can also be drawn
with networkx's in-built graph drawing methods, but
note that this might give misleading results for
multigraphs (edges are super-imposed on each other).
If visualization is difficult to interpret,
`hide_image_edges` can help, especially in larger
graphs.
:param filename: filename to output, will detect filetype
from extension (any graphviz filetype supported, such as
pdf or png)
:param diff (StructureGraph): an additional graph to
compare with, will color edges red that do not exist in diff
and edges green that are in diff graph but not in the
reference graph
:param hide_unconnected_nodes: if True, hide unconnected
nodes
:param hide_image_edges: if True, do not draw edges that
go through periodic boundaries
:param edge_colors (bool): if True, use node colors to
color edges
:param node_labels (bool): if True, label nodes with
species and site index
:param weight_labels (bool): if True, label edges with
weights
:param image_labels (bool): if True, label edges with
their periodic images (usually only used for debugging,
edges to periodic images always appear as dashed lines)
:param color_scheme (str): "VESTA" or "JMOL"
:param keep_dot (bool): keep GraphViz .dot file for later
visualization
:param algo: any graphviz algo, "neato" (for simple graphs)
or "fdp" (for more crowded graphs) usually give good outputs
:return:
"""
if not which(algo):
raise RuntimeError("StructureGraph graph drawing requires " "GraphViz binaries to be in the path.")
# Developer note: NetworkX also has methods for drawing
# graphs using matplotlib, these also work here. However,
# a dedicated tool like GraphViz allows for much easier
# control over graph appearance and also correctly displays
        # multi-graphs (matplotlib can superimpose multiple edges).
g = self.graph.copy()
g.graph = {"nodesep": 10.0, "dpi": 300, "overlap": "false"}
# add display options for nodes
for n in g.nodes():
# get label by species name
label = "{}({})".format(str(self.molecule[n].specie), n) if node_labels else ""
# use standard color scheme for nodes
c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0])
# get contrasting font color
            # magic numbers account for perceived luminance
# https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color
fontcolor = "#000000" if 1 - (c[0] * 0.299 + c[1] * 0.587 + c[2] * 0.114) / 255 < 0.5 else "#ffffff"
# convert color to hex string
color = "#{:02x}{:02x}{:02x}".format(c[0], c[1], c[2])
g.add_node(
n,
fillcolor=color,
fontcolor=fontcolor,
label=label,
fontname="Helvetica-bold",
style="filled",
shape="circle",
)
edges_to_delete = []
# add display options for edges
for u, v, k, d in g.edges(keys=True, data=True):
# retrieve from/to images, set as origin if not defined
if "to_image" in d:
to_image = d["to_jimage"]
else:
to_image = (0, 0, 0)
# set edge style
d["style"] = "solid"
if to_image != (0, 0, 0):
d["style"] = "dashed"
if hide_image_edges:
edges_to_delete.append((u, v, k))
# don't show edge directions
d["arrowhead"] = "none"
# only add labels for images that are not the origin
if image_labels:
d["headlabel"] = "" if to_image == (0, 0, 0) else "to {}".format((to_image))
d["arrowhead"] = "normal" if d["headlabel"] else "none"
# optionally color edges using node colors
            color_u = g.nodes[u]["fillcolor"]
            color_v = g.nodes[v]["fillcolor"]
d["color_uv"] = "{};0.5:{};0.5".format(color_u, color_v) if edge_colors else "#000000"
# optionally add weights to graph
if weight_labels:
units = g.graph.get("edge_weight_units", "")
if d.get("weight"):
d["label"] = "{:.2f} {}".format(d["weight"], units)
# update edge with our new style attributes
g.edges[u, v, k].update(d)
# optionally remove periodic image edges,
# these can be confusing due to periodic boundaries
if hide_image_edges:
for edge_to_delete in edges_to_delete:
g.remove_edge(*edge_to_delete)
# optionally hide unconnected nodes,
# these can appear when removing periodic edges
if hide_unconnected_nodes:
g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0])
# optionally highlight differences with another graph
if diff:
diff = self.diff(diff, strict=True)
green_edges = []
red_edges = []
for u, v, k, d in g.edges(keys=True, data=True):
if (u, v, d["to_jimage"]) in diff["self"]:
# edge has been deleted
red_edges.append((u, v, k))
elif (u, v, d["to_jimage"]) in diff["other"]:
# edge has been added
green_edges.append((u, v, k))
for u, v, k in green_edges:
g.edges[u, v, k].update({"color_uv": "#00ff00"})
for u, v, k in red_edges:
g.edges[u, v, k].update({"color_uv": "#ff0000"})
basename, extension = os.path.splitext(filename)
extension = extension[1:]
write_dot(g, basename + ".dot")
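        # run the chosen GraphViz program on the .dot file and write the
        # requested output format to filename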
with open(filename, "w") as f:
args = [algo, "-T", extension, basename + ".dot"]
rs = subprocess.Popen(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)
rs.communicate()
if rs.returncode != 0:
raise RuntimeError("{} exited with return code {}.".format(algo, rs.returncode))
if not keep_dot:
os.remove(basename + ".dot")
def as_dict(self):
"""
As in :Class: `pymatgen.core.Molecule` except
with using `to_dict_of_dicts` from NetworkX
to store graph information.
"""
d = {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"molecule": self.molecule.as_dict(),
"graphs": json_graph.adjacency_data(self.graph),
}
return d
@classmethod
def from_dict(cls, d):
"""
As in :Class: `pymatgen.core.Molecule` except
restoring graphs using `from_dict_of_dicts`
from NetworkX to restore graph information.
"""
m = Molecule.from_dict(d["molecule"])
return cls(m, d["graphs"])
@classmethod
def _edges_to_string(cls, g):
header = "from to to_image "
header_line = "---- ---- ------------"
edge_weight_name = g.graph["edge_weight_name"]
if edge_weight_name:
print_weights = ["weight"]
edge_label = g.graph["edge_weight_name"]
edge_weight_units = g.graph["edge_weight_units"]
if edge_weight_units:
edge_label += " ({})".format(edge_weight_units)
header += " {}".format(edge_label)
header_line += " {}".format("-" * max([18, len(edge_label)]))
else:
print_weights = False
s = header + "\n" + header_line + "\n"
edges = list(g.edges(data=True))
# sort edges for consistent ordering
edges.sort(key=itemgetter(0, 1))
if print_weights:
for u, v, data in edges:
s += "{:4} {:4} {:12} {:.3e}\n".format(
u, v, str(data.get("to_jimage", (0, 0, 0))), data.get("weight", 0)
)
else:
for u, v, data in edges:
s += "{:4} {:4} {:12}\n".format(u, v, str(data.get("to_jimage", (0, 0, 0))))
return s
def __str__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__str__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __repr__(self):
s = "Molecule Graph"
s += "\nMolecule: \n{}".format(self.molecule.__repr__())
s += "\nGraph: {}\n".format(self.name)
s += self._edges_to_string(self.graph)
return s
def __len__(self):
"""
:return: length of Molecule / number of nodes in graph
"""
return len(self.molecule)
def sort(self, key=None, reverse=False):
"""
Same as Molecule.sort(), also remaps nodes in graph.
:param key:
:param reverse:
:return:
"""
old_molecule = self.molecule.copy()
# sort Molecule
self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)
# apply Molecule ordering to graph
mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}
self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)
# normalize directions of edges
edges_to_remove = []
edges_to_add = []
for u, v, k, d in self.graph.edges(keys=True, data=True):
if v < u:
new_v, new_u, new_d = u, v, d.copy()
new_d["to_jimage"] = (0, 0, 0)
edges_to_remove.append((u, v, k))
edges_to_add.append((new_u, new_v, new_d))
# add/delete marked edges
        for edge_to_remove in edges_to_remove:
            self.graph.remove_edge(*edge_to_remove)
for (u, v, d) in edges_to_add:
self.graph.add_edge(u, v, **d)
def __copy__(self):
return MoleculeGraph.from_dict(self.as_dict())
def __eq__(self, other):
"""
Two MoleculeGraphs are equal if they have equal Molecules,
and have the same edges between Sites. Edge weights can be
different and MoleculeGraphs can still be considered equal.
:param other: MoleculeGraph
:return (bool):
"""
# sort for consistent node indices
        # Site coords (as a tuple) make a convenient hashable key
try:
mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
except ValueError:
return False
other_sorted = other.__copy__()
other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}
return (edges == edges_other) and (self.molecule == other_sorted.molecule)
def isomorphic_to(self, other):
"""
Checks if the graphs of two MoleculeGraphs are isomorphic to one
another. In order to prevent problems with misdirected edges, both
graphs are converted into undirected nx.Graph objects.
:param other: MoleculeGraph object to be compared.
:return: bool
"""
if len(self.molecule) != len(other.molecule):
return False
if self.molecule.composition.alphabetical_formula != other.molecule.composition.alphabetical_formula:
return False
if len(self.graph.edges()) != len(other.graph.edges()):
return False
return _isomorphic(self.graph, other.graph)
def diff(self, other, strict=True):
"""
Compares two MoleculeGraphs. Returns dict with
keys 'self', 'other', 'both' with edges that are
present in only one MoleculeGraph ('self' and
'other'), and edges that are present in both.
The Jaccard distance is a simple measure of the
dissimilarity between two MoleculeGraphs (ignoring
edge weights), and is defined by 1 - (size of the
intersection / size of the union) of the sets of
edges. This is returned with key 'dist'.
Important note: all node indices are in terms
of the MoleculeGraph this method is called
from, not the 'other' MoleculeGraph: there
is no guarantee the node indices will be the
same if the underlying Molecules are ordered
differently.
:param other: MoleculeGraph
:param strict: if False, will compare bonds
from different Molecules, with node indices
replaced by Species strings, will not count
number of occurrences of bonds
:return:
"""
if self.molecule != other.molecule and strict:
return ValueError("Meaningless to compare MoleculeGraphs if " "corresponding Molecules are different.")
if strict:
# sort for consistent node indices
            # Site coords (as a tuple) make a convenient hashable key,
            # mirroring the approach used in __eq__
            mapping = {tuple(site.coords): self.molecule.index(site) for site in other.molecule}
            other_sorted = other.__copy__()
            other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])
edges = {(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in self.graph.edges(keys=False, data=True)}
edges_other = {
(u, v, d.get("to_jimage", (0, 0, 0))) for u, v, d in other_sorted.graph.edges(keys=False, data=True)
}
else:
edges = {
(str(self.molecule[u].specie), str(self.molecule[v].specie))
for u, v, d in self.graph.edges(keys=False, data=True)
}
edges_other = {
                (str(other.molecule[u].specie), str(other.molecule[v].specie))
for u, v, d in other.graph.edges(keys=False, data=True)
}
if len(edges) == 0 and len(edges_other) == 0:
jaccard_dist = 0 # by definition
else:
jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))
return {
"self": edges - edges_other,
"other": edges_other - edges,
"both": edges.intersection(edges_other),
"dist": jaccard_dist,
}
| [
"logging.getLogger",
"networkx.readwrite.json_graph.adjacency_graph",
"pymatgen.util.coord.lattice_points_in_supercell",
"scipy.spatial.KDTree",
"pymatgen.core.PeriodicSite",
"numpy.array",
"networkx.weakly_connected_components",
"copy.deepcopy",
"operator.itemgetter",
"igraph.Graph",
"networkx.relabel_nodes",
"numpy.multiply",
"networkx.algorithms.isomorphism.categorical_node_match",
"subprocess.Popen",
"networkx.is_connected",
"numpy.subtract",
"networkx.simple_cycles",
"numpy.dot",
"networkx.union",
"warnings.warn",
"networkx.is_weakly_connected",
"pymatgen.core.Molecule",
"numpy.round",
"networkx.readwrite.json_graph.adjacency_data",
"networkx.MultiDiGraph",
"collections.namedtuple",
"numpy.eye",
"numpy.add",
"pymatgen.core.PeriodicSite.from_dict",
"networkx.connected_components",
"pymatgen.core.Molecule.from_dict",
"networkx.subgraph",
"pymatgen.core.Structure.from_dict",
"numpy.around",
"networkx.descendants",
"networkx.is_isomorphic",
"monty.os.path.which",
"scipy.stats.describe",
"networkx.Graph",
"itertools.combinations",
"collections.defaultdict",
"networkx.set_node_attributes",
"numpy.array_equal",
"networkx.get_node_attributes",
"networkx.drawing.nx_agraph.write_dot",
"pymatgen.core.Structure.from_sites"
] | [((1010, 1037), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1027, 1037), False, 'import logging\n'), ((1243, 1307), 'collections.namedtuple', 'namedtuple', (['"""ConnectedSite"""', '"""site, jimage, index, weight, dist"""'], {}), "('ConnectedSite', 'site, jimage, index, weight, dist')\n", (1253, 1307), False, 'from collections import defaultdict, namedtuple\n'), ((1684, 1698), 'igraph.Graph', 'igraph.Graph', ([], {}), '()\n', (1696, 1698), False, 'import igraph\n'), ((3004, 3049), 'networkx.algorithms.isomorphism.categorical_node_match', 'iso.categorical_node_match', (['"""specie"""', '"""ERROR"""'], {}), "('specie', 'ERROR')\n", (3030, 3049), True, 'import networkx.algorithms.isomorphism as iso\n'), ((4776, 4827), 'networkx.readwrite.json_graph.adjacency_graph', 'nx.readwrite.json_graph.adjacency_graph', (['graph_data'], {}), '(graph_data)\n', (4815, 4827), True, 'import networkx as nx\n'), ((6565, 6668), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {'edge_weight_name': 'edge_weight_name', 'edge_weight_units': 'edge_weight_units', 'name': 'name'}), '(edge_weight_name=edge_weight_name, edge_weight_units=\n edge_weight_units, name=name)\n', (6580, 6668), True, 'import networkx as nx\n'), ((6785, 6817), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['graph'], {}), '(graph)\n', (6810, 6817), False, 'from networkx.readwrite import json_graph\n'), ((17318, 17367), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(False)'}), '(self.graph, mapping, copy=False)\n', (17334, 17367), True, 'import networkx as nx\n'), ((18480, 18533), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'species', '"""specie"""'], {}), "(self.graph, species, 'specie')\n", (18502, 18533), True, 'import networkx as nx\n'), ((18542, 18594), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'coords', '"""coords"""'], {}), "(self.graph, coords, 'coords')\n", (18564, 18594), True, 'import networkx as nx\n'), ((18603, 18663), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'properties', '"""properties"""'], {}), "(self.graph, properties, 'properties')\n", (18625, 18663), True, 'import networkx as nx\n'), ((22658, 22707), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(False)'}), '(self.graph, mapping, copy=False)\n', (22674, 22707), True, 'import networkx as nx\n'), ((36080, 36111), 'networkx.drawing.nx_agraph.write_dot', 'write_dot', (['g', "(basename + '.dot')"], {}), "(g, basename + '.dot')\n", (36089, 36111), False, 'from networkx.drawing.nx_agraph import write_dot\n'), ((37176, 37193), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (37187, 37193), False, 'from collections import defaultdict, namedtuple\n'), ((37732, 37772), 'scipy.stats.describe', 'describe', (['all_weights'], {'nan_policy': '"""omit"""'}), "(all_weights, nan_policy='omit')\n", (37740, 37772), False, 'from scipy.stats import describe\n'), ((40196, 40231), 'pymatgen.core.Structure.from_dict', 'Structure.from_dict', (["d['structure']"], {}), "(d['structure'])\n", (40215, 40231), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((41833, 41867), 'numpy.array', 'np.array', (['scaling_matrix', 'np.int16'], {}), '(scaling_matrix, np.int16)\n', (41841, 41867), True, 'import numpy as np\n'), ((42242, 42283), 'pymatgen.util.coord.lattice_points_in_supercell', 'lattice_points_in_supercell', 
(['scale_matrix'], {}), '(scale_matrix)\n', (42269, 42283), False, 'from pymatgen.util.coord import lattice_points_in_supercell\n'), ((43056, 43087), 'pymatgen.core.Structure.from_sites', 'Structure.from_sites', (['new_sites'], {}), '(new_sites)\n', (43076, 43087), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((43151, 43168), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (43166, 43168), True, 'import networkx as nx\n'), ((43740, 43773), 'scipy.spatial.KDTree', 'KDTree', (['new_structure.cart_coords'], {}), '(new_structure.cart_coords)\n', (43746, 43773), False, 'from scipy.spatial import KDTree\n'), ((51317, 51365), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(True)'}), '(self.graph, mapping, copy=True)\n', (51333, 51365), True, 'import networkx as nx\n'), ((56983, 57011), 'networkx.Graph', 'nx.Graph', (['supercell_sg.graph'], {}), '(supercell_sg.graph)\n', (56991, 57011), True, 'import networkx as nx\n'), ((60558, 60609), 'networkx.readwrite.json_graph.adjacency_graph', 'nx.readwrite.json_graph.adjacency_graph', (['graph_data'], {}), '(graph_data)\n', (60597, 60609), True, 'import networkx as nx\n'), ((62376, 62479), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {'edge_weight_name': 'edge_weight_name', 'edge_weight_units': 'edge_weight_units', 'name': 'name'}), '(edge_weight_name=edge_weight_name, edge_weight_units=\n edge_weight_units, name=name)\n', (62391, 62479), True, 'import networkx as nx\n'), ((62595, 62627), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['graph'], {}), '(graph)\n', (62620, 62627), False, 'from networkx.readwrite import json_graph\n'), ((70932, 70981), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(False)'}), '(self.graph, mapping, copy=False)\n', (70948, 70981), True, 'import networkx as nx\n'), ((71969, 72022), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'species', '"""specie"""'], {}), "(self.graph, species, 'specie')\n", (71991, 72022), True, 'import networkx as nx\n'), ((72031, 72083), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'coords', '"""coords"""'], {}), "(self.graph, coords, 'coords')\n", (72053, 72083), True, 'import networkx as nx\n'), ((72092, 72152), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['self.graph', 'properties', '"""properties"""'], {}), "(self.graph, properties, 'properties')\n", (72114, 72152), True, 'import networkx as nx\n'), ((75360, 75409), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(False)'}), '(self.graph, mapping, copy=False)\n', (75376, 75409), True, 'import networkx as nx\n'), ((76149, 76183), 'networkx.is_weakly_connected', 'nx.is_weakly_connected', (['self.graph'], {}), '(self.graph)\n', (76171, 76183), True, 'import networkx as nx\n'), ((76246, 76265), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (76259, 76265), False, 'import copy\n'), ((79878, 79897), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (79891, 79897), False, 'import copy\n'), ((80017, 80055), 'networkx.is_weakly_connected', 'nx.is_weakly_connected', (['original.graph'], {}), '(original.graph)\n', (80039, 80055), True, 'import networkx as nx\n'), ((102849, 102880), 'networkx.drawing.nx_agraph.write_dot', 'write_dot', (['g', "(basename + '.dot')"], {}), "(g, basename + '.dot')\n", (102858, 102880), False, 'from networkx.drawing.nx_agraph import 
write_dot\n'), ((103980, 104013), 'pymatgen.core.Molecule.from_dict', 'Molecule.from_dict', (["d['molecule']"], {}), "(d['molecule'])\n", (103998, 104013), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((106378, 106426), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(True)'}), '(self.graph, mapping, copy=True)\n', (106394, 106426), True, 'import networkx as nx\n'), ((12553, 12591), 'numpy.array_equal', 'np.array_equal', (['from_jimage', '(0, 0, 0)'], {}), '(from_jimage, (0, 0, 0))\n', (12567, 12591), True, 'import numpy as np\n'), ((12651, 12682), 'numpy.subtract', 'np.subtract', (['from_jimage', 'shift'], {}), '(from_jimage, shift)\n', (12662, 12682), True, 'import numpy as np\n'), ((12707, 12736), 'numpy.subtract', 'np.subtract', (['to_jimage', 'shift'], {}), '(to_jimage, shift)\n', (12718, 12736), True, 'import numpy as np\n'), ((12993, 13095), 'warnings.warn', 'warnings.warn', (['"""Please specify to_jimage to be unambiguous, trying to automatically detect."""'], {}), "(\n 'Please specify to_jimage to be unambiguous, trying to automatically detect.'\n )\n", (13006, 13095), False, 'import warnings\n'), ((25401, 25424), 'copy.deepcopy', 'copy.deepcopy', (['func_grp'], {}), '(func_grp)\n', (25414, 25424), False, 'import copy\n'), ((28499, 28529), 'pymatgen.core.PeriodicSite.from_dict', 'PeriodicSite.from_dict', (['site_d'], {}), '(site_d)\n', (28521, 28529), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((28612, 28642), 'numpy.subtract', 'np.subtract', (['to_jimage', 'jimage'], {}), '(to_jimage, jimage)\n', (28623, 28642), True, 'import numpy as np\n'), ((31781, 31792), 'monty.os.path.which', 'which', (['algo'], {}), '(algo)\n', (31786, 31792), False, 'from monty.os.path import which\n'), ((36232, 36303), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'f', 'stdin': 'subprocess.PIPE', 'close_fds': '(True)'}), '(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)\n', (36248, 36303), False, 'import subprocess\n'), ((39889, 39926), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['self.graph'], {}), '(self.graph)\n', (39914, 39926), False, 'from networkx.readwrite import json_graph\n'), ((42172, 42223), 'numpy.dot', 'np.dot', (['scale_matrix', 'self.structure.lattice.matrix'], {}), '(scale_matrix, self.structure.lattice.matrix)\n', (42178, 42223), True, 'import numpy as np\n'), ((43226, 43252), 'networkx.union', 'nx.union', (['new_g', 'new_graph'], {}), '(new_g, new_graph)\n', (43234, 43252), True, 'import networkx as nx\n'), ((48799, 48831), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['new_g'], {}), '(new_g)\n', (48824, 48831), False, 'from networkx.readwrite import json_graph\n'), ((58727, 58752), 'pymatgen.core.Molecule', 'Molecule', (['species', 'coords'], {}), '(species, coords)\n', (58735, 58752), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((77167, 77198), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['subg', 'mapping'], {}), '(subg, mapping)\n', (77183, 77198), True, 'import networkx as nx\n'), ((77222, 77265), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['new_graph', '"""specie"""'], {}), "(new_graph, 'specie')\n", (77244, 77265), True, 'import networkx as nx\n'), ((77287, 77330), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['new_graph', '"""coords"""'], {}), "(new_graph, 'coords')\n", (77309, 77330), True, 'import networkx 
as nx\n'), ((77355, 77402), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['new_graph', '"""properties"""'], {}), "(new_graph, 'properties')\n", (77377, 77402), True, 'import networkx as nx\n'), ((78004, 78072), 'pymatgen.core.Molecule', 'Molecule', (['species', 'coords'], {'charge': 'charge', 'site_properties': 'properties'}), '(species, coords, charge=charge, site_properties=properties)\n', (78012, 78072), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((78098, 78134), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['new_graph'], {}), '(new_graph)\n', (78123, 78134), False, 'from networkx.readwrite import json_graph\n'), ((81357, 81386), 'itertools.combinations', 'combinations', (['graph.nodes', 'ii'], {}), '(graph.nodes, ii)\n', (81369, 81386), False, 'from itertools import combinations\n'), ((82444, 82471), 'copy.deepcopy', 'copy.deepcopy', (['unique_frags'], {}), '(unique_frags)\n', (82457, 82471), False, 'import copy\n'), ((83716, 83752), 'copy.deepcopy', 'copy.deepcopy', (['unique_mol_graph_list'], {}), '(unique_mol_graph_list)\n', (83729, 83752), False, 'import copy\n'), ((98463, 98474), 'monty.os.path.which', 'which', (['algo'], {}), '(algo)\n', (98468, 98474), False, 'from monty.os.path import which\n'), ((103001, 103072), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'f', 'stdin': 'subprocess.PIPE', 'close_fds': '(True)'}), '(args, stdout=f, stdin=subprocess.PIPE, close_fds=True)\n', (103017, 103072), False, 'import subprocess\n'), ((103674, 103711), 'networkx.readwrite.json_graph.adjacency_data', 'json_graph.adjacency_data', (['self.graph'], {}), '(self.graph)\n', (103699, 103711), False, 'from networkx.readwrite import json_graph\n'), ((13904, 13975), 'numpy.subtract', 'np.subtract', (['nnsite.frac_coords', 'self.structure[from_index].frac_coords'], {}), '(nnsite.frac_coords, self.structure[from_index].frac_coords)\n', (13915, 13975), True, 'import numpy as np\n'), ((25483, 25524), 'copy.deepcopy', 'copy.deepcopy', (['FunctionalGroups[func_grp]'], {}), '(FunctionalGroups[func_grp])\n', (25496, 25524), False, 'import copy\n'), ((28266, 28292), 'numpy.multiply', 'np.multiply', (['(-1)', 'to_jimage'], {}), '(-1, to_jimage)\n', (28277, 28292), True, 'import numpy as np\n'), ((42642, 42778), 'pymatgen.core.PeriodicSite', 'PeriodicSite', (['site.species', '(site.coords + v)', 'new_lattice'], {'properties': 'site.properties', 'coords_are_cartesian': '(True)', 'to_unit_cell': '(False)'}), '(site.species, site.coords + v, new_lattice, properties=site.\n properties, coords_are_cartesian=True, to_unit_cell=False)\n', (42654, 42778), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((42981, 43029), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['self.graph', 'mapping'], {'copy': '(True)'}), '(self.graph, mapping, copy=True)\n', (42997, 43029), True, 'import networkx as nx\n'), ((44499, 44549), 'numpy.add', 'np.add', (['self.structure[n_v].frac_coords', 'to_jimage'], {}), '(self.structure[n_v].frac_coords, to_jimage)\n', (44505, 44549), True, 'import numpy as np\n'), ((44963, 44996), 'numpy.subtract', 'np.subtract', (['v_image_cart', 'u_cart'], {}), '(v_image_cart, u_cart)\n', (44974, 44996), True, 'import numpy as np\n'), ((49782, 49798), 'operator.itemgetter', 'itemgetter', (['(0)', '(1)'], {}), '(0, 1)\n', (49792, 49798), False, 'from operator import itemgetter\n'), ((57103, 57146), 'networkx.connected_components', 'nx.connected_components', 
(['supercell_sg.graph'], {}), '(supercell_sg.graph)\n', (57126, 57146), True, 'import networkx as nx\n'), ((58215, 58290), 'networkx.is_isomorphic', 'nx.is_isomorphic', (['subgraph', 'g'], {'node_match': 'node_match', 'edge_match': 'edge_match'}), '(subgraph, g, node_match=node_match, edge_match=edge_match)\n', (58231, 58290), True, 'import networkx as nx\n'), ((76205, 76224), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (76218, 76224), False, 'import copy\n'), ((76478, 76524), 'networkx.weakly_connected_components', 'nx.weakly_connected_components', (['original.graph'], {}), '(original.graph)\n', (76508, 76524), True, 'import networkx as nx\n'), ((81598, 81629), 'networkx.subgraph', 'nx.subgraph', (['graph', 'combination'], {}), '(graph, combination)\n', (81609, 81629), True, 'import networkx as nx\n'), ((81649, 81674), 'networkx.is_connected', 'nx.is_connected', (['subgraph'], {}), '(subgraph)\n', (81664, 81674), True, 'import networkx as nx\n'), ((82783, 82818), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['fragment', 'mapping'], {}), '(fragment, mapping)\n', (82799, 82818), True, 'import networkx as nx\n'), ((82846, 82888), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['remapped', '"""specie"""'], {}), "(remapped, 'specie')\n", (82868, 82888), True, 'import networkx as nx\n'), ((82914, 82956), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['remapped', '"""coords"""'], {}), "(remapped, 'coords')\n", (82936, 82956), True, 'import networkx as nx\n'), ((87142, 87165), 'copy.deepcopy', 'copy.deepcopy', (['func_grp'], {}), '(func_grp)\n', (87155, 87165), False, 'import copy\n'), ((93607, 93633), 'networkx.simple_cycles', 'nx.simple_cycles', (['directed'], {}), '(directed)\n', (93623, 93633), True, 'import networkx as nx\n'), ((104860, 104876), 'operator.itemgetter', 'itemgetter', (['(0)', '(1)'], {}), '(0, 1)\n', (104870, 104876), False, 'from operator import itemgetter\n'), ((28333, 28358), 'numpy.add', 'np.add', (['to_jimage', 'jimage'], {}), '(to_jimage, jimage)\n', (28339, 28358), True, 'import numpy as np\n'), ((28438, 28470), 'numpy.add', 'np.add', (["site_d['abc']", 'to_jimage'], {}), "(site_d['abc'], to_jimage)\n", (28444, 28470), True, 'import numpy as np\n'), ((41960, 41969), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (41966, 41969), True, 'import numpy as np\n'), ((46990, 47025), 'numpy.around', 'np.around', (['v_expec_frac'], {'decimals': '(3)'}), '(v_expec_frac, decimals=3)\n', (46999, 47025), True, 'import numpy as np\n'), ((47132, 47172), 'numpy.subtract', 'np.subtract', (['v_expec_frac', 'v_expec_image'], {}), '(v_expec_frac, v_expec_image)\n', (47143, 47172), True, 'import numpy as np\n'), ((57531, 57556), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', (['subgraph'], {}), '(subgraph)\n', (57546, 57556), True, 'import networkx as nx\n'), ((66010, 66054), 'numpy.array_equal', 'np.array_equal', (["neighbor['image']", '[0, 0, 0]'], {}), "(neighbor['image'], [0, 0, 0])\n", (66024, 66054), True, 'import numpy as np\n'), ((87236, 87277), 'copy.deepcopy', 'copy.deepcopy', (['FunctionalGroups[func_grp]'], {}), '(FunctionalGroups[func_grp])\n', (87249, 87277), False, 'import copy\n'), ((92180, 92221), 'networkx.descendants', 'nx.descendants', (['disconnected', 'neighbor[2]'], {}), '(disconnected, neighbor[2])\n', (92194, 92221), True, 'import networkx as nx\n'), ((14004, 14023), 'numpy.round', 'np.round', (['to_jimage'], {}), '(to_jimage)\n', (14012, 14023), True, 'import numpy as np\n'), ((83310, 83379), 'pymatgen.core.Molecule', 
'Molecule', ([], {'species': 'species', 'coords': 'coords', 'charge': 'self.molecule.charge'}), '(species=species, coords=coords, charge=self.molecule.charge)\n', (83318, 83379), False, 'from pymatgen.core import Lattice, Molecule, PeriodicSite, Structure\n'), ((51646, 51677), 'numpy.multiply', 'np.multiply', (['(-1)', "d['to_jimage']"], {}), "(-1, d['to_jimage'])\n", (51657, 51677), True, 'import numpy as np\n'), ((81831, 81854), 'copy.deepcopy', 'copy.deepcopy', (['subgraph'], {}), '(subgraph)\n', (81844, 81854), False, 'import copy\n'), ((81930, 81953), 'copy.deepcopy', 'copy.deepcopy', (['subgraph'], {}), '(subgraph)\n', (81943, 81953), False, 'import copy\n'), ((47824, 47855), 'numpy.multiply', 'np.multiply', (['(-1)', "d['to_jimage']"], {}), "(-1, d['to_jimage'])\n", (47835, 47855), True, 'import numpy as np\n')] |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from .models import Article
from django.contrib.auth.decorators import login_required
from . import forms
def Articles(request):
articles = Article.objects.all().order_by('date')
return render(request, 'articles/article_list.html', {'articles': articles})
def article_detail(request, slug):
# return HttpResponse(slug)
article = Article.objects.get(slug=slug)
return render(request, 'articles/article_details.html', {'article': article})
@login_required(login_url="/accounts/login")
def article_create(request):
if request.method == 'POST':
form = forms.CreateArticle(request.POST, request.FILES)
if form.is_valid():
#save article to DB
instance = form.save(commit=False)
instance.author = request.user
            instance.save()
            return redirect('articles:list')
else:
form = forms.CreateArticle()
return render(request, 'articles/article_create.html', {'form':form})
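
# Illustrative URLconf this views module assumes (a minimal sketch; the route
# names are inferred from the redirect target 'articles:list', and the exact
# paths are guesses):
#
# from django.urls import path
# from . import views
#
# app_name = 'articles'
# urlpatterns = [
#     path('', views.Articles, name='list'),
#     path('create/', views.article_create, name='create'),
#     path('<slug:slug>/', views.article_detail, name='detail'),
# ]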
| [
"django.shortcuts.render",
"django.shortcuts.redirect",
"django.contrib.auth.decorators.login_required"
] | [((548, 591), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""/accounts/login"""'}), "(login_url='/accounts/login')\n", (562, 591), False, 'from django.contrib.auth.decorators import login_required\n'), ((278, 347), 'django.shortcuts.render', 'render', (['request', '"""articles/article_list.html"""', "{'articles': articles}"], {}), "(request, 'articles/article_list.html', {'articles': articles})\n", (284, 347), False, 'from django.shortcuts import render, redirect\n'), ((474, 544), 'django.shortcuts.render', 'render', (['request', '"""articles/article_details.html"""', "{'article': article}"], {}), "(request, 'articles/article_details.html', {'article': article})\n", (480, 544), False, 'from django.shortcuts import render, redirect\n'), ((1001, 1064), 'django.shortcuts.render', 'render', (['request', '"""articles/article_create.html"""', "{'form': form}"], {}), "(request, 'articles/article_create.html', {'form': form})\n", (1007, 1064), False, 'from django.shortcuts import render, redirect\n'), ((916, 941), 'django.shortcuts.redirect', 'redirect', (['"""articles:list"""'], {}), "('articles:list')\n", (924, 941), False, 'from django.shortcuts import render, redirect\n')] |
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define the base of targets (i.e. systems to be tested)
TargetID = Column(Integer(11), primary_key=True)
IPAddress = Column(String(15), nullable=False)
CompanyID = Column(Integer(11), ForeignKey("Companies.CompanyID"))
Namespace = Column(String(30), nullable=False)
SMIVersion = Column(String(15), nullable=False)
Product = Column(String(30), nullable=False)
Principal = Column(String(30), nullable=False)
Credential = Column(String(30), nullable=False)
CimomVersion = Column(String(30), nullable=False)
InteropNamespace = Column(String(30), nullable=False)
Notify = Column(Enum('Enabled', 'Disabled'), default='Disabled')
NotifyUsers = Column(String(12), nullable=False)
ScanEnabled = Column(Enum('Enabled', 'Disabled'), default='Enabled')
Protocol = Column(String(10), default='http')
Port = Column(String(10), nullable=False)
"""
# TODO change ip_address to hostname where host name is name : port
from __future__ import print_function, absolute_import
import os
import csv
import re
from collections import OrderedDict
from textwrap import wrap
import six
from mysql.connector import Error as mysqlerror
from ._dbtablebase import DBTableBase
from ._mysqldbmixin import MySQLDBMixin
from ._common import get_url_str
from ._logging import AUDIT_LOGGER_NAME, get_logger
from ._companiestable import CompaniesTable
__all__ = ['TargetsTable']
class TargetsTable(DBTableBase):
"""
Class representing the targets db table.
This base contains information on the targets, host systems, etc. in the
environment.
The factory method should be used to construct a new TargetsTable object
since that creates the correct object for the defined database type.
"""
table_name = 'Targets'
key_field = 'TargetID'
# Fields that are required to create new records
required_fields = [
'IPAddress', 'CompanyID', 'Namespace',
'SMIVersion', 'Product', 'Principal', 'Credential',
'CimomVersion', 'InteropNamespace', 'Notify', 'NotifyUsers',
'ScanEnabled', 'Protocol', 'Port']
# All fields in each record.
fields = [key_field] + required_fields
join_fields = ['CompanyName']
all_fields = fields + join_fields
hints = {
'IPAddress': "Host name or ip address",
'CompanyID': "DB id of company",
'Namespace': "User namespace",
'SMIVersion': "SMI version",
'Product': "Product name",
'Principal': "User Name to access target",
'Credential': "User password to access target",
'CimomVersion': "Version of CIMOM",
'InteropNamespace': "Interop Namespace name",
'Notify': "'Enabled' if users to be notified of issues, else "
"'Disabled'",
'NotifyUsers': "List of UserIDs to notify",
'ScanEnabled': "Enabled if this target to be scanned",
'Protocol': '"http" or "https"',
'Port': "Integer defining WBEM server port."}
# # Defines each record for the data base and outputs.
# # The Name is the database name for the property
# # The value tuple is display name and max width for the record
table_format_dict = OrderedDict([
('TargetID', ('ID', 2, int)),
('CompanyName', ('CompanyName', 12, str)),
('Namespace', ('Namespace', 12, str)),
('SMIVersion', ('SMIVersion', 12, str)),
('Product', ('Product', 15, str)),
('Principal', ('Principal', 12, str)),
('Credential', ('Credential', 12, str)),
('CimomVersion', ('CimomVersion', 15, str)),
('IPAddress', ('IPAddress', 12, str)),
('InteropNamespace', ('Interop', 8, str)),
('Notify', ('Notify', 12, str)),
('NotifyUsers', ('NotifyUsers', 12, str)),
('Protocol', ('Prot', 5, str)),
('Port', ('Port', 4, int)),
('ScanEnabled', ('Enabled', 6, str)),
]) # noqa: E123
def __init__(self, db_dict, db_type, verbose, output_format):
"""Initialize the abstract Targets instance.
        This is the base for all other
        target tables. It defines the common definition of all targets tables,
        including field names and common methods.
Parameters:
db_dict (:term: `dictionary')
Dictionary containing all of the parameters to open the database
defined by the db_dict attribute.
db_type (:term: `string`)
String defining one of the allowed database types for the
target database.
verbose (:class:`py:bool`)
Boolean. If true detailed info is displayed on the processing
of the TargetData class
output_format (:term:`string`)
String defining one of the legal report output formats. If not
provided, the default is a simple report format.
"""
super(TargetsTable, self).__init__(db_dict, db_type, verbose)
self.output_format = output_format
# def __str__(self):
# # # TODO this and __repr__ do not really match.
# # """String info on targetdata. TODO. Put more info here"""
# # return ('type=%s db=%s, len=%s' % (self.db_type, self.get_dbdict(),
# # # len(self.data_dict)))
# def __repr__(self):
# # """Rep of target data"""
# # return ('Targetdata db_type %s, rep count=%s' %
# # # (self.db_type, len(self.data_dict)))
def test_fieldnames(self, fields):
"""Test a list of field names. This test generates an exception,
KeyError if a field in fields is not in the table
"""
for field in fields:
self.table_format_dict[field] # pylint: disable=pointless-statement
def get_dbdict(self):
"""Get string for the db_dict"""
return '%s' % self.db_dict
@classmethod
def factory(cls, db_dict, db_type, verbose, output_format='simple'):
"""Factory method to select subclass based on database type (db_type).
Currently the types sql and csv are supported.
Returns instance object of the defined provider type.
"""
inst = None
if verbose:
print('targetdata factory datafile %s dbtype %s verbose %s'
% (db_dict, db_type, verbose))
if db_type == ('csv'):
inst = CsvTargetsTable(db_dict, db_type, verbose,
output_format=output_format)
elif db_type == ('mysql'):
inst = MySQLTargetsTable(db_dict, db_type, verbose,
output_format=output_format)
else:
            raise ValueError('Invalid targets factory db_type %s' % db_type)
if verbose:
print('Resulting targets factory inst %r' % inst)
return inst
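
    # Illustrative use (the db_dict contents and the target id are examples,
    # not values taken from this module):
    #   targets_tbl = TargetsTable.factory(db_dict, 'mysql', verbose=False)
    #   target = targets_tbl.get_target(42)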
def get_field_list(self):
"""Return a list of the base table field names in the order defined."""
return list(self.table_format_dict)
def get_format_dict(self, name):
"""Return tuple of display name and length for name."""
return self.table_format_dict[name]
def get_enabled_targetids(self):
"""Get list of target ids that are marked enabled."""
return [x for x in self.data_dict if not self.disabled_target_id(x)]
def get_disabled_targetids(self):
"""Get list of target ids that are marked disabled"""
return [x for x in self.data_dict
if self.disabled_target_id(x)]
# TODO we have multiple of these. See get dict_for_host,get_hostid_list
def get_targets_host(self, host_data):
"""
        Return the list of target keys whose IPAddress and Port match
        `host_data`; the list is empty if there is no match.
        There may be multiple ipaddress, port entries for a
        single ipaddress, port in the database
        Parameters:
          host_data (tuple of hostname or ipaddress and port)
Returns list of targetdata keys
"""
# TODO clean up for PY 3
return_list = []
for key, value in self.data_dict.items():
port = value["Port"]
# TODO port from database is a string. Should be int internal.
if value["IPAddress"] == host_data[0] and int(port) == host_data[1]:
return_list.append(key)
return return_list
def get_target(self, targetid):
"""
Get the target data for the parameter target_id.
        This is an alternative to using [id] directly. It does an additional
        check that target_id has the correct type.
Returns:
target as dictionary
Exceptions:
KeyError if target not in targets dictionary
"""
if not isinstance(targetid, six.integer_types):
targetid = int(targetid)
return self.data_dict[targetid]
def filter_targets(self, ip_filter=None, company_name_filter=None):
"""
Filter for match of ip_filter and companyname filter if they exist
and return list of any targets that match.
The filters are regex strings.
"""
rtn = OrderedDict()
for key, value in self.data_dict.items():
if ip_filter and re.match(ip_filter, value['IPAddress']):
rtn[key] = value
if company_name_filter and \
                    re.match(company_name_filter, value['CompanyName']):
rtn[key] = value
return rtn
def build_url(self, targetid):
"""Get the string representing the url for targetid. Gets the
Protocol, IPaddress and port and uses the common get_url_str to
create a string. Port info is included only if it is not the
WBEM CIM-XML standard definitions.
"""
target = self[targetid]
return get_url_str(target['Protocol'], target['IPAddress'],
target['Port'])
def get_hostid_list(self, ip_filter=None, company_name_filter=None):
"""
Get all WBEM Server ipaddresses in the targets base.
Returns list of IP addresses:port entries.
TODO: Does not include port right now.
"""
output_list = []
# TODO clean up for python 3
for _id, value in self.data_dict.items():
if self.verbose:
print('get_hostid_list value %s' % (value,))
output_list.append(value['IPAddress'])
return output_list
def tbl_hdr(self, record_list):
"""Return a list of all the column headers from the record_list."""
hdr = []
for name in record_list:
value = self.get_format_dict(name)
hdr.append(value[0])
return hdr
def get_notifyusers(self, targetid):
"""
Get list of entries in the notify users field and split into python
list and return the list of integers representing the userids.
This list stored in db as string of integers separated by commas.
Returns None if there is no data in NotifyUsers.
"""
notify_users = self[targetid]['NotifyUsers']
if notify_users:
notify_users_list = notify_users.split(',')
notify_users_list = [int(userid) for userid in notify_users_list]
return notify_users_list
return None
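
    # Example (derived from the code above): a NotifyUsers value of "1,3,5"
    # yields [1, 3, 5]; an empty value yields None.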
def format_record(self, record_id, fields, fold=False):
"""Return the fields defined in field_list for the record_id in
display format.
String fields will be folded if their width is greater than the
specification in the format_dictionary and fold=True
"""
# TODO can we make this a std cvt function.
target = self.get_target(record_id)
line = []
for field_name in fields:
field_value = target[field_name]
fmt_value = self.get_format_dict(field_name)
max_width = fmt_value[1]
field_type = fmt_value[2]
if isinstance(field_type, six.string_types) and field_value:
if max_width < len(field_value):
line.append('\n'.join(wrap(field_value, max_width)))
else:
line.append('%s' % field_value)
else:
line.append('%s' % field_value)
return line
def disabled_target(self, target_record): # pylint: disable=no-self-use
"""
If target_record disabled, return true, else return false.
"""
val = target_record['ScanEnabled'].lower()
if val == 'enabled':
return False
if val == 'disabled':
return True
        raise ValueError('ScanEnabled field must contain "Enabled" or "Disabled'
                         ' string. %s is invalid.' % val)
def disabled_target_id(self, targetid):
"""
        Return True if the target record for this target_id is marked
        disabled. Otherwise return False.
Parameters:
target_id(:term:`integer`)
            Valid target ID for the Targets table.
Returns: (:class:`py:bool`)
True if this target id disabled
Exceptions:
KeyError if target_id not in database
"""
        return self.disabled_target(self.data_dict[targetid])
def get_output_width(self, col_list):
"""
Get the width of a table from the column names in the list
"""
total_width = 0
for name in col_list:
value = self.get_format_dict(name)
total_width += value[1]
return total_width
def get_unique_creds(self):
"""
Get the set of Credentials and Principal that represents the
unique combination of both. The result could be used to test with
        all Principals/Credentials known in the db.
Return list of targetIDs that represent unique sets of Principal and
Credential
"""
creds = {k: '%s%s' % (v['Principal'], v['Credential'])
for k, v in self.data_dict.items()}
ucreds = dict([[v, k] for k, v in creds.items()])
unique_keys = dict([[v, k] for k, v in ucreds.items()])
unique_creds = [(self.data_dict[k]['Principal'],
self.data_dict[k]['Credential']) for k in unique_keys]
return unique_creds
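
    # Example (illustrative): if targets 1 and 2 both use ('admin', 'pw1') and
    # target 3 uses ('user', 'pw2'), get_unique_creds() returns each distinct
    # (Principal, Credential) pair exactly once.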
class SQLTargetsTable(TargetsTable):
"""
Subclass of Targets data for all SQL databases. Subclasses of this class
support specialized sql databases.
"""
def __init__(self, db_dict, dbtype, verbose, output_format):
"""Pass through to SQL"""
if verbose:
print('SQL Database type %s verbose=%s' % (db_dict, verbose))
super(SQLTargetsTable, self).__init__(db_dict, dbtype, verbose,
output_format)
self.connection = None
class MySQLTargetsTable(SQLTargetsTable, MySQLDBMixin):
"""
    This subclass of TargetsTable processes targets information from an sql
    database.
    Generate the targets table from the sql database targets table and
the companies table, by mapping the data to the dictionary defined
for targets
"""
# TODO filename is config file name, not actual file name.
def __init__(self, db_dict, dbtype, verbose, output_format):
"""Read the input file into a dictionary."""
super(MySQLTargetsTable, self).__init__(db_dict, dbtype, verbose,
output_format)
self.connectdb(db_dict, verbose)
self._load_table()
self._load_joins()
def _load_joins(self):
"""
Load the tables that would normally be joins. In this case it is the
companies table. Move the companyName into the targets table
TODO we should not be doing this in this manner but with a
join.
"""
# Get companies table and insert into targets table:
# TODO in smipyping name is db_dict. Elsewhere it is db_info
companies_tbl = CompaniesTable.factory(self.db_dict,
self.db_type,
self.verbose)
try:
# set the companyname into the targets table
for target_key in self.data_dict:
target = self.data_dict[target_key]
if target['CompanyID'] in companies_tbl:
company = companies_tbl[target['CompanyID']]
target['CompanyName'] = company['CompanyName']
else:
target['CompanyName'] = "TableError CompanyID %s" % \
target['CompanyID']
except Exception as ex:
raise ValueError('Error: putting Company Name in table %r error %s'
% (self.db_dict, ex))
def update_fields(self, targetid, changes):
"""
Update the database record defined by targetid with the dictionary
of items defined by changes where each item is an entry in the
target record. Update does NOT test if the new value is the same
as the original value.
"""
cursor = self.connection.cursor()
# dynamically build the update sql based on the changes dictionary
set_names = "SET "
values = []
comma = False
for key, value in changes.items():
if comma:
set_names = set_names + ", "
else:
comma = True
set_names = set_names + "{0} = %s".format(key)
values.append(value)
values.append(targetid)
sql = "Update Targets " + set_names
# append targetid component
sql = sql + " WHERE TargetID=%s"
# Record the original data for the audit log.
original_data = {}
target_record = self.get_target(targetid)
for change in changes:
original_data[change] = target_record[change]
try:
cursor.execute(sql, tuple(values))
self.connection.commit()
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.info('TargetsTable TargetID: %s, update fields: %s, '
'original fields: %s',
targetid, changes, original_data)
except Exception as ex:
self.connection.rollback()
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.error('TargetsTable TargetID: %s failed SQL update. '
'SQL: %s Changes: %s Exception: %s',
targetid, sql, changes, ex)
raise ex
finally:
self._load_table()
self._load_joins()
cursor.close()
def activate(self, targetid, activate_flag):
"""
Activate or deactivate the table entry defined by the
targetid parameter to the value defined by the activate_flag
Parameters:
targetid (:term:`py:integer`):
The database key property for this table
activate_flag (:class:`py:bool`):
Next state that will be set into the database for this target.
          Since the db field is an enum it actually sets the 'Enabled' or
          'Disabled' string into the field
"""
cursor = self.connection.cursor()
enabled_kw = 'Enabled' if activate_flag else 'Disabled'
sql = 'UPDATE Targets SET ScanEnabled = %s WHERE TargetID = %s'
try:
cursor.execute(sql, (enabled_kw, targetid)) # noqa F841
self.connection.commit()
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.info('TargetTable TargetId %s,set scanEnabled to %s',
targetid, enabled_kw)
except mysqlerror as ex:
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.error('TargetTable userid %s failed SQL change '
'ScanEnabled. SQL=%s '
'Change to %s exception %s: %s',
targetid, sql, enabled_kw, ex.__class__.__name__,
ex)
self.connection.rollback()
raise ex
finally:
self._load_table()
self._load_joins()
def delete(self, targetid):
"""
Delete the target in the targets table defined by the targetid
"""
cursor = self.connection.cursor()
sql = "DELETE FROM Targets WHERE TargetID=%s"
try:
# pylint: disable=unused-variable
mydata = cursor.execute(sql, (targetid,)) # noqa F841
self.connection.commit()
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.info('TargetTable TargetId %s Deleted', targetid)
except mysqlerror as ex:
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.error('TargetTable targetid %s failed SQL DELETE. '
'SQL=%s exception %s: %s',
targetid, sql, ex.__class__.__name__, ex)
self.connection.rollback()
raise ex
finally:
self._load_table()
self._load_joins()
self.connection.close()
def insert(self, fields):
"""
Write a new record to the database containing the fields defined in
the input.
Parameters:
          fields (dict)
Dictionary of fields to be inserted into the table. There is
one entry in the dictionary for each field to be inserted.
Exceptions:
"""
cursor = self.connection.cursor()
placeholders = ', '.join(['%s'] * len(fields))
columns = ', '.join(fields.keys())
sql = "INSERT INTO %s ( %s ) VALUES ( %s )" % (self.table_name,
columns,
placeholders)
try:
cursor.execute(sql, fields.values())
self.connection.commit()
new_targetid = cursor.lastrowid
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.info('TargetsTable TargetId %s added. %s',
new_targetid, fields)
except mysqlerror as ex:
audit_logger = get_logger(AUDIT_LOGGER_NAME)
audit_logger.error('TargetTable INSERT failed SQL update. SQL=%s. '
'data=%s. Exception %s: %s', sql, fields,
ex.__class__.__name__, ex)
self.connection.rollback()
raise ex
finally:
self._load_table()
self._load_joins()
self.connection.close()
class CsvTargetsTable(TargetsTable):
"""Comma Separated Values form of the Target base."""
def __init__(self, db_dict, dbtype, verbose, output_format):
"""Read the input file into a dictionary."""
super(CsvTargetsTable, self).__init__(db_dict, dbtype, verbose,
output_format)
fn = db_dict['targetsfilename']
self.filename = fn
# If the filename is not a full directory, the data file must be
# either in the local directory or the same directory as the
# config file defined by the db_dict entry directory
if os.path.isabs(fn):
if not os.path.isfile(fn):
                raise ValueError('CSV file %s does not exist ' % fn)
else:
self.filename = fn
else:
if os.path.isfile(fn):
self.filename = fn
else:
full_fn = os.path.join(db_dict['directory'], fn)
if not os.path.isfile(full_fn):
                    raise ValueError('CSV file %s does not exist '
                                     'in local directory or config directory %s' %
                                     (fn, db_dict['directory']))
else:
self.filename = full_fn
with open(self.filename) as input_file:
reader = csv.DictReader(input_file)
# create dictionary (id = key) with dictionary for
# each set of entries
result = {}
for row in reader:
key = int(row['TargetID'])
if key in result:
# duplicate row handling
print('ERROR. Duplicate Id in table: %s\nrow=%s' %
(key, row))
raise ValueError('Input Error. duplicate Id')
else:
result[key] = row
self.data_dict = result
def write_updated_record(self, record_id):
"""Backup the existing file and write the new one.
with cvs it writes the whole file back
"""
backfile = '%s.bak' % self.filename
# TODO does this cover directories/clean up for possible exceptions.
if os.path.isfile(backfile):
os.remove(backfile)
os.rename(self.filename, backfile)
self.write_file(self.filename)
def write_file(self, file_name):
"""Write the current Target base to the named file."""
with open(file_name, 'wb') as f:
writer = csv.DictWriter(f, fieldnames=self.get_field_list())
writer.writeheader()
for key, value in sorted(self.data_dict.items()):
writer.writerow(value)
| [
"collections.OrderedDict",
"csv.DictReader",
"os.path.isabs",
"os.rename",
"re.match",
"os.path.join",
"os.path.isfile",
"textwrap.wrap",
"os.remove"
] | [((3808, 4429), 'collections.OrderedDict', 'OrderedDict', (["[('TargetID', ('ID', 2, int)), ('CompanyName', ('CompanyName', 12, str)), (\n 'Namespace', ('Namespace', 12, str)), ('SMIVersion', ('SMIVersion', 12,\n str)), ('Product', ('Product', 15, str)), ('Principal', ('Principal', \n 12, str)), ('Credential', ('Credential', 12, str)), ('CimomVersion', (\n 'CimomVersion', 15, str)), ('IPAddress', ('IPAddress', 12, str)), (\n 'InteropNamespace', ('Interop', 8, str)), ('Notify', ('Notify', 12, str\n )), ('NotifyUsers', ('NotifyUsers', 12, str)), ('Protocol', ('Prot', 5,\n str)), ('Port', ('Port', 4, int)), ('ScanEnabled', ('Enabled', 6, str))]"], {}), "([('TargetID', ('ID', 2, int)), ('CompanyName', ('CompanyName', \n 12, str)), ('Namespace', ('Namespace', 12, str)), ('SMIVersion', (\n 'SMIVersion', 12, str)), ('Product', ('Product', 15, str)), (\n 'Principal', ('Principal', 12, str)), ('Credential', ('Credential', 12,\n str)), ('CimomVersion', ('CimomVersion', 15, str)), ('IPAddress', (\n 'IPAddress', 12, str)), ('InteropNamespace', ('Interop', 8, str)), (\n 'Notify', ('Notify', 12, str)), ('NotifyUsers', ('NotifyUsers', 12, str\n )), ('Protocol', ('Prot', 5, str)), ('Port', ('Port', 4, int)), (\n 'ScanEnabled', ('Enabled', 6, str))])\n", (3819, 4429), False, 'from collections import OrderedDict\n'), ((9728, 9741), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9739, 9741), False, 'from collections import OrderedDict\n'), ((24106, 24123), 'os.path.isabs', 'os.path.isabs', (['fn'], {}), '(fn)\n', (24119, 24123), False, 'import os\n'), ((25695, 25719), 'os.path.isfile', 'os.path.isfile', (['backfile'], {}), '(backfile)\n', (25709, 25719), False, 'import os\n'), ((25761, 25795), 'os.rename', 'os.rename', (['self.filename', 'backfile'], {}), '(self.filename, backfile)\n', (25770, 25795), False, 'import os\n'), ((24309, 24327), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (24323, 24327), False, 'import os\n'), ((24828, 24854), 'csv.DictReader', 'csv.DictReader', (['input_file'], {}), '(input_file)\n', (24842, 24854), False, 'import csv\n'), ((25733, 25752), 'os.remove', 'os.remove', (['backfile'], {}), '(backfile)\n', (25742, 25752), False, 'import os\n'), ((9821, 9860), 're.match', 're.match', (['ip_filter', "value['IPAddress']"], {}), "(ip_filter, value['IPAddress'])\n", (9829, 9860), False, 'import re\n'), ((9956, 10007), 're.match', 're.match', (["value['CompanyName']", 'company_name_filter'], {}), "(value['CompanyName'], company_name_filter)\n", (9964, 10007), False, 'import re\n'), ((24144, 24162), 'os.path.isfile', 'os.path.isfile', (['fn'], {}), '(fn)\n', (24158, 24162), False, 'import os\n'), ((24408, 24446), 'os.path.join', 'os.path.join', (["db_dict['directory']", 'fn'], {}), "(db_dict['directory'], fn)\n", (24420, 24446), False, 'import os\n'), ((24470, 24493), 'os.path.isfile', 'os.path.isfile', (['full_fn'], {}), '(full_fn)\n', (24484, 24493), False, 'import os\n'), ((12722, 12750), 'textwrap.wrap', 'wrap', (['field_value', 'max_width'], {}), '(field_value, max_width)\n', (12726, 12750), False, 'from textwrap import wrap\n')] |
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
import os
from az_code_gen.base import *
from AzReflectionCpp import format_cpp_annotations
class AZEBusInline_Driver(TemplateDriver):
def apply_transformations(self, json_object):
format_cpp_annotations(json_object)
def render_templates(self, input_file, **template_kwargs):
input_file_name, input_file_ext = os.path.splitext(input_file)
self.render_template_to_file(
"AzEBusInline.tpl", template_kwargs, '{}.generated.inline'.format(input_file_name))
# Factory function - called from launcher
def create_drivers(env):
return [AZEBusInline_Driver(env)]
| [
"AzReflectionCpp.format_cpp_annotations",
"os.path.splitext"
] | [((719, 754), 'AzReflectionCpp.format_cpp_annotations', 'format_cpp_annotations', (['json_object'], {}), '(json_object)\n', (741, 754), False, 'from AzReflectionCpp import format_cpp_annotations\n'), ((861, 889), 'os.path.splitext', 'os.path.splitext', (['input_file'], {}), '(input_file)\n', (877, 889), False, 'import os\n')] |
import os
from QUANTAXIS.QASetting import QALocalize
#from QUANTAXIS_CRAWLY.run_selenium_alone import (read_east_money_page_zjlx_to_sqllite, open_chrome_driver, close_chrome_dirver)
from QUANTAXIS_CRAWLY.run_selenium_alone import *
import urllib
import pandas as pd
import time
from QUANTAXIS.QAUtil import (DATABASE)
def QA_request_eastmoney_zjlx( param_stock_code_list ):
    # switched to this URL
strUrl = "http://data.eastmoney.com/zjlx/{}.html".format(param_stock_code_list[0])
    # delay
time.sleep(1.223)
response = urllib.request.urlopen(strUrl)
content = response.read()
    # 🛠todo switch to re regular expressions for the matching
strings = content.decode("utf-8", "ignore")
string_lines = strings.split("\r\n")
#for aline in string_lines:
# aline = aline.strip()
# if '_stockCode' in aline:
# _stockCode = aline[len('var _stockCode = '):]
# _stockCode = _stockCode.strip("\"\"\,")
# if '_stockMarke' in aline:
# _stockMarke = aline[len('_stockMarke = '):]
# _stockMarke = _stockMarke.strip("\"\"\,")
# # 60XXXX ,
#_stockMarke = 1
# 00XXXX ,
# _stockMarke = 2
# 30XXXX ,
# _stockMarke = 2
# if '_stockName' in aline:
# _stockName = aline[len('_stockName = '):]
# _stockName = _stockName.strip("\"\"\,")
# if '_market' in aline:
# _market = aline[len('_market = '):]
# _market = _market.strip("\"\"\,")
# break
#_market= 'hsa'
# print(_stockCode)
# print(_stockMarke)
# print(_stockName)
# print(_market)
values = []
for aline in string_lines:
aline = aline.strip()
if 'EM_CapitalFlowInterface' in aline:
# print(aline)
# print('------------------')
aline = aline.strip()
if aline.startswith('var strUrl = '):
if 'var strUrl = ' in aline:
aline = aline[len('var strUrl = '):]
values = aline.split('+')
# print(values)
break
# print('------------------')
print(values)
for iStockCode in range(len(param_stock_code_list)):
requestStr = ""
strCode = param_stock_code_list[iStockCode]
if strCode[0:2] == '60':
_stockMarke = '1'
elif strCode[0:2] == '00' or strCode[0:2] == '30':
_stockMarke = '2'
else:
print(strCode + " 暂不支持, 60, 00, 30 开头的股票代码")
return
for iItem in values:
if '_stockCode' in iItem:
requestStr = requestStr + param_stock_code_list[iStockCode]
elif '_stockMarke' in iItem:
requestStr = requestStr + _stockMarke
else:
if 'http://ff.eastmoney.com/' in iItem:
requestStr = 'http://ff.eastmoney.com/'
else:
iItem = iItem.strip(' "')
iItem = iItem.rstrip(' "')
requestStr = requestStr + iItem
# print(requestStr)
        # delay
time.sleep(1.456)
response = urllib.request.urlopen(requestStr)
content2 = response.read()
# print(content2)
strings = content2.decode("utf-8", "ignore")
# print(strings)
list_data_zjlx = []
if 'var aff_data=({data:[["' in strings:
leftChars = strings[len('var aff_data=({data:[["'):]
# print(leftChars)
dataArrays = leftChars.split(',')
# print(dataArrays)
for aItemIndex in range(0, len(dataArrays), 13):
                '''
                date
                closing price
                price change (%)
                main force net inflow: net amount, net share (%)
                extra-large order net inflow: net amount, net share (%)
                large order net inflow: net amount, net share (%)
                medium order net inflow: net amount, net share (%)
                small order net inflow: net amount, net share (%)
                '''
dict_row = {}
dict_row['stock_code'] = param_stock_code_list[iStockCode]
                # date
                # print(aItemIndex)
                data01 = dataArrays[aItemIndex]
                data01 = data01.strip('"')
                # print('date', data01)
dict_row['date'] = data01
                # main force net inflow: net amount
                data02 = dataArrays[aItemIndex + 1]
                data02 = data02.strip('"')
                # print('main force net inflow: net amount', data02)
dict_row['zljll_je_wy'] = data02
                # main force net inflow: net share (%)
                data03 = dataArrays[aItemIndex + 2]
                data03 = data03.strip('"')
                # print('main force net inflow: net share (%)', data03)
# date01 = aItemData.strip('[\'\'')
dict_row['zljll_jzb_bfb'] = data03
                # extra-large order net inflow: net amount
                data04 = dataArrays[aItemIndex + 3]
                data04 = data04.strip('"')
                # print('extra-large order net inflow: net amount', data04)
dict_row['cddjll_je_wy'] = data04
                # extra-large order net inflow: net share (%)
                data05 = dataArrays[aItemIndex + 4]
                data05 = data05.strip('"')
                # print('extra-large order net inflow: net share (%)', data05)
dict_row['cddjll_je_jzb'] = data05
                # large order net inflow: net amount
                data06 = dataArrays[aItemIndex + 5]
                data06 = data06.strip('"')
                # print('large order net inflow: net amount', data06)
dict_row['ddjll_je_wy'] = data06
                # large order net inflow: net share (%)
                data07 = dataArrays[aItemIndex + 6]
                data07 = data07.strip('"')
                # print('large order net inflow: net share (%)', data07)
dict_row['ddjll_je_jzb'] = data07
                # medium order net inflow: net amount
                data08 = dataArrays[aItemIndex + 7]
                data08 = data08.strip('"')
                # print('medium order net inflow: net amount', data08)
dict_row['zdjll_je_wy'] = data08
                # medium order net inflow: net share (%)
                data09 = dataArrays[aItemIndex + 8]
                data09 = data09.strip('"')
                # print('medium order net inflow: net share (%)', data09)
dict_row['zdjll_je_jzb'] = data09
                # small order net inflow: net amount
                data10 = dataArrays[aItemIndex + 9]
                data10 = data10.strip('"')
                # print('small order net inflow: net amount', data10)
dict_row['xdjll_je_wy'] = data10
                # small order net inflow: net share (%)
                data11 = dataArrays[aItemIndex + 10]
                data11 = data11.strip('"')
                # print('small order net inflow: net share (%)', data11)
dict_row['xdjll_je_jzb'] = data11
                # closing price
                data12 = dataArrays[aItemIndex + 11]
                data12 = data12.strip('"')
                # print('closing price', data12)
dict_row['close_price'] = data12
                # price change (%)
                data13 = dataArrays[aItemIndex + 12]
                data13 = data13.strip('"')
                data13 = data13.strip('"]]})')
                # print('price change (%)', data13)
dict_row['change_price'] = data13
                # one record read successfully
                # print("successfully read one record")
# print(dict_row)
list_data_zjlx.append(dict_row)
# print(list_data_zjlx)
df = pd.DataFrame(list_data_zjlx)
# print(df)
client = DATABASE
coll_stock_zjlx = client.eastmoney_stock_zjlx
# coll_stock_zjlx.insert_many(QA_util_to_json_from_pandas(df))
for i in range(len(list_data_zjlx)):
aRec = list_data_zjlx[i]
        # 🛠todo after the trading day ends, fetch that day's money flow; the intraday flow is only a point-in-time snapshot
ret = coll_stock_zjlx.find_one(aRec)
if ret == None:
coll_stock_zjlx.insert_one(aRec)
print("🤑 插入新的记录 ", aRec)
else:
print("😵 记录已经存在 ", ret)
'''
Fetch via the browser as a test case, and compare whether the data matches what the request-based method returns.
'''
def QA_read_eastmoney_zjlx_web_page_to_sqllite(stockCodeList = None):
    # todo 🛠 check that each stockCode exists and is valid
    # todo 🛠 QALocalize: read the driver file from a fixed location in the QALocalize directory
    print("📨 current working directory: ", os.getcwd())
path_check = os.getcwd()+"/QUANTAXIS_WEBDRIVER"
if os.path.exists(path_check) == False:
print("😵 确认当前路径是否包含selenium_driver目录 😰 ")
return
else:
        print(os.getcwd()+"/QUANTAXIS_WEBDRIVER", " directory exists 😁")
print("")
# path_for_save_data = QALocalize.download_path + "/eastmoney_stock_zjlx"
# isExists = os.path.exists(path_for_save_data)
# if isExists == False:
# os.mkdir(path_for_save_data)
# isExists = os.path.exists(path_for_save_data)
# if isExists == True:
    #         print(path_for_save_data, "directory did not exist! created it successfully 😢")
    #     else:
    #         print(path_for_save_data, "directory did not exist! failed to create it 🤮, possibly no permission 🈲")
# return
# else:
    #     print(path_for_save_data, "directory exists! preparing to read data 😋")
browser = open_chrome_driver()
for indexCode in range(len(stockCodeList)):
#full_path_name = path_for_save_data + "/" + stockCodeList[indexCode] + "_zjlx.sqlite.db"
read_east_money_page_zjlx_to_sqllite(stockCodeList[indexCode], browser)
pass
close_chrome_dirver(browser)
    # create the directory
    # start a thread to read the web pages and write them to the database
    # wait for completion
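
# Illustrative calls (the stock codes are arbitrary examples; only codes
# starting with 60, 00 or 30 are handled above):
#   QA_request_eastmoney_zjlx(['600036', '000001'])
#   QA_read_eastmoney_zjlx_web_page_to_sqllite(['600036', '000001'])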
"os.path.exists",
"time.sleep",
"os.getcwd",
"pandas.DataFrame",
"urllib.request.urlopen"
] | [((488, 505), 'time.sleep', 'time.sleep', (['(1.223)'], {}), '(1.223)\n', (498, 505), False, 'import time\n'), ((522, 552), 'urllib.request.urlopen', 'urllib.request.urlopen', (['strUrl'], {}), '(strUrl)\n', (544, 552), False, 'import urllib\n'), ((3133, 3150), 'time.sleep', 'time.sleep', (['(1.456)'], {}), '(1.456)\n', (3143, 3150), False, 'import time\n'), ((3171, 3205), 'urllib.request.urlopen', 'urllib.request.urlopen', (['requestStr'], {}), '(requestStr)\n', (3193, 3205), False, 'import urllib\n'), ((7139, 7167), 'pandas.DataFrame', 'pd.DataFrame', (['list_data_zjlx'], {}), '(list_data_zjlx)\n', (7151, 7167), True, 'import pandas as pd\n'), ((7951, 7962), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7960, 7962), False, 'import os\n'), ((7969, 7980), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7978, 7980), False, 'import os\n'), ((8011, 8037), 'os.path.exists', 'os.path.exists', (['path_check'], {}), '(path_check)\n', (8025, 8037), False, 'import os\n'), ((8137, 8148), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8146, 8148), False, 'import os\n')] |
#!/usr/bin/env python
import logging
import sys
from app import app as application
def setup_flask_logging():
# Log to stdout
handler = logging.StreamHandler(sys.stdout)
# Log to a file
#handler = logging.FileHandler('./application.log')
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter(
'%(asctime)s [%(funcName)s] %(levelname)s: %(message)s '
))
application.logger.addHandler(handler)
# Set default log level for the general logger
# each handler can then restrict the messages logged
application.logger.setLevel(logging.INFO)
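
    # With the formatter above, a record renders roughly as (illustrative;
    # the function name depends on the view that logs the message):
    # 2017-06-25 15:10:02,123 [index] INFO: handled request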
setup_flask_logging()
if __name__ == '__main__':
application.run()
| [
"app.app.run",
"logging.StreamHandler",
"logging.Formatter",
"app.app.logger.setLevel",
"app.app.logger.addHandler"
] | [((552, 593), 'app.app.logger.setLevel', 'application.logger.setLevel', (['logging.INFO'], {}), '(logging.INFO)\n', (579, 593), True, 'from app import app as application\n'), ((146, 179), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (167, 179), False, 'import logging\n'), ((411, 449), 'app.app.logger.addHandler', 'application.logger.addHandler', (['handler'], {}), '(handler)\n', (440, 449), True, 'from app import app as application\n'), ((649, 666), 'app.app.run', 'application.run', ([], {}), '()\n', (664, 666), True, 'from app import app as application\n'), ((316, 391), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(funcName)s] %(levelname)s: %(message)s """'], {}), "('%(asctime)s [%(funcName)s] %(levelname)s: %(message)s ')\n", (333, 391), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-25 15:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rates', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='rate',
old_name='euro_rate',
new_name='eur_rate',
),
migrations.RenameField(
model_name='rate',
old_name='pound_rates',
new_name='gbp_rate',
),
]
| [
"django.db.migrations.RenameField"
] | [((279, 368), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""rate"""', 'old_name': '"""euro_rate"""', 'new_name': '"""eur_rate"""'}), "(model_name='rate', old_name='euro_rate', new_name=\n 'eur_rate')\n", (301, 368), False, 'from django.db import migrations\n'), ((420, 511), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""rate"""', 'old_name': '"""pound_rates"""', 'new_name': '"""gbp_rate"""'}), "(model_name='rate', old_name='pound_rates', new_name=\n 'gbp_rate')\n", (442, 511), False, 'from django.db import migrations\n')] |
# coding: utf-8
"""
Quetzal API
Quetzal: an API to manage data files and their associated metadata.
OpenAPI spec version: 0.5.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "quetzal-openapi-client"
VERSION = "0.5.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Quetzal API auto-generated client",
author='<NAME>',
author_email="<EMAIL>",
url="https://github.com/quet.zal/quetzal-openapi-client",
project_urls={
"Documentation": "https://quetzal-openapi-client.readthedocs.io",
"Code": "https://github.com/quetz-al/quetzal-openapi-client",
"Issue tracker": "https://github.com/quetz-al/quetzal-openapi-client/issues",
},
license="BSD-3-Clause",
keywords=["OpenAPI", "OpenAPI-Generator", "Quetzal API"],
install_requires=REQUIRES,
packages=find_packages(exclude=['test', 'docs']),
namespace_packages=['quetzal'],
include_package_data=True,
long_description="""\
quetzal-openapi-client
======================
This is an auto-generated package using
[openapi-generator](https://github.com/OpenAPITools/openapi-generator)
from an OpenAPI specification of the Quetzal API.
An improvement layer on this client exists in the quetzal-client package.
Quetzal is an API to manage data files and their associated metadata.
See more at [quetz.al](https://quetz.al) and its
[readthedocs documentation](https://quetzal-api.readthedocs.io).
""",
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Database :: Front-Ends',
'Topic :: Internet :: WWW/HTTP',
'Topic :: System :: Archiving',
],
)
| [
"setuptools.find_packages"
] | [((1145, 1184), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['test', 'docs']"}), "(exclude=['test', 'docs'])\n", (1158, 1184), False, 'from setuptools import setup, find_packages\n')] |
# coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
fix_xml_ampersands,
xpath_text,
int_or_none,
determine_ext,
float_or_none,
parse_duration,
xpath_attr,
update_url_query,
ExtractorError,
strip_or_none,
url_or_none,
)
class TurnerBaseIE(AdobePassIE):
_AKAMAI_SPE_TOKEN_CACHE = {}
def _extract_timestamp(self, video_data):
return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))
def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, custom_tokenizer_query=None):
secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
if not token:
query = {
'path': secure_path,
}
if custom_tokenizer_query:
query.update(custom_tokenizer_query)
else:
query['videoId'] = content_id
if ap_data.get('auth_required'):
query['accessToken'] = self._extract_mvpd_auth(ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'])
auth = self._download_xml(
tokenizer_src, content_id, query=query)
error_msg = xpath_text(auth, 'error/msg')
if error_msg:
raise ExtractorError(error_msg, expected=True)
token = xpath_text(auth, 'token')
if not token:
return video_url
self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token
return video_url + '?hdnea=' + token
def _extract_cvp_info(self, data_src, video_id, path_data={}, ap_data={}, fatal=False):
video_data = self._download_xml(
data_src, video_id,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=fatal)
if not video_data:
return {}
video_id = video_data.attrib['id']
title = xpath_text(video_data, 'headline', fatal=True)
content_id = xpath_text(video_data, 'contentId') or video_id
# rtmp_src = xpath_text(video_data, 'akamai/src')
# if rtmp_src:
# split_rtmp_src = rtmp_src.split(',')
# if len(split_rtmp_src) == 2:
# rtmp_src = split_rtmp_src[1]
# aifp = xpath_text(video_data, 'akamai/aifp', default='')
urls = []
formats = []
thumbnails = []
subtitles = {}
rex = re.compile(
r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')
# Possible formats locations: files/file, files/groupFiles/files
# and maybe others
for video_file in video_data.findall('.//file'):
video_url = url_or_none(video_file.text.strip())
if not video_url:
continue
ext = determine_ext(video_url)
if video_url.startswith('/mp4:protected/'):
continue
# TODO Correct extraction for these files
# protected_path_data = path_data.get('protected')
# if not protected_path_data or not rtmp_src:
# continue
# protected_path = self._search_regex(
# r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
# auth = self._download_webpage(
# protected_path_data['tokenizer_src'], query={
# 'path': protected_path,
# 'videoId': content_id,
# 'aifp': aifp,
# })
# token = xpath_text(auth, 'token')
# if not token:
# continue
# video_url = rtmp_src + video_url + '?' + token
elif video_url.startswith('/secure/'):
secure_path_data = path_data.get('secure')
if not secure_path_data:
continue
video_url = self._add_akamai_spe_token(
secure_path_data['tokenizer_src'],
secure_path_data['media_src'] + video_url,
content_id, ap_data)
elif not re.match('https?://', video_url):
base_path_data = path_data.get(ext, path_data.get('default', {}))
media_src = base_path_data.get('media_src')
if not media_src:
continue
video_url = media_src + video_url
if video_url in urls:
continue
urls.append(video_url)
format_id = video_file.get('bitrate')
if ext in ('scc', 'srt', 'vtt'):
subtitles.setdefault('en', []).append({
'ext': ext,
'url': video_url,
})
elif ext == 'png':
thumbnails.append({
'id': format_id,
'url': video_url,
})
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
video_url, video_id, fatal=False))
elif re.match(r'https?://[^/]+\.akamaihd\.net/[iz]/', video_url):
formats.extend(self._extract_akamai_formats(
video_url, video_id, {
'hds': path_data.get('f4m', {}).get('host'),
# nba.cdn.turner.com, ht.cdn.turner.com, ht2.cdn.turner.com
# ht3.cdn.turner.com, i.cdn.turner.com, s.cdn.turner.com
# ssl.cdn.turner.com
'http': 'pmd.cdn.turner.com',
}))
elif ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
video_url, video_id, 'mp4',
m3u8_id=format_id or 'hls', fatal=False)
if '/secure/' in video_url and '?hdnea=' in video_url:
for f in m3u8_formats:
f['_seekable'] = False
formats.extend(m3u8_formats)
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(
update_url_query(video_url, {'hdcore': '3.7.0'}),
video_id, f4m_id=format_id or 'hds', fatal=False))
else:
f = {
'format_id': format_id,
'url': video_url,
'ext': ext,
}
mobj = rex.search(video_url)
if mobj:
f.update({
'width': int(mobj.group('width')),
'height': int(mobj.group('height')),
'tbr': int_or_none(mobj.group('bitrate')),
})
elif isinstance(format_id, compat_str):
if format_id.isdigit():
f['tbr'] = int(format_id)
else:
mobj = re.match(r'ios_(audio|[0-9]+)$', format_id)
if mobj:
if mobj.group(1) == 'audio':
f.update({
'vcodec': 'none',
'ext': 'm4a',
})
else:
f['tbr'] = int(mobj.group(1))
formats.append(f)
self._sort_formats(formats)
for source in video_data.findall('closedCaptions/source'):
for track in source.findall('track'):
track_url = url_or_none(track.get('url'))
if not track_url or track_url.endswith('/big'):
continue
lang = track.get('lang') or track.get('label') or 'en'
subtitles.setdefault(lang, []).append({
'url': track_url,
'ext': {
'scc': 'scc',
'webvtt': 'vtt',
'smptett': 'tt',
}.get(source.get('format'))
})
thumbnails.extend({
'id': image.get('cut') or image.get('name'),
'url': image.text,
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in video_data.findall('images/image'))
is_live = xpath_text(video_data, 'isLive') == 'true'
return {
'id': video_id,
'title': self._live_title(title) if is_live else title,
'formats': formats,
'subtitles': subtitles,
'thumbnails': thumbnails,
'thumbnail': xpath_text(video_data, 'poster'),
'description': strip_or_none(xpath_text(video_data, 'description')),
'duration': parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')),
'timestamp': self._extract_timestamp(video_data),
'upload_date': xpath_attr(video_data, 'metas', 'version'),
'series': xpath_text(video_data, 'showTitle'),
'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
'is_live': is_live,
}
def _extract_ngtv_info(self, media_id, tokenizer_query, ap_data=None):
streams_data = self._download_json(
'http://medium.ngtv.io/media/%s/tv' % media_id,
media_id)['media']['tv']
duration = None
chapters = []
formats = []
for supported_type in ('unprotected', 'bulkaes'):
stream_data = streams_data.get(supported_type, {})
m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
if not m3u8_url:
continue
if stream_data.get('playlistProtection') == 'spe':
m3u8_url = self._add_akamai_spe_token(
'http://token.ngtv.io/token/token_spe',
m3u8_url, media_id, ap_data or {}, tokenizer_query)
formats.extend(self._extract_m3u8_formats(
m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))
duration = float_or_none(stream_data.get('totalRuntime'))
if not chapters:
for chapter in stream_data.get('contentSegments', []):
start_time = float_or_none(chapter.get('start'))
chapter_duration = float_or_none(chapter.get('duration'))
if start_time is None or chapter_duration is None:
continue
chapters.append({
'start_time': start_time,
'end_time': start_time + chapter_duration,
})
self._sort_formats(formats)
return {
'formats': formats,
'chapters': chapters,
'duration': duration,
}
| [
"re.match",
"re.compile"
] | [((2585, 2660), 're.compile', 're.compile', (['"""(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?"""'], {}), "('(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')\n", (2595, 2660), False, 'import re\n'), ((4294, 4326), 're.match', 're.match', (['"""https?://"""', 'video_url'], {}), "('https?://', video_url)\n", (4302, 4326), False, 'import re\n'), ((5241, 5301), 're.match', 're.match', (['"""https?://[^/]+\\\\.akamaihd\\\\.net/[iz]/"""', 'video_url'], {}), "('https?://[^/]+\\\\.akamaihd\\\\.net/[iz]/', video_url)\n", (5249, 5301), False, 'import re\n'), ((7089, 7131), 're.match', 're.match', (['"""ios_(audio|[0-9]+)$"""', 'format_id'], {}), "('ios_(audio|[0-9]+)$', format_id)\n", (7097, 7131), False, 'import re\n')] |
from alpha_vantage.timeseries import TimeSeries
from pprint import pprint
import json
import argparse
def save_dataset(symbol='MSFT', time_window='daily_adj'):
credentials = json.load(open('creds.json', 'r'))
api_key = credentials['av_api_key']
print(symbol, time_window)
ts = TimeSeries(key=api_key, output_format='pandas')
if time_window == 'intraday':
data, meta_data = ts.get_intraday(
symbol=symbol, interval='1min', outputsize='full')
elif time_window == 'daily':
data, meta_data = ts.get_daily(symbol, outputsize='full')
elif time_window == 'daily_adj':
data, meta_data = ts.get_daily_adjusted(symbol, outputsize='full')
pprint(data.head(10))
data.to_csv(f'./{symbol}_{time_window}.csv')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('symbol', type=str, help="the stock symbol you want to download")
parser.add_argument('time_window', type=str, choices=[
'intraday', 'daily', 'daily_adj'], help="the time period you want to download the stock history for")
namespace = parser.parse_args()
save_dataset(**vars(namespace))
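
    # Illustrative invocation (the script file name is assumed):
    #   python save_dataset.py MSFT daily_adj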
| [
"alpha_vantage.timeseries.TimeSeries",
"argparse.ArgumentParser"
] | [((295, 342), 'alpha_vantage.timeseries.TimeSeries', 'TimeSeries', ([], {'key': 'api_key', 'output_format': '"""pandas"""'}), "(key=api_key, output_format='pandas')\n", (305, 342), False, 'from alpha_vantage.timeseries import TimeSeries\n'), ((813, 838), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (836, 838), False, 'import argparse\n')] |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module define a WulffShape class to generate the Wulff shape from
a lattice, a list of indices and their corresponding surface energies,
and the total area and volume of the wulff shape,the weighted surface energy,
the anisotropy and shape_factor can also be calculated.
In support of plotting from a given view in terms of miller index.
The lattice is from the conventional unit cell, and (hkil) for hexagonal
lattices.
If you use this code extensively, consider citing the following:
<NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.
(2016). Surface energies of elemental crystals. Scientific Data.
"""
from pymatgen.core.structure import Structure
from pymatgen.util.coord import get_angle
import numpy as np
import scipy as sp
from scipy.spatial import ConvexHull
import logging
import warnings
__author__ = '<NAME>, <NAME>, <NAME>'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = 'May 5 2016'
logger = logging.getLogger(__name__)
def hkl_tuple_to_str(hkl):
"""
Prepare for display on plots
"(hkl)" for surfaces
Agrs:
hkl: in the form of [h, k, l] or (h, k, l)
"""
str_format = '($'
for x in hkl:
if x < 0:
str_format += '\\overline{' + str(-x) + '}'
else:
str_format += str(x)
str_format += '$)'
return str_format
def get_tri_area(pts):
"""
    Given a list of coords for 3 points,
    compute the area of this triangle.
Args:
pts: [a, b, c] three points
"""
a, b, c = pts[0], pts[1], pts[2]
v1 = np.array(b) - np.array(a)
v2 = np.array(c) - np.array(a)
area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
return area_tri
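
# Quick check of get_tri_area (derived from the formula above): the right
# triangle (0, 0, 0), (1, 0, 0), (0, 1, 0) gives |v1 x v2| / 2 = 0.5.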
class WulffFacet:
"""
Helper container for each Wulff plane.
"""
def __init__(self, normal, e_surf, normal_pt, dual_pt, index, m_ind_orig,
miller):
"""
:param normal:
:param e_surf:
:param normal_pt:
:param dual_pt:
:param index:
:param m_ind_orig:
:param miller:
"""
self.normal = normal
self.e_surf = e_surf
self.normal_pt = normal_pt
self.dual_pt = dual_pt
self.index = index
self.m_ind_orig = m_ind_orig
self.miller = miller
self.points = []
self.outer_lines = []
class WulffShape:
"""
Generate Wulff Shape from list of miller index and surface energies,
with given conventional unit cell.
    surface energy (J/m^2) is the length of the normal.
Wulff shape is the convex hull.
Based on:
http://scipy.github.io/devdocs/generated/scipy.spatial.ConvexHull.html
Process:
1. get wulff simplices
2. label with color
3. get wulff_area and other properties
.. attribute:: debug (bool)
.. attribute:: alpha
transparency
.. attribute:: color_set
.. attribute:: grid_off (bool)
.. attribute:: axis_off (bool)
.. attribute:: show_area
.. attribute:: off_color
color of facets off wulff
.. attribute:: structure
Structure object, input conventional unit cell (with H ) from lattice
.. attribute:: miller_list
list of input miller index, for hcp in the form of hkil
.. attribute:: hkl_list
modify hkill to hkl, in the same order with input_miller
.. attribute:: e_surf_list
list of input surface energies, in the same order with input_miller
.. attribute:: lattice
Lattice object, the input lattice for the conventional unit cell
.. attribute:: facets
[WulffFacet] for all facets considering symm
.. attribute:: dual_cv_simp
simplices from the dual convex hull (dual_pt)
.. attribute:: wulff_pt_list
.. attribute:: wulff_cv_simp
simplices from the convex hull of wulff_pt_list
.. attribute:: on_wulff
list for all input_miller, True is on wulff.
.. attribute:: color_area
list for all input_miller, total area on wulff, off_wulff = 0.
.. attribute:: miller_area
($hkl$): area for all input_miller
"""
def __init__(self, lattice, miller_list, e_surf_list, symprec=1e-5):
"""
Args:
lattice: Lattice object of the conventional unit cell
miller_list ([(hkl), ...]: list of hkl or hkil for hcp
e_surf_list ([float]): list of corresponding surface energies
symprec (float): for recp_operation, default is 1e-5.
"""
if any([se < 0 for se in e_surf_list]):
warnings.warn("Unphysical (negative) surface energy detected.")
self.color_ind = list(range(len(miller_list)))
self.input_miller_fig = [hkl_tuple_to_str(x) for x in miller_list]
# store input data
self.structure = Structure(lattice, ["H"], [[0, 0, 0]])
self.miller_list = tuple([tuple(x) for x in miller_list])
self.hkl_list = tuple([(x[0], x[1], x[-1]) for x in miller_list])
self.e_surf_list = tuple(e_surf_list)
self.lattice = lattice
self.symprec = symprec
# 2. get all the data for wulff construction
# get all the surface normal from get_all_miller_e()
self.facets = self._get_all_miller_e()
logger.debug(len(self.facets))
# 3. consider the dual condition
dual_pts = [x.dual_pt for x in self.facets]
dual_convex = ConvexHull(dual_pts)
dual_cv_simp = dual_convex.simplices
# simplices (ndarray of ints, shape (nfacet, ndim))
# list of [i, j, k] , ndim = 3
# i, j, k: ind for normal_e_m
# recalculate the dual of dual, get the wulff shape.
# conner <-> surface
# get cross point from the simplices of the dual convex hull
wulff_pt_list = [self._get_cross_pt_dual_simp(dual_simp)
for dual_simp in dual_cv_simp]
wulff_convex = ConvexHull(wulff_pt_list)
wulff_cv_simp = wulff_convex.simplices
logger.debug(", ".join([str(len(x)) for x in wulff_cv_simp]))
# store simplices and convex
self.dual_cv_simp = dual_cv_simp
self.wulff_pt_list = wulff_pt_list
self.wulff_cv_simp = wulff_cv_simp
self.wulff_convex = wulff_convex
self.on_wulff, self.color_area = self._get_simpx_plane()
miller_area = []
for m, in_mill_fig in enumerate(self.input_miller_fig):
miller_area.append(
in_mill_fig + ' : ' + str(round(self.color_area[m], 4)))
self.miller_area = miller_area
def _get_all_miller_e(self):
"""
from self:
get miller_list(unique_miller), e_surf_list and symmetry
operations(symmops) according to lattice
apply symmops to get all the miller index, then get normal,
get all the facets functions for wulff shape calculation:
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
normal[0]x + normal[1]y + normal[2]z = e_surf
return:
[WulffFacet]
"""
all_hkl = []
color_ind = self.color_ind
planes = []
recp = self.structure.lattice.reciprocal_lattice_crystallographic
recp_symmops = self.lattice.get_recp_symmetry_operation(self.symprec)
for i, (hkl, energy) in enumerate(zip(self.hkl_list,
self.e_surf_list)):
for op in recp_symmops:
miller = tuple([int(x) for x in op.operate(hkl)])
if miller not in all_hkl:
all_hkl.append(miller)
normal = recp.get_cartesian_coords(miller)
normal /= sp.linalg.norm(normal)
normal_pt = [x * energy for x in normal]
dual_pt = [x / energy for x in normal]
color_plane = color_ind[divmod(i, len(color_ind))[1]]
planes.append(WulffFacet(normal, energy, normal_pt,
dual_pt, color_plane, i, hkl))
# sort by e_surf
planes.sort(key=lambda x: x.e_surf)
return planes
def _get_cross_pt_dual_simp(self, dual_simp):
"""
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m)
"""
matrix_surfs = [self.facets[dual_simp[i]].normal for i in range(3)]
matrix_e = [self.facets[dual_simp[i]].e_surf for i in range(3)]
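        # The Wulff vertex is the intersection of the three planes n_i . x = e_i:
        # solve the 3x3 linear system N x = e, i.e. x = N^{-1} e, where the rows of N
        # are the facet normals and e holds the corresponding surface energies.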
cross_pt = sp.dot(sp.linalg.inv(matrix_surfs), matrix_e)
return cross_pt
def _get_simpx_plane(self):
"""
Locate the plane for simpx of on wulff_cv, by comparing the center of
the simpx triangle with the plane functions.
"""
on_wulff = [False] * len(self.miller_list)
surface_area = [0.0] * len(self.miller_list)
for simpx in self.wulff_cv_simp:
pts = [self.wulff_pt_list[simpx[i]] for i in range(3)]
center = np.sum(pts, 0) / 3.0
# check whether the center of the simplices is on one plane
for plane in self.facets:
abs_diff = abs(np.dot(plane.normal, center) - plane.e_surf)
if abs_diff < 1e-5:
on_wulff[plane.index] = True
surface_area[plane.index] += get_tri_area(pts)
plane.points.append(pts)
plane.outer_lines.append([simpx[0], simpx[1]])
plane.outer_lines.append([simpx[1], simpx[2]])
plane.outer_lines.append([simpx[0], simpx[2]])
# already find the plane, move to the next simplices
break
for plane in self.facets:
plane.outer_lines.sort()
plane.outer_lines = [line for line in plane.outer_lines
if plane.outer_lines.count(line) != 2]
return on_wulff, surface_area
def _get_colors(self, color_set, alpha, off_color, custom_colors={}):
"""
assign colors according to the surface energies of on_wulff facets.
return:
(color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff,
e_surf_on_wulff_list)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
color_list = [off_color] * len(self.hkl_list)
color_proxy_on_wulff = []
miller_on_wulff = []
e_surf_on_wulff = [(i, e_surf)
for i, e_surf in enumerate(self.e_surf_list)
if self.on_wulff[i]]
c_map = plt.get_cmap(color_set)
e_surf_on_wulff.sort(key=lambda x: x[1], reverse=False)
e_surf_on_wulff_list = [x[1] for x in e_surf_on_wulff]
if len(e_surf_on_wulff) > 1:
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list),
vmax=max(e_surf_on_wulff_list))
else:
# if there is only one hkl on wulff, choose the color of the median
cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list) - 0.1,
vmax=max(e_surf_on_wulff_list) + 0.1)
scalar_map = mpl.cm.ScalarMappable(norm=cnorm, cmap=c_map)
for i, e_surf in e_surf_on_wulff:
color_list[i] = scalar_map.to_rgba(e_surf, alpha=alpha)
if tuple(self.miller_list[i]) in custom_colors.keys():
color_list[i] = custom_colors[tuple(self.miller_list[i])]
color_proxy_on_wulff.append(
plt.Rectangle((2, 2), 1, 1, fc=color_list[i], alpha=alpha))
miller_on_wulff.append(self.input_miller_fig[i])
scalar_map.set_array([x[1] for x in e_surf_on_wulff])
color_proxy = [plt.Rectangle((2, 2), 1, 1, fc=x, alpha=alpha)
for x in color_list]
return color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, e_surf_on_wulff_list
def show(self, *args, **kwargs):
r"""
Show the Wulff plot.
Args:
*args: Passed to get_plot.
**kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def get_line_in_facet(self, facet):
"""
Returns the sorted pts in a facet used to draw a line
"""
lines = list(facet.outer_lines)
pt = []
prev = None
while len(lines) > 0:
if prev is None:
l = lines.pop(0)
else:
for i, l in enumerate(lines):
if prev in l:
l = lines.pop(i)
if l[1] == prev:
l.reverse()
break
# make sure the lines are connected one by one.
# find the way covering all pts and facets
pt.append(self.wulff_pt_list[l[0]].tolist())
pt.append(self.wulff_pt_list[l[1]].tolist())
prev = l[1]
return pt
def get_plot(self, color_set='PuBu', grid_off=True, axis_off=True,
show_area=False, alpha=1, off_color='red', direction=None,
bar_pos=(0.75, 0.15, 0.05, 0.65), bar_on=False, units_in_JPERM2=True,
legend_on=True, aspect_ratio=(8, 8), custom_colors={}):
"""
Get the Wulff shape plot.
Args:
color_set: default is 'PuBu'
grid_off (bool): default is True
            axis_off (bool): default is True
show_area (bool): default is False
alpha (float): chosen from 0 to 1 (float), default is 1
off_color: Default color for facets not present on the Wulff shape.
            direction: viewing direction; defaults to the facet direction with the largest area fraction
bar_pos: default is [0.75, 0.15, 0.05, 0.65]
bar_on (bool): default is False
legend_on (bool): default is True
aspect_ratio: default is (8, 8)
custom_colors ({(h,k,l}: [r,g,b,alpha}): Customize color of each
facet with a dictionary. The key is the corresponding Miller
index and value is the color. Undefined facets will use default
                color scheme. Note: If you decide to set your own colors, it
probably won't make any sense to have the color bar on.
Return:
(matplotlib.pyplot)
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as mpl3
color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, e_surf_on_wulff = self._get_colors(
color_set, alpha, off_color, custom_colors=custom_colors)
if not direction:
# If direction is not specified, use the miller indices of
# maximum area.
direction = max(self.area_fraction_dict.items(),
key=lambda x: x[1])[0]
fig = plt.figure()
fig.set_size_inches(aspect_ratio[0], aspect_ratio[1])
azim, elev = self._get_azimuth_elev([direction[0], direction[1],
direction[-1]])
wulff_pt_list = self.wulff_pt_list
ax = mpl3.Axes3D(fig, azim=azim, elev=elev)
for plane in self.facets:
# check whether [pts] is empty
if len(plane.points) < 1:
# empty, plane is not on_wulff.
continue
# assign the color for on_wulff facets according to its
# index and the color_list for on_wulff
plane_color = color_list[plane.index]
pt = self.get_line_in_facet(plane)
# plot from the sorted pts from [simpx]
tri = mpl3.art3d.Poly3DCollection([pt])
tri.set_color(plane_color)
tri.set_edgecolor("#808080")
ax.add_collection3d(tri)
# set ranges of x, y, z
# find the largest distance between on_wulff pts and the origin,
# to ensure complete and consistent display for all directions
r_range = max([np.linalg.norm(x) for x in wulff_pt_list])
ax.set_xlim([-r_range * 1.1, r_range * 1.1])
ax.set_ylim([-r_range * 1.1, r_range * 1.1])
ax.set_zlim([-r_range * 1.1, r_range * 1.1])
# add legend
if legend_on:
color_proxy = color_proxy
if show_area:
ax.legend(color_proxy, self.miller_area, loc='upper left',
bbox_to_anchor=(0, 1), fancybox=True, shadow=False)
else:
ax.legend(color_proxy_on_wulff, miller_on_wulff,
loc='upper center',
bbox_to_anchor=(0.5, 1), ncol=3, fancybox=True,
shadow=False)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# Add colorbar
if bar_on:
cmap = plt.get_cmap(color_set)
cmap.set_over('0.25')
cmap.set_under('0.75')
bounds = [round(e, 2) for e in e_surf_on_wulff]
bounds.append(1.2 * bounds[-1])
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# display surface energies
ax1 = fig.add_axes(bar_pos)
cbar = mpl.colorbar.ColorbarBase(
ax1, cmap=cmap, norm=norm, boundaries=[0] + bounds + [10],
extend='both', ticks=bounds[:-1], spacing='proportional',
orientation='vertical')
units = "$J/m^2$" if units_in_JPERM2 else r"$eV/\AA^2$"
cbar.set_label('Surface Energies (%s)' % (units), fontsize=100)
if grid_off:
ax.grid('off')
if axis_off:
ax.axis('off')
return plt
def _get_azimuth_elev(self, miller_index):
"""
Args:
miller_index: viewing direction
Returns:
azim, elev for plotting
"""
if miller_index == (0, 0, 1) or miller_index == (0, 0, 0, 1):
return 0, 90
else:
cart = self.lattice.get_cartesian_coords(miller_index)
azim = get_angle([cart[0], cart[1], 0], (1, 0, 0))
v = [cart[0], cart[1], 0]
elev = get_angle(cart, v)
return azim, elev
@property
def volume(self):
"""
Volume of the Wulff shape
"""
return self.wulff_convex.volume
@property
def miller_area_dict(self):
"""
Returns {hkl: area_hkl on wulff}
"""
return dict(zip(self.miller_list, self.color_area))
@property
def miller_energy_dict(self):
"""
Returns {hkl: surface energy_hkl}
"""
return dict(zip(self.miller_list, self.e_surf_list))
@property
def surface_area(self):
"""
Total surface area of Wulff shape.
"""
return sum(self.miller_area_dict.values())
@property
def weighted_surface_energy(self):
"""
Returns:
sum(surface_energy_hkl * area_hkl)/ sum(area_hkl)
"""
return self.total_surface_energy / self.surface_area
@property
def area_fraction_dict(self):
"""
Returns:
(dict): {hkl: area_hkl/total area on wulff}
"""
return {hkl: self.miller_area_dict[hkl] / self.surface_area
for hkl in self.miller_area_dict.keys()}
@property
def anisotropy(self):
"""
Returns:
(float) Coefficient of Variation from weighted surface energy
The ideal sphere is 0.
"""
square_diff_energy = 0
weighted_energy = self.weighted_surface_energy
area_frac_dict = self.area_fraction_dict
miller_energy_dict = self.miller_energy_dict
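        # Weighted coefficient of variation computed below:
        #   sqrt( sum_hkl f_hkl * (e_hkl - e_weighted)^2 ) / e_weighted
        # where f_hkl is the area fraction of facet hkl on the Wulff shape.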
for hkl in miller_energy_dict.keys():
square_diff_energy += (miller_energy_dict[hkl] - weighted_energy) \
** 2 * area_frac_dict[hkl]
return np.sqrt(square_diff_energy) / weighted_energy
@property
def shape_factor(self):
"""
This is useful for determining the critical nucleus size.
A large shape factor indicates great anisotropy.
See <NAME>., <NAME>. & <NAME>. Kinetics
of Materials. (<NAME>, 2005), p.461
Returns:
(float) Shape factor.
"""
return self.surface_area / (self.volume ** (2 / 3))
@property
def effective_radius(self):
"""
        Radius of the Wulff shape when the
        Wulff shape is approximated as a sphere.
Returns:
(float) radius.
"""
return ((3 / 4) * (self.volume / np.pi)) ** (1 / 3)
@property
def total_surface_energy(self):
"""
Total surface energy of the Wulff shape.
Returns:
(float) sum(surface_energy_hkl * area_hkl)
"""
tot_surface_energy = 0
for hkl in self.miller_energy_dict.keys():
tot_surface_energy += self.miller_energy_dict[hkl] * \
self.miller_area_dict[hkl]
return tot_surface_energy
@property
def tot_corner_sites(self):
"""
Returns the number of vertices in the convex hull.
Useful for identifying catalytically active sites.
"""
return len(self.wulff_convex.vertices)
@property
def tot_edges(self):
"""
Returns the number of edges in the convex hull.
Useful for identifying catalytically active sites.
"""
all_edges = []
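        # Facets share edges, so an edge already recorded from a previously visited
        # facet is skipped; this counts every geometric edge of the hull exactly once.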
for facet in self.facets:
edges = []
pt = self.get_line_in_facet(facet)
lines = []
for i, p in enumerate(pt):
if i == len(pt) / 2:
break
lines.append(tuple(sorted(tuple([tuple(pt[i * 2]), tuple(pt[i * 2 + 1])]))))
for i, p in enumerate(lines):
if p not in all_edges:
edges.append(p)
all_edges.extend(edges)
return len(all_edges)
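# Hedged usage sketch (not part of the original module). It assumes the class
# defined above is pymatgen's WulffShape; the cubic lattice constant, Miller
# indices and surface energies are illustrative values chosen only to show the
# constructor signature and a few derived properties.
if __name__ == "__main__":
    from pymatgen.core.lattice import Lattice
    demo_lattice = Lattice.cubic(3.5)
    demo_wulff = WulffShape(
        demo_lattice,
        miller_list=[(1, 0, 0), (1, 1, 0), (1, 1, 1)],
        e_surf_list=[1.2, 1.0, 0.9],  # made-up surface energies in J/m^2
    )
    print(demo_wulff.volume)
    print(demo_wulff.anisotropy)
    print(demo_wulff.area_fraction_dict)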
| [
"logging.getLogger",
"numpy.sqrt",
"pymatgen.core.structure.Structure",
"matplotlib.colorbar.ColorbarBase",
"numpy.array",
"numpy.linalg.norm",
"scipy.cross",
"mpl_toolkits.mplot3d.art3d.Poly3DCollection",
"numpy.dot",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.Rectangle",
"warnings.warn",
"scipy.spatial.ConvexHull",
"scipy.linalg.inv",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.get_cmap",
"pymatgen.util.coord.get_angle",
"numpy.sum",
"matplotlib.pyplot.figure",
"scipy.linalg.norm",
"matplotlib.colors.BoundaryNorm"
] | [((1126, 1153), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1143, 1153), False, 'import logging\n'), ((1738, 1749), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (1746, 1749), True, 'import numpy as np\n'), ((1752, 1763), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1760, 1763), True, 'import numpy as np\n'), ((1773, 1784), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (1781, 1784), True, 'import numpy as np\n'), ((1787, 1798), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1795, 1798), True, 'import numpy as np\n'), ((4997, 5035), 'pymatgen.core.structure.Structure', 'Structure', (['lattice', "['H']", '[[0, 0, 0]]'], {}), "(lattice, ['H'], [[0, 0, 0]])\n", (5006, 5035), False, 'from pymatgen.core.structure import Structure\n'), ((5601, 5621), 'scipy.spatial.ConvexHull', 'ConvexHull', (['dual_pts'], {}), '(dual_pts)\n', (5611, 5621), False, 'from scipy.spatial import ConvexHull\n'), ((6108, 6133), 'scipy.spatial.ConvexHull', 'ConvexHull', (['wulff_pt_list'], {}), '(wulff_pt_list)\n', (6118, 6133), False, 'from scipy.spatial import ConvexHull\n'), ((11053, 11076), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['color_set'], {}), '(color_set)\n', (11065, 11076), True, 'import matplotlib.pyplot as plt\n'), ((11660, 11705), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'cnorm', 'cmap': 'c_map'}), '(norm=cnorm, cmap=c_map)\n', (11681, 11705), True, 'import matplotlib as mpl\n'), ((15385, 15397), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (15395, 15397), True, 'import matplotlib.pyplot as plt\n'), ((15652, 15690), 'mpl_toolkits.mplot3d.Axes3D', 'mpl3.Axes3D', (['fig'], {'azim': 'azim', 'elev': 'elev'}), '(fig, azim=azim, elev=elev)\n', (15663, 15690), True, 'import mpl_toolkits.mplot3d as mpl3\n'), ((4749, 4812), 'warnings.warn', 'warnings.warn', (['"""Unphysical (negative) surface energy detected."""'], {}), "('Unphysical (negative) surface energy detected.')\n", (4762, 4812), False, 'import warnings\n'), ((8946, 8973), 'scipy.linalg.inv', 'sp.linalg.inv', (['matrix_surfs'], {}), '(matrix_surfs)\n', (8959, 8973), True, 'import scipy as sp\n'), ((12221, 12267), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(2, 2)', '(1)', '(1)'], {'fc': 'x', 'alpha': 'alpha'}), '((2, 2), 1, 1, fc=x, alpha=alpha)\n', (12234, 12267), True, 'import matplotlib.pyplot as plt\n'), ((16167, 16200), 'mpl_toolkits.mplot3d.art3d.Poly3DCollection', 'mpl3.art3d.Poly3DCollection', (['[pt]'], {}), '([pt])\n', (16194, 16200), True, 'import mpl_toolkits.mplot3d as mpl3\n'), ((17366, 17389), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['color_set'], {}), '(color_set)\n', (17378, 17389), True, 'import matplotlib.pyplot as plt\n'), ((17582, 17621), 'matplotlib.colors.BoundaryNorm', 'mpl.colors.BoundaryNorm', (['bounds', 'cmap.N'], {}), '(bounds, cmap.N)\n', (17605, 17621), True, 'import matplotlib as mpl\n'), ((17720, 17894), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax1'], {'cmap': 'cmap', 'norm': 'norm', 'boundaries': '([0] + bounds + [10])', 'extend': '"""both"""', 'ticks': 'bounds[:-1]', 'spacing': '"""proportional"""', 'orientation': '"""vertical"""'}), "(ax1, cmap=cmap, norm=norm, boundaries=[0] +\n bounds + [10], extend='both', ticks=bounds[:-1], spacing='proportional',\n orientation='vertical')\n", (17745, 17894), True, 'import matplotlib as mpl\n'), ((18575, 18618), 'pymatgen.util.coord.get_angle', 'get_angle', (['[cart[0], cart[1], 0]', '(1, 0, 0)'], {}), '([cart[0], cart[1], 0], (1, 0, 
0))\n', (18584, 18618), False, 'from pymatgen.util.coord import get_angle\n'), ((18676, 18694), 'pymatgen.util.coord.get_angle', 'get_angle', (['cart', 'v'], {}), '(cart, v)\n', (18685, 18694), False, 'from pymatgen.util.coord import get_angle\n'), ((20440, 20467), 'numpy.sqrt', 'np.sqrt', (['square_diff_energy'], {}), '(square_diff_energy)\n', (20447, 20467), True, 'import numpy as np\n'), ((1833, 1849), 'scipy.cross', 'sp.cross', (['v1', 'v2'], {}), '(v1, v2)\n', (1841, 1849), True, 'import scipy as sp\n'), ((9430, 9444), 'numpy.sum', 'np.sum', (['pts', '(0)'], {}), '(pts, 0)\n', (9436, 9444), True, 'import numpy as np\n'), ((12015, 12073), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(2, 2)', '(1)', '(1)'], {'fc': 'color_list[i]', 'alpha': 'alpha'}), '((2, 2), 1, 1, fc=color_list[i], alpha=alpha)\n', (12028, 12073), True, 'import matplotlib.pyplot as plt\n'), ((16518, 16535), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (16532, 16535), True, 'import numpy as np\n'), ((7893, 7915), 'scipy.linalg.norm', 'sp.linalg.norm', (['normal'], {}), '(normal)\n', (7907, 7915), True, 'import scipy as sp\n'), ((9592, 9620), 'numpy.dot', 'np.dot', (['plane.normal', 'center'], {}), '(plane.normal, center)\n', (9598, 9620), True, 'import numpy as np\n')] |
from python_on_rails.either import as_either, Failure, Success
@as_either(TypeError)
def add_one(x):
return x + 1
@as_either()
def times_five(x):
return x * 5
def test_success_executes_bindings():
result = Success(1).bind(add_one).bind(times_five)
assert isinstance(result, Success)
assert result.value == 10
def test_a_failure_stops_the_execution_of_later_bindings():
result = Success("NaN").bind(add_one).bind(times_five)
assert isinstance(result, Failure)
assert type(result.value) == TypeError
assert repr(result.value) == "TypeError('can only concatenate str (not \"int\") to str')"
| [
"python_on_rails.either.Success",
"python_on_rails.either.as_either"
] | [((66, 86), 'python_on_rails.either.as_either', 'as_either', (['TypeError'], {}), '(TypeError)\n', (75, 86), False, 'from python_on_rails.either import as_either, Failure, Success\n'), ((123, 134), 'python_on_rails.either.as_either', 'as_either', ([], {}), '()\n', (132, 134), False, 'from python_on_rails.either import as_either, Failure, Success\n'), ((224, 234), 'python_on_rails.either.Success', 'Success', (['(1)'], {}), '(1)\n', (231, 234), False, 'from python_on_rails.either import as_either, Failure, Success\n'), ((410, 424), 'python_on_rails.either.Success', 'Success', (['"""NaN"""'], {}), "('NaN')\n", (417, 424), False, 'from python_on_rails.either import as_either, Failure, Success\n')] |
from django.db import models
# Create your models here.
# Station
class Stations(models.Model):
stationName = models.CharField(max_length=100)
stationLocation = models.CharField(max_length=100)
stationStaffId = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
    def __str__(self):
return self.stationName
# Customers
class Customers(models.Model):
customerName = models.CharField(max_length=100)
customerPhone = models.CharField(max_length=100)
customerId = models.CharField(max_length=100)
customerStartLoc = models.CharField(max_length=100)
customerDestinationLoc = models.CharField(max_length=100)
stationStaffId = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
    def __str__(self):
return self.customerName
# Items
class Items(models.Model):
itemName = models.CharField(max_length=100)
itemType = models.CharField(max_length=100)
Quantity = models.CharField(max_length=100)
originStation = models.CharField(max_length=100)
originCounty = models.CharField(max_length=100)
receiverName = models.CharField(max_length=100)
receiverPhone = models.CharField(max_length=100)
destinationAddress = models.CharField(max_length=100)
destinationCounty = models.CharField(max_length=100)
    dateSend = models.CharField(max_length=100)
dateExpected = models.CharField(max_length=100)
def __str__(self):
return self.itemName
# Payments
class Payments(models.Model):
customerPhone = models.CharField(max_length=100)
paymentAmount = models.CharField(max_length=100)
paymentMeans = models.EmailField(max_length=100)
code = models.CharField(max_length=100)
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.customerPhone
| [
"django.db.models.DateTimeField",
"django.db.models.EmailField",
"django.db.models.CharField"
] | [((119, 151), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (135, 151), False, 'from django.db import models\n'), ((174, 206), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (190, 206), False, 'from django.db import models\n'), ((229, 261), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (245, 261), False, 'from django.db import models\n'), ((274, 313), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (294, 313), False, 'from django.db import models\n'), ((436, 468), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (452, 468), False, 'from django.db import models\n'), ((492, 524), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (508, 524), False, 'from django.db import models\n'), ((548, 580), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (564, 580), False, 'from django.db import models\n'), ((604, 636), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (620, 636), False, 'from django.db import models\n'), ((666, 698), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (682, 698), False, 'from django.db import models\n'), ((721, 753), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (737, 753), False, 'from django.db import models\n'), ((776, 815), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (796, 815), False, 'from django.db import models\n'), ((923, 955), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (939, 955), False, 'from django.db import models\n'), ((971, 1003), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (987, 1003), False, 'from django.db import models\n'), ((1019, 1051), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1035, 1051), False, 'from django.db import models\n'), ((1072, 1104), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1088, 1104), False, 'from django.db import models\n'), ((1124, 1156), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1140, 1156), False, 'from django.db import models\n'), ((1176, 1208), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1192, 1208), False, 'from django.db import models\n'), ((1229, 1261), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1245, 1261), False, 'from django.db import models\n'), ((1287, 1319), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1303, 1319), False, 'from django.db import models\n'), ((1344, 1376), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1360, 1376), False, 'from django.db import models\n'), ((1391, 1423), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': 
'(100)'}), '(max_length=100)\n', (1407, 1423), False, 'from django.db import models\n'), ((1443, 1475), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1459, 1475), False, 'from django.db import models\n'), ((1598, 1630), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1614, 1630), False, 'from django.db import models\n'), ((1658, 1690), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1674, 1690), False, 'from django.db import models\n'), ((1718, 1751), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1735, 1751), False, 'from django.db import models\n'), ((1780, 1812), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1796, 1812), False, 'from django.db import models\n'), ((1840, 1879), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1860, 1879), False, 'from django.db import models\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch - TF 2.0 general utilities."""
import logging
import os
import re
import numpy
logger = logging.getLogger(__name__)
def convert_tf_weight_name_to_pt_weight_name(tf_name, start_prefix_to_remove=""):
""" Convert a TF 2.0 model variable name in a pytorch model weight name.
Conventions for TF2.0 scopes -> PyTorch attribute names conversions:
- '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
- '_._' is replaced by a new level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
return tuple with:
- pytorch model weight name
        - transpose: boolean indicating whether the TF 2.0 and PyTorch weight matrices are transposed with regard to each other
"""
tf_name = tf_name.replace(":0", "") # device ids
tf_name = re.sub(
r"/[^/]*___([^/]*)/", r"/\1/", tf_name
) # '$1___$2' is replaced by $2 (can be used to duplicate or remove layers in TF2.0 vs PyTorch)
tf_name = tf_name.replace(
"_._", "/"
) # '_._' is replaced by a level separation (can be used to convert TF2.0 lists in PyTorch nn.ModulesList)
tf_name = re.sub(r"//+", "/", tf_name) # Remove empty levels at the end
tf_name = tf_name.split("/") # Convert from TF2.0 '/' separators to PyTorch '.' separators
tf_name = tf_name[1:] # Remove level zero
# When should we transpose the weights
transpose = bool(tf_name[-1] == "kernel" or "emb_projs" in tf_name or "out_projs" in tf_name)
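    # (tf.keras Dense kernels are stored as (in_features, out_features) while
    # torch.nn.Linear weights are (out_features, in_features), hence the transpose.)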
# Convert standard TF2.0 names in PyTorch names
if tf_name[-1] == "kernel" or tf_name[-1] == "embeddings" or tf_name[-1] == "gamma":
tf_name[-1] = "weight"
if tf_name[-1] == "beta":
tf_name[-1] = "bias"
# Remove prefix if needed
tf_name = ".".join(tf_name)
if start_prefix_to_remove:
tf_name = tf_name.replace(start_prefix_to_remove, "", 1)
return tf_name, transpose
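# Illustrative example (the variable name below is an assumption, not taken from a real checkpoint):
# convert_tf_weight_name_to_pt_weight_name("tf_bert/bert/encoder/layer_._0/attention/self/query/kernel:0")
# would return ("bert.encoder.layer.0.attention.self.query.weight", True): the "_._" marker becomes a
# nested ".0" level, "kernel" is renamed to "weight", and the matrix is flagged for transposition.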
#####################
# PyTorch => TF 2.0 #
#####################
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
pt_path = os.path.abspath(pytorch_checkpoint_path)
logger.info("Loading PyTorch weights from {}".format(pt_path))
pt_state_dict = torch.load(pt_path, map_location="cpu")
logger.info("PyTorch checkpoint contains {:,} parameters".format(sum(t.numel() for t in pt_state_dict.values())))
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
pt_state_dict = pt_model.state_dict()
return load_pytorch_weights_in_tf2_model(
tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys
)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch state_dict in a TF 2.0 model.
"""
try:
import torch # noqa: F401
import tensorflow as tf # noqa: F401
from tensorflow.python.keras import backend as K
except ImportError:
logger.error(
"Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if "gamma" in key:
new_key = key.replace("gamma", "weight")
if "beta" in key:
new_key = key.replace("beta", "bias")
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + "."
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
tf_loaded_numel = 0
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(
sw_name, start_prefix_to_remove=start_prefix_to_remove
)
# Find associated numpy array in pytorch model state dict
if name not in pt_state_dict:
if allow_missing_keys:
continue
raise AttributeError("{} not found in PyTorch model".format(name))
array = pt_state_dict[name].numpy()
if transpose:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
tf_loaded_numel += array.size
# logger.warning("Initialize TF weight {}".format(symbolic_weight.name))
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info("Loaded {:,} parameters in the TF 2.0 model.".format(tf_loaded_numel))
logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights))
return tf_model
#####################
# TF 2.0 => PyTorch #
#####################
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load TF 2.0 HDF5 checkpoint in a PyTorch model
We use HDF5 to easily do transfer learning
(see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
import transformers
logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))
# Instantiate and load the associated TF 2.0 model
    tf_model_class_name = "TF" + pt_model.__class__.__name__  # Add "TF" at the beginning
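    # (e.g. a PyTorch BertForSequenceClassification resolves to transformers.TFBertForSequenceClassification)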
tf_model_class = getattr(transformers, tf_model_class_name)
tf_model = tf_model_class(pt_model.config)
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tf_model(tf_inputs, training=False) # Make sure model is built
tf_model.load_weights(tf_checkpoint_path, by_name=True)
return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys)
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
""" Load TF 2.0 model in a pytorch model
"""
weights = tf_model.weights
return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys)
def load_tf2_weights_in_pytorch_model(pt_model, tf_weights, allow_missing_keys=False):
""" Load TF2.0 symbolic weights in a PyTorch model
"""
try:
import tensorflow as tf # noqa: F401
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
new_pt_params_dict = {}
current_pt_params_dict = dict(pt_model.named_parameters())
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ""
if not any(s.startswith(pt_model.base_model_prefix) for s in current_pt_params_dict.keys()):
start_prefix_to_remove = pt_model.base_model_prefix + "."
# Build a map from potential PyTorch weight names to TF 2.0 Variables
tf_weights_map = {}
for tf_weight in tf_weights:
pt_name, transpose = convert_tf_weight_name_to_pt_weight_name(
tf_weight.name, start_prefix_to_remove=start_prefix_to_remove
)
tf_weights_map[pt_name] = (tf_weight.numpy(), transpose)
all_tf_weights = set(list(tf_weights_map.keys()))
loaded_pt_weights_data_ptr = {}
missing_keys_pt = []
for pt_weight_name, pt_weight in current_pt_params_dict.items():
        # Handle PyTorch shared weights (not duplicated in TF 2.0)
if pt_weight.data_ptr() in loaded_pt_weights_data_ptr:
new_pt_params_dict[pt_weight_name] = loaded_pt_weights_data_ptr[pt_weight.data_ptr()]
continue
# Find associated numpy array in pytorch model state dict
if pt_weight_name not in tf_weights_map:
if allow_missing_keys:
missing_keys_pt.append(pt_weight_name)
continue
raise AttributeError("{} not found in TF 2.0 model".format(pt_weight_name))
array, transpose = tf_weights_map[pt_weight_name]
if transpose:
array = numpy.transpose(array)
if len(pt_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(pt_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(pt_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (pt_weight.shape, array.shape)
raise e
# logger.warning("Initialize PyTorch weight {}".format(pt_weight_name))
new_pt_params_dict[pt_weight_name] = torch.from_numpy(array)
loaded_pt_weights_data_ptr[pt_weight.data_ptr()] = torch.from_numpy(array)
all_tf_weights.discard(pt_weight_name)
missing_keys, unexpected_keys = pt_model.load_state_dict(new_pt_params_dict, strict=False)
missing_keys += missing_keys_pt
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from TF 2.0 model: {}".format(pt_model.__class__.__name__, missing_keys)
)
if len(unexpected_keys) > 0:
logger.info(
"Weights from TF 2.0 model not used in {}: {}".format(pt_model.__class__.__name__, unexpected_keys)
)
logger.info("Weights or buffers not loaded from TF 2.0 model: {}".format(all_tf_weights))
return pt_model
| [
"logging.getLogger",
"tensorflow.python.keras.backend.batch_set_value",
"torch.load",
"torch.from_numpy",
"numpy.squeeze",
"numpy.expand_dims",
"os.path.abspath",
"re.sub",
"numpy.transpose"
] | [((802, 829), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (819, 829), False, 'import logging\n'), ((1569, 1614), 're.sub', 're.sub', (['"""/[^/]*___([^/]*)/"""', '"""/\\\\1/"""', 'tf_name'], {}), "('/[^/]*___([^/]*)/', '/\\\\1/', tf_name)\n", (1575, 1614), False, 'import re\n'), ((1901, 1928), 're.sub', 're.sub', (['"""//+"""', '"""/"""', 'tf_name'], {}), "('//+', '/', tf_name)\n", (1907, 1928), False, 'import re\n'), ((3319, 3359), 'os.path.abspath', 'os.path.abspath', (['pytorch_checkpoint_path'], {}), '(pytorch_checkpoint_path)\n', (3334, 3359), False, 'import os\n'), ((3448, 3487), 'torch.load', 'torch.load', (['pt_path'], {'map_location': '"""cpu"""'}), "(pt_path, map_location='cpu')\n", (3458, 3487), False, 'import torch\n'), ((7272, 7310), 'tensorflow.python.keras.backend.batch_set_value', 'K.batch_set_value', (['weight_value_tuples'], {}), '(weight_value_tuples)\n', (7289, 7310), True, 'from tensorflow.python.keras import backend as K\n'), ((12182, 12205), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (12198, 12205), False, 'import torch\n'), ((12265, 12288), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (12281, 12288), False, 'import torch\n'), ((6610, 6632), 'numpy.transpose', 'numpy.transpose', (['array'], {}), '(array)\n', (6625, 6632), False, 'import numpy\n'), ((6712, 6732), 'numpy.squeeze', 'numpy.squeeze', (['array'], {}), '(array)\n', (6725, 6732), False, 'import numpy\n'), ((11646, 11668), 'numpy.transpose', 'numpy.transpose', (['array'], {}), '(array)\n', (11661, 11668), False, 'import numpy\n'), ((11742, 11762), 'numpy.squeeze', 'numpy.squeeze', (['array'], {}), '(array)\n', (11755, 11762), False, 'import numpy\n'), ((6813, 6845), 'numpy.expand_dims', 'numpy.expand_dims', (['array'], {'axis': '(0)'}), '(array, axis=0)\n', (6830, 6845), False, 'import numpy\n'), ((11837, 11869), 'numpy.expand_dims', 'numpy.expand_dims', (['array'], {'axis': '(0)'}), '(array, axis=0)\n', (11854, 11869), False, 'import numpy\n')] |
import logging
import uuid
from django.db import models
from django.urls import reverse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from model_utils.managers import InheritanceManager
from mayan.apps.django_gpg.exceptions import VerificationError
from mayan.apps.django_gpg.models import Key
from mayan.apps.documents.models import DocumentVersion
from mayan.apps.storage.classes import DefinedStorageLazy
from .literals import STORAGE_NAME_DOCUMENT_SIGNATURES_DETACHED_SIGNATURE
from .managers import DetachedSignatureManager, EmbeddedSignatureManager
logger = logging.getLogger(name=__name__)
def upload_to(*args, **kwargs):
return force_text(s=uuid.uuid4())
class SignatureBaseModel(models.Model):
"""
Fields:
    * key_id - Key Identifier - This is what uniquely identifies a key. No
    two keys in the world have the same Key ID. The Key ID is also used to
locate a key in the key servers: http://pgp.mit.edu
* signature_id - Signature ID - Every time a key is used to sign something
it will generate a unique signature ID. No two signature IDs are the same,
even when using the same key.
"""
document_version = models.ForeignKey(
editable=False, on_delete=models.CASCADE, related_name='signatures',
to=DocumentVersion, verbose_name=_('Document version')
)
# Basic fields
date = models.DateField(
blank=True, editable=False, null=True, verbose_name=_('Date signed')
)
key_id = models.CharField(
help_text=_('ID of the key that will be used to sign the document.'),
max_length=40, verbose_name=_('Key ID')
)
# With proper key
signature_id = models.CharField(
blank=True, editable=False, null=True, max_length=64,
verbose_name=_('Signature ID')
)
public_key_fingerprint = models.CharField(
blank=True, editable=False, null=True, max_length=40,
verbose_name=_('Public key fingerprint')
)
objects = InheritanceManager()
class Meta:
ordering = ('pk',)
verbose_name = _('Document version signature')
verbose_name_plural = _('Document version signatures')
def __str__(self):
return self.signature_id or '{} - {}'.format(self.date, self.key_id)
def get_absolute_url(self):
return reverse(
viewname='signatures:document_version_signature_details',
kwargs={'signature_id': self.pk}
)
def get_key_id(self):
if self.public_key_fingerprint:
return self.public_key_fingerprint[-16:]
else:
return self.key_id
def get_signature_type_display(self):
if self.is_detached:
return _('Detached')
else:
return _('Embedded')
@property
def is_detached(self):
return hasattr(self, 'signature_file')
@property
def is_embedded(self):
return not hasattr(self, 'signature_file')
class EmbeddedSignature(SignatureBaseModel):
objects = EmbeddedSignatureManager()
class Meta:
verbose_name = _('Document version embedded signature')
verbose_name_plural = _('Document version embedded signatures')
def save(self, *args, **kwargs):
logger.debug(msg='checking for embedded signature')
if self.pk:
raw = True
else:
raw = False
with self.document_version.open(raw=raw) as file_object:
try:
verify_result = Key.objects.verify_file(
file_object=file_object
)
except VerificationError as exception:
# Not signed
logger.debug(
'embedded signature verification error; %s', exception
)
else:
self.date = verify_result.date
self.key_id = verify_result.key_id
self.signature_id = verify_result.signature_id
self.public_key_fingerprint = verify_result.pubkey_fingerprint
super(EmbeddedSignature, self).save(*args, **kwargs)
class DetachedSignature(SignatureBaseModel):
signature_file = models.FileField(
blank=True, help_text=_(
'Signature file previously generated.'
), null=True, storage=DefinedStorageLazy(
name=STORAGE_NAME_DOCUMENT_SIGNATURES_DETACHED_SIGNATURE
), upload_to=upload_to, verbose_name=_('Signature file')
)
objects = DetachedSignatureManager()
class Meta:
verbose_name = _('Document version detached signature')
verbose_name_plural = _('Document version detached signatures')
def __str__(self):
return '{}-{}'.format(self.document_version, _('signature'))
def delete(self, *args, **kwargs):
if self.signature_file.name:
self.signature_file.storage.delete(name=self.signature_file.name)
super(DetachedSignature, self).delete(*args, **kwargs)
def save(self, *args, **kwargs):
with self.document_version.open() as file_object:
try:
verify_result = Key.objects.verify_file(
file_object=file_object, signature_file=self.signature_file
)
except VerificationError as exception:
# Not signed
logger.debug(
'detached signature verification error; %s', exception
)
else:
self.signature_file.seek(0)
self.date = verify_result.date
self.key_id = verify_result.key_id
self.signature_id = verify_result.signature_id
self.public_key_fingerprint = verify_result.pubkey_fingerprint
return super(DetachedSignature, self).save(*args, **kwargs)
| [
"logging.getLogger",
"model_utils.managers.InheritanceManager",
"django.utils.translation.ugettext_lazy",
"mayan.apps.django_gpg.models.Key.objects.verify_file",
"uuid.uuid4",
"django.urls.reverse",
"mayan.apps.storage.classes.DefinedStorageLazy"
] | [((624, 656), 'logging.getLogger', 'logging.getLogger', ([], {'name': '__name__'}), '(name=__name__)\n', (641, 656), False, 'import logging\n'), ((2025, 2045), 'model_utils.managers.InheritanceManager', 'InheritanceManager', ([], {}), '()\n', (2043, 2045), False, 'from model_utils.managers import InheritanceManager\n'), ((2113, 2144), 'django.utils.translation.ugettext_lazy', '_', (['"""Document version signature"""'], {}), "('Document version signature')\n", (2114, 2144), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2175, 2207), 'django.utils.translation.ugettext_lazy', '_', (['"""Document version signatures"""'], {}), "('Document version signatures')\n", (2176, 2207), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2357, 2461), 'django.urls.reverse', 'reverse', ([], {'viewname': '"""signatures:document_version_signature_details"""', 'kwargs': "{'signature_id': self.pk}"}), "(viewname='signatures:document_version_signature_details', kwargs={\n 'signature_id': self.pk})\n", (2364, 2461), False, 'from django.urls import reverse\n'), ((3118, 3158), 'django.utils.translation.ugettext_lazy', '_', (['"""Document version embedded signature"""'], {}), "('Document version embedded signature')\n", (3119, 3158), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3189, 3230), 'django.utils.translation.ugettext_lazy', '_', (['"""Document version embedded signatures"""'], {}), "('Document version embedded signatures')\n", (3190, 3230), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4586, 4626), 'django.utils.translation.ugettext_lazy', '_', (['"""Document version detached signature"""'], {}), "('Document version detached signature')\n", (4587, 4626), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4657, 4698), 'django.utils.translation.ugettext_lazy', '_', (['"""Document version detached signatures"""'], {}), "('Document version detached signatures')\n", (4658, 4698), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((715, 727), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (725, 727), False, 'import uuid\n'), ((1358, 1379), 'django.utils.translation.ugettext_lazy', '_', (['"""Document version"""'], {}), "('Document version')\n", (1359, 1379), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1494, 1510), 'django.utils.translation.ugettext_lazy', '_', (['"""Date signed"""'], {}), "('Date signed')\n", (1495, 1510), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1566, 1624), 'django.utils.translation.ugettext_lazy', '_', (['"""ID of the key that will be used to sign the document."""'], {}), "('ID of the key that will be used to sign the document.')\n", (1567, 1624), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1662, 1673), 'django.utils.translation.ugettext_lazy', '_', (['"""Key ID"""'], {}), "('Key ID')\n", (1663, 1673), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1822, 1839), 'django.utils.translation.ugettext_lazy', '_', (['"""Signature ID"""'], {}), "('Signature ID')\n", (1823, 1839), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1976, 2003), 'django.utils.translation.ugettext_lazy', '_', (['"""Public key fingerprint"""'], {}), "('Public key fingerprint')\n", (1977, 2003), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2747, 2760), 'django.utils.translation.ugettext_lazy', '_', (['"""Detached"""'], {}), "('Detached')\n", (2748, 
2760), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2794, 2807), 'django.utils.translation.ugettext_lazy', '_', (['"""Embedded"""'], {}), "('Embedded')\n", (2795, 2807), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4260, 4301), 'django.utils.translation.ugettext_lazy', '_', (['"""Signature file previously generated."""'], {}), "('Signature file previously generated.')\n", (4261, 4301), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4344, 4420), 'mayan.apps.storage.classes.DefinedStorageLazy', 'DefinedStorageLazy', ([], {'name': 'STORAGE_NAME_DOCUMENT_SIGNATURES_DETACHED_SIGNATURE'}), '(name=STORAGE_NAME_DOCUMENT_SIGNATURES_DETACHED_SIGNATURE)\n', (4362, 4420), False, 'from mayan.apps.storage.classes import DefinedStorageLazy\n'), ((4478, 4497), 'django.utils.translation.ugettext_lazy', '_', (['"""Signature file"""'], {}), "('Signature file')\n", (4479, 4497), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4776, 4790), 'django.utils.translation.ugettext_lazy', '_', (['"""signature"""'], {}), "('signature')\n", (4777, 4790), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3526, 3574), 'mayan.apps.django_gpg.models.Key.objects.verify_file', 'Key.objects.verify_file', ([], {'file_object': 'file_object'}), '(file_object=file_object)\n', (3549, 3574), False, 'from mayan.apps.django_gpg.models import Key\n'), ((5155, 5244), 'mayan.apps.django_gpg.models.Key.objects.verify_file', 'Key.objects.verify_file', ([], {'file_object': 'file_object', 'signature_file': 'self.signature_file'}), '(file_object=file_object, signature_file=self.\n signature_file)\n', (5178, 5244), False, 'from mayan.apps.django_gpg.models import Key\n')] |
from ConfigParser import ConfigParser
from sys import argv
REPLACE_PROPERTIES = ["file_path", "database_connection", "new_file_path"]
MAIN_SECTION = "app:main"
def sync():
    # Add or replace the relevant properties from galaxy.ini
# into reports.ini
reports_config_file = "config/reports.ini"
if len(argv) > 1:
reports_config_file = argv[1]
universe_config_file = "config/galaxy.ini"
if len(argv) > 2:
universe_config_file = argv[2]
parser = ConfigParser()
parser.read(universe_config_file)
with open(reports_config_file, "r") as f:
reports_config_lines = f.readlines()
replaced_properties = set([])
with open(reports_config_file, "w") as f:
# Write all properties from reports config replacing as
# needed.
for reports_config_line in reports_config_lines:
(line, replaced_property) = get_synced_line(reports_config_line, parser)
if replaced_property:
replaced_properties.add(replaced_property)
f.write(line)
# If any properties appear in universe config and not in
# reports write these as well.
for replacement_property in REPLACE_PROPERTIES:
if parser.has_option(MAIN_SECTION, replacement_property) and \
not (replacement_property in replaced_properties):
f.write(get_universe_line(replacement_property, parser))
def get_synced_line(reports_line, universe_config):
# Cycle through properties to replace and perform replacement on
# this line if needed.
synced_line = reports_line
replaced_property = None
for replacement_property in REPLACE_PROPERTIES:
if reports_line.startswith(replacement_property) and \
universe_config.has_option(MAIN_SECTION, replacement_property):
synced_line = get_universe_line(replacement_property, universe_config)
replaced_property = replacement_property
break
return (synced_line, replaced_property)
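# Illustrative behaviour (the property value is an assumption, not taken from a real config):
# if galaxy.ini's [app:main] section sets "file_path = database/files", a reports.ini line
# starting with "file_path" is replaced by "file_path=database/files", built by
# get_universe_line() below; properties missing from reports.ini are appended at the end.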
def get_universe_line(property_name, universe_config):
return "%s=%s\n" % (property_name, universe_config.get(MAIN_SECTION, property_name))
if __name__ == '__main__':
sync()
| [
"ConfigParser.ConfigParser"
] | [((489, 503), 'ConfigParser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (501, 503), False, 'from ConfigParser import ConfigParser\n')] |
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# -------------------------------------------------------------------------
#
# test_ssh.py
#
# IDENTIFICATION
# src/gausskernel/dbmind/xtuner/test/test_ssh.py
#
# -------------------------------------------------------------------------
from ssh import ExecutorFactory
def test_remote():
    exe = ExecutorFactory().set_host('').set_user('').set_pwd('').get_executor()  # fill in your host, user and password here
print(exe.exec_command_sync("cat /proc/cpuinfo | grep \"processor\" | wc -l"))
print(exe.exec_command_sync("cat /proc/self/cmdline | xargs -0"))
print(exe.exec_command_sync("echo -e 'hello \\n world'")[0].count('\n'))
print(exe.exec_command_sync("echo -e 'hello \\n world'")[0])
print(exe.exec_command_sync('echo $SHELL'))
def test_local():
exe = ExecutorFactory().get_executor()
print(exe.exec_command_sync("ping -h"))
if __name__ == "__main__":
test_remote()
test_local()
| [
"ssh.ExecutorFactory"
] | [((1307, 1324), 'ssh.ExecutorFactory', 'ExecutorFactory', ([], {}), '()\n', (1322, 1324), False, 'from ssh import ExecutorFactory\n'), ((835, 852), 'ssh.ExecutorFactory', 'ExecutorFactory', ([], {}), '()\n', (850, 852), False, 'from ssh import ExecutorFactory\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
# pylint: disable= missing-docstring
"Additional image transforms."
import random
import math
import numpy as np
from mxnet import image, nd
from mxnet.gluon import Block
__all__ = ['RandomCrop', 'RandomErasing']
class RandomCrop(Block):
"""Randomly crop `src` with `size` (width, height).
Padding is optional.
Upsample result if `src` is smaller than `size`.
Parameters
----------
size : int or tuple of (W, H)
Size of the final output.
pad: int or tuple
if int, size of the zero-padding
if tuple, number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all axes.
interpolation : int
Interpolation method for resizing. By default uses bilinear
interpolation. See OpenCV's resize function for available choices.
Inputs:
- **data**: input tensor with (Hi x Wi x C) shape.
Outputs:
- **out**: output tensor with (size[0] x size[1] x C) or (size x size x C) shape.
"""
def __init__(self, size, pad=None, interpolation=2):
super(RandomCrop, self).__init__()
numeric_types = (float, int, np.generic)
if isinstance(size, numeric_types):
size = (size, size)
self._args = (size, interpolation)
self.pad = ((pad, pad), (pad, pad), (0, 0)) if isinstance(pad, int) else pad
def forward(self, x):
if self.pad:
return image.random_crop(nd.array(
np.pad(x.asnumpy(), self.pad, mode='constant', constant_values=0)), *self._args)[0]
else:
return image.random_crop(x, *self._args)[0]
class RandomErasing(Block):
    """Randomly erase a rectangular region of `src` with probability `probability`;
    the fraction of the image area erased is drawn between `s_min` and `s_max`.
    `ratio` controls the aspect ratio (width to height) of the erased region.
    `mean` is the fill value used inside the erased region.
Parameters
----------
probability : float
Probability of erasing.
s_min : float
Min area to all area.
s_max : float
Max area to all area.
ratio : float
The ratio between width and height.
mean : int or tuple of (R, G, B)
The value in erasing area.
Inputs:
- **data**: input tensor with (Hi x Wi x C) shape.
Outputs:
- **out**: output tensor with (Hi x Wi x C) shape.
"""
def __init__(self, probability=0.5, s_min=0.02, s_max=0.4, ratio=0.3,
mean=(125.31, 122.96, 113.86)):
super(RandomErasing, self).__init__()
self.probability = probability
self.mean = mean
self.s_min = s_min
self.s_max = s_max
self.ratio = ratio
def forward(self, x):
if not isinstance(self.probability, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.s_min, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.s_max, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.ratio, float):
raise TypeError('Got inappropriate size arg')
if not isinstance(self.mean, (int, tuple)):
raise TypeError('Got inappropriate size arg')
if random.uniform(0, 1) > self.probability:
return x
width, height, _ = x.shape
area = width * height
target_area = random.uniform(self.s_min, self.s_max) * area
aspect_ratio = random.uniform(self.ratio, 1/self.ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w < width and h < height:
x1 = random.randint(0, width - w)
y1 = random.randint(0, height - h)
x[x1:x1+w, y1:y1+h, 0] = self.mean[0]
x[x1:x1+w, y1:y1+h, 1] = self.mean[1]
x[x1:x1+w, y1:y1+h, 2] = self.mean[2]
return x
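# Hedged usage sketch (shapes and pixel values below are illustrative assumptions only):
if __name__ == "__main__":
    crop = RandomCrop(size=32, pad=4)
    erase = RandomErasing(probability=1.0)
    img = nd.ones((64, 64, 3)) * 127  # dummy HWC image
    out = erase(crop(img))
    print(out.shape)  # expected: (32, 32, 3)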
| [
"random.uniform",
"math.sqrt",
"random.randint",
"mxnet.image.random_crop"
] | [((4465, 4507), 'random.uniform', 'random.uniform', (['self.ratio', '(1 / self.ratio)'], {}), '(self.ratio, 1 / self.ratio)\n', (4479, 4507), False, 'import random\n'), ((4246, 4266), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4260, 4266), False, 'import random\n'), ((4396, 4434), 'random.uniform', 'random.uniform', (['self.s_min', 'self.s_max'], {}), '(self.s_min, self.s_max)\n', (4410, 4434), False, 'import random\n'), ((4684, 4712), 'random.randint', 'random.randint', (['(0)', '(width - w)'], {}), '(0, width - w)\n', (4698, 4712), False, 'import random\n'), ((4730, 4759), 'random.randint', 'random.randint', (['(0)', '(height - h)'], {}), '(0, height - h)\n', (4744, 4759), False, 'import random\n'), ((2633, 2666), 'mxnet.image.random_crop', 'image.random_crop', (['x', '*self._args'], {}), '(x, *self._args)\n', (2650, 2666), False, 'from mxnet import image, nd\n'), ((4528, 4565), 'math.sqrt', 'math.sqrt', (['(target_area * aspect_ratio)'], {}), '(target_area * aspect_ratio)\n', (4537, 4565), False, 'import math\n'), ((4590, 4627), 'math.sqrt', 'math.sqrt', (['(target_area / aspect_ratio)'], {}), '(target_area / aspect_ratio)\n', (4599, 4627), False, 'import math\n')] |
import plotly.graph_objects as go
import streamlit as st
import pandas as pd
from utils import *
import glob
import wfdb
import os
ANNOTATIONS_COL_NAME = 'annotations'
'''
# MIT-BIH Arrhythmia DB Exploration
'''
record_ids = [os.path.basename(file)[:-4] for file in glob.glob('data/*.dat')]
if len(record_ids) == 0:
st.write('Warning ! No data could be found under the ./data/ directory.',
'*\*.dat*, *\*.hea*, *\*.atr* files and such should be placed ',
'immediately under the ./data/ directory')
else:
record_ids.sort()
record_id = st.selectbox('Select a record id', record_ids)
record = wfdb.rdrecord(f'data/{record_id}')
annotation = wfdb.rdann(f'data/{record_id}', 'atr')
st.write('Signals found in this record :')
for idx, signal in enumerate(record.sig_name):
st.write(f'- `{signal}` : in {record.units[idx]}, with a frequency of '
f'{record.fs * record.samps_per_frame[idx]}hz')
st.write(f'Comments for this record : {record.comments}')
signals_df = pd.DataFrame(record.p_signal, columns=record.sig_name)
annot_serie = pd.Series(annotation.symbol, index=annotation.sample,
name=ANNOTATIONS_COL_NAME)
full_df = pd.concat([signals_df, annot_serie], axis=1)
''' ## Annotations '''
beat_annot_count = annot_serie.isin(dict(beat_annotations)).sum()
non_beat_annot_count = annot_serie.isin(dict(non_beat_annotations)).sum()
unique_annot = annot_serie.value_counts().index.values
st.write(f'This record contains `{annot_serie.size}` annotations '
f'among which `{beat_annot_count}` beat annotations and '
f'`{non_beat_annot_count}` non beat annotation(s).')
st.write('The annotations are the followings :')
for annot in unique_annot:
st.write(f'- `{annot}` : {annotation_definitions[annot]}')
st.write('More explanations on the annotations are available here : '
'https://archive.physionet.org/physiobank/annotations.shtml')
# Plot counts for each annotation
annot_counts_df = annot_serie \
.value_counts() \
.rename_axis(ANNOTATIONS_COL_NAME) \
.reset_index(name='counts')
bar_fig = go.Figure(data=[go.Bar(x=annot_counts_df[ANNOTATIONS_COL_NAME],
y=annot_counts_df['counts'],
text=annot_counts_df['counts'],
textposition='auto'
)])
bar_fig.update_layout(title='Annotations by count', yaxis_title='counts',
xaxis_title='annotations')
st.write(bar_fig)
''' ## Explore full dataset '''
signal = st.selectbox('Select a signal', record.sig_name)
# Plot signals and annotations
matching_rows_by_annot = {}
for annot in unique_annot:
matching_rows_by_annot[annot] = full_df[ANNOTATIONS_COL_NAME] == annot
fig = go.Figure(layout=go.Layout(title=go.layout.Title(
text='{} signal with annotations'.format(signal))))
fig.add_trace(go.Scatter(x=full_df.index.values,
y=full_df[signal],
mode='lines',
name=signal))
for annot, annot_matching_rows in matching_rows_by_annot.items():
fig.add_trace(go.Scatter(x=full_df.index[annot_matching_rows].values,
y=full_df[annot_matching_rows][signal].values,
mode='markers',
name='{} (annot)'.format(annot)))
st.plotly_chart(fig)
| [
"pandas.Series",
"plotly.graph_objects.Bar",
"streamlit.write",
"wfdb.rdann",
"plotly.graph_objects.Scatter",
"wfdb.rdrecord",
"os.path.basename",
"streamlit.selectbox",
"pandas.DataFrame",
"streamlit.plotly_chart",
"pandas.concat",
"glob.glob"
] | [((324, 516), 'streamlit.write', 'st.write', (['"""Warning ! No data could be found under the ./data/ directory."""', '"""*\\\\*.dat*, *\\\\*.hea*, *\\\\*.atr* files and such should be placed """', '"""immediately under the ./data/ directory"""'], {}), "('Warning ! No data could be found under the ./data/ directory.',\n '*\\\\*.dat*, *\\\\*.hea*, *\\\\*.atr* files and such should be placed ',\n 'immediately under the ./data/ directory')\n", (332, 516), True, 'import streamlit as st\n'), ((576, 622), 'streamlit.selectbox', 'st.selectbox', (['"""Select a record id"""', 'record_ids'], {}), "('Select a record id', record_ids)\n", (588, 622), True, 'import streamlit as st\n'), ((636, 670), 'wfdb.rdrecord', 'wfdb.rdrecord', (['f"""data/{record_id}"""'], {}), "(f'data/{record_id}')\n", (649, 670), False, 'import wfdb\n'), ((688, 726), 'wfdb.rdann', 'wfdb.rdann', (['f"""data/{record_id}"""', '"""atr"""'], {}), "(f'data/{record_id}', 'atr')\n", (698, 726), False, 'import wfdb\n'), ((731, 773), 'streamlit.write', 'st.write', (['"""Signals found in this record :"""'], {}), "('Signals found in this record :')\n", (739, 773), True, 'import streamlit as st\n'), ((974, 1031), 'streamlit.write', 'st.write', (['f"""Comments for this record : {record.comments}"""'], {}), "(f'Comments for this record : {record.comments}')\n", (982, 1031), True, 'import streamlit as st\n'), ((1049, 1103), 'pandas.DataFrame', 'pd.DataFrame', (['record.p_signal'], {'columns': 'record.sig_name'}), '(record.p_signal, columns=record.sig_name)\n', (1061, 1103), True, 'import pandas as pd\n'), ((1122, 1207), 'pandas.Series', 'pd.Series', (['annotation.symbol'], {'index': 'annotation.sample', 'name': 'ANNOTATIONS_COL_NAME'}), '(annotation.symbol, index=annotation.sample, name=ANNOTATIONS_COL_NAME\n )\n', (1131, 1207), True, 'import pandas as pd\n'), ((1245, 1289), 'pandas.concat', 'pd.concat', (['[signals_df, annot_serie]'], {'axis': '(1)'}), '([signals_df, annot_serie], axis=1)\n', (1254, 1289), True, 'import pandas as pd\n'), ((1529, 1708), 'streamlit.write', 'st.write', (['f"""This record contains `{annot_serie.size}` annotations among which `{beat_annot_count}` beat annotations and `{non_beat_annot_count}` non beat annotation(s)."""'], {}), "(\n f'This record contains `{annot_serie.size}` annotations among which `{beat_annot_count}` beat annotations and `{non_beat_annot_count}` non beat annotation(s).'\n )\n", (1537, 1708), True, 'import streamlit as st\n'), ((1737, 1785), 'streamlit.write', 'st.write', (['"""The annotations are the followings :"""'], {}), "('The annotations are the followings :')\n", (1745, 1785), True, 'import streamlit as st\n'), ((1888, 2026), 'streamlit.write', 'st.write', (['"""More explanations on the annotations are available here : https://archive.physionet.org/physiobank/annotations.shtml"""'], {}), "(\n 'More explanations on the annotations are available here : https://archive.physionet.org/physiobank/annotations.shtml'\n )\n", (1896, 2026), True, 'import streamlit as st\n'), ((2661, 2678), 'streamlit.write', 'st.write', (['bar_fig'], {}), '(bar_fig)\n', (2669, 2678), True, 'import streamlit as st\n'), ((2729, 2777), 'streamlit.selectbox', 'st.selectbox', (['"""Select a signal"""', 'record.sig_name'], {}), "('Select a signal', record.sig_name)\n", (2741, 2777), True, 'import streamlit as st\n'), ((3610, 3630), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {}), '(fig)\n', (3625, 3630), True, 'import streamlit as st\n'), ((229, 251), 'os.path.basename', 'os.path.basename', (['file'], {}), 
'(file)\n', (245, 251), False, 'import os\n'), ((269, 292), 'glob.glob', 'glob.glob', (['"""data/*.dat"""'], {}), "('data/*.dat')\n", (278, 292), False, 'import glob\n'), ((833, 958), 'streamlit.write', 'st.write', (['f"""- `{signal}` : in {record.units[idx]}, with a frequency of {record.fs * record.samps_per_frame[idx]}hz"""'], {}), "(\n f'- `{signal}` : in {record.units[idx]}, with a frequency of {record.fs * record.samps_per_frame[idx]}hz'\n )\n", (841, 958), True, 'import streamlit as st\n'), ((1825, 1883), 'streamlit.write', 'st.write', (['f"""- `{annot}` : {annotation_definitions[annot]}"""'], {}), "(f'- `{annot}` : {annotation_definitions[annot]}')\n", (1833, 1883), True, 'import streamlit as st\n'), ((3093, 3178), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'full_df.index.values', 'y': 'full_df[signal]', 'mode': '"""lines"""', 'name': 'signal'}), "(x=full_df.index.values, y=full_df[signal], mode='lines', name=signal\n )\n", (3103, 3178), True, 'import plotly.graph_objects as go\n'), ((2245, 2378), 'plotly.graph_objects.Bar', 'go.Bar', ([], {'x': 'annot_counts_df[ANNOTATIONS_COL_NAME]', 'y': "annot_counts_df['counts']", 'text': "annot_counts_df['counts']", 'textposition': '"""auto"""'}), "(x=annot_counts_df[ANNOTATIONS_COL_NAME], y=annot_counts_df['counts'],\n text=annot_counts_df['counts'], textposition='auto')\n", (2251, 2378), True, 'import plotly.graph_objects as go\n')] |
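A minimal offline sketch of the same record and annotation access that the Streamlit app above performs (the record id 100 is illustrative and assumes the corresponding data/100.dat, data/100.hea and data/100.atr files are present):

import pandas as pd
import wfdb

record = wfdb.rdrecord('data/100')
annotation = wfdb.rdann('data/100', 'atr')
signals_df = pd.DataFrame(record.p_signal, columns=record.sig_name)
annot_counts = pd.Series(annotation.symbol).value_counts()
print(signals_df.shape, annot_counts.head())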
import pandas as pd
import ete2
from ete2 import faces, Tree, AttrFace, TreeStyle
import pylab
from matplotlib.colors import hex2color, rgb2hex, hsv_to_rgb, rgb_to_hsv
kelly_colors_hex = [
0xFFB300, # Vivid Yellow
0x803E75, # Strong Purple
0xFF6800, # Vivid Orange
0xA6BDD7, # Very Light Blue
0xC10020, # Vivid Red
0xCEA262, # Grayish Yellow
0x817066, # Medium Gray
# The following don't work well for people with defective color vision
0x007D34, # Vivid Green
0xF6768E, # Strong Purplish Pink
0x00538A, # Strong Blue
0xFF7A5C, # Strong Yellowish Pink
0x53377A, # Strong Violet
0xFF8E00, # Vivid Orange Yellow
0xB32851, # Strong Purplish Red
0xF4C800, # Vivid Greenish Yellow
0x7F180D, # Strong Reddish Brown
0x93AA00, # Vivid Yellowish Green
0x593315, # Deep Yellowish Brown
0xF13A13, # Vivid Reddish Orange
0x232C16, # Dark Olive Green
]
def my_layout(node):
if node.is_leaf():
# If terminal node, draws its name
name_face = AttrFace("name")
else:
# If internal node, draws label with smaller font size
name_face = AttrFace("name", fsize=10)
# Adds the name face to the image at the preferred position
faces.add_face_to_node(name_face, node, column=0, position="branch-right")
def adjust_kelly_brightness(hex_color, val, recon_min, recon_max):
"""set brightness according to change in continuous reconstruction value"""
h, s, v = rgb_to_hsv(hex2color('#{0:06X}'.format(hex_color)))
scale_factor = 1 - (recon_max - val) / (recon_max - recon_min)
v_new = v - (v * (scale_factor))
return rgb2hex(hsv_to_rgb(pd.np.array([h, s, v_new])))
def get_style():
ts = TreeStyle()
# Do not add leaf names automatically
ts.show_leaf_name = False
ts.show_scale = True
ts.force_topology = False
# Use my custom layout
ts.layout_fn = my_layout
return ts
def plot_tree(pt_tree, target_node, out):
#pt_tree, feats, pf2color = get_tree(phenotype = phenotype, feat_list = "top_cor", is_ml_plus_phypat = True, target_node = target_node)
pt_tree.dist = 0
target = pt_tree.search_nodes(name = target_node)[0]
target.render(out + '_tree.pdf', tree_style = get_style())
#target.render(out + '_tree.png', tree_style = get_style())
    return target
def plot_legend(feats, out, pf2color, pf_desc = False, pf_acc = True, include_class = False):
fig = pylab.figure()
figlegend = pylab.figure(figsize = (9, 6))
ax = fig.add_subplot(111)
x = [0,1]
lines = [ax.plot(x, pd.np.ones(len(x)), 'o', color = "#%06x" % (pf2color[feats.index[i]]))[0] for i in range(len(pf2color))]
labels= [i for i in feats.index]
#labels= ["%s" %(feats.loc[:,"Pfam_acc"].iloc[i]) for i in range(feats.shape[0])]
#if include_class:
# labels= ["%s %s" %(labels[i], feats.loc[:, "class"].iloc[i]) for i in range(len(labels))]
#if pf_desc:
# labels = ["%s %s" % (labels[i], pf2short_desc.loc[feats.loc[:,"Pfam_acc"].iloc[i], 1]) for i in range(len(labels))]
#if pf_acc:
# labels = ["%s %s" % (labels[i], pf2acc.loc[feats.loc[:,"Pfam_acc"].iloc[i], 1]) for i in range(len(labels))]
figlegend.legend(lines, labels, markerscale = 2.5, numpoints = 1, frameon = False)
#fig.show()
fig.tight_layout()
figlegend.savefig(out + "_legend.svg")
figlegend.savefig(out + "_legend.png")
return figlegend
def get_tree(phenotype, tree, gain_recon, loss_recon, node_recon, pfam_mapping, feat_list, sample_mapping, threshold = 0.5, target_node = None, are_continuous_features_with_discrete_phenotype = False, max_feats = 10, miscl = None, node_annotation = None):
#read target feats
feats = pd.read_csv(feat_list, index_col = 0, sep = "\t")
pt_tree = ete2.Tree(tree, format = 1)
pt_tree.ladderize()
if not node_annotation is None:
node_table = pd.read_csv(node_annotation, sep = "\t", index_col = 0)
sample_mapping = pd.read_csv(sample_mapping, index_col = 0, sep = "\t")
#read node and edge reconstruction matrices
node_recon = pd.read_csv(node_recon, sep = "\t", index_col = 0)
gain_recon = pd.read_csv(gain_recon, sep = "\t", index_col = 0)
gain_recon.index = ["_".join(("_".join(i.split("_")[:-1]), i.split("_")[-1])) for i in gain_recon.index.values]
loss_recon = pd.read_csv(loss_recon, sep = "\t", index_col = 0)
loss_recon.index = ["_".join(("_".join(i.split("_")[:-1]), i.split("_")[-1])) for i in loss_recon.index.values]
#prune to target node
if target_node is not None:
pt_tree = pt_tree.search_nodes(name = target_node)[0]
node2name = dict((i.name, i.name) for i in pt_tree.traverse(strategy = 'preorder'))
pfams_with_event = set()
pfam2color = {}
#set the style of the branches and nodes according to the posterior probability
top10_feats = feats.iloc[:max_feats,]
#for visualization of continuous feature get the range of values for each feature
if are_continuous_features_with_discrete_phenotype:
recon_min = gain_recon.abs().apply(pd.np.min)
recon_max = gain_recon.abs().apply(pd.np.max)
if not miscl is None:
miscl_m = pd.read_csv(miscl, sep = "\t", index_col = 0)
for n in pt_tree.traverse():
#ignore the root
if n.name == "N1":
continue
if not node_annotation is None:
if n.name in node_table.index:
for attr,i in zip(node_table.columns, range(len(node_table.columns))):
value = node_table.loc[n.name, attr]
if not pd.isnull(value):
if value == 0:
rf = ete2.CircleFace(radius = 8, style = "circle", color = 'red')
elif value == 2:
rf = faces.CircleFace(radius = 8, style = "circle", color = 'orange')
else:
rf = faces.CircleFace(radius = 8, style = "circle", color = 'green')
else:
rf = faces.CircleFace(radius = 8, style = "circle", color = 'grey')
n.add_face(rf, column = i, position = "aligned")
ns = node_recon.loc[n.name, phenotype]
style = ete2.NodeStyle()
style["shape"] = 'square'
style['size'] = 10
if pd.isnull(ns):
style['fgcolor'] = 'grey'
elif ns < threshold:
style['fgcolor'] = 'darkred'
else:
style['fgcolor'] = 'green'
if not n.name == "N1":
branch_id = n.name + "_" + n.up.name
if gain_recon.loc[branch_id, phenotype] > threshold:
style["hz_line_type"] = 1
style["hz_line_color"] = 'green'
style["hz_line_width"] = 3
elif loss_recon.loc[branch_id, phenotype] > threshold:
style["hz_line_type"] = 1
style["hz_line_color"] = 'red'
style["hz_line_width"] = 3
else:
style["hz_line_type"] = 0
style["hz_line_color"] = 'black'
n.set_style(style)
#check if sample was misclassified and add misclassified label
if not miscl is None:
if node2name[n.name] in miscl_m.index:
tf = faces.TextFace("misclassified")
n.add_face(tf, column = 0, position = "branch-right")
#set species name instead of tax id
if n.name in sample_mapping.index:
node2name[n.name] = sample_mapping.loc[n.name,][0]
#add majority feature gains and losses
events = []
for i in range(top10_feats.shape[0]):
if not are_continuous_features_with_discrete_phenotype:
cf = faces.CircleFace(radius = 8, style = "circle", color = kelly_colors_hex[i])
#gain events
if gain_recon.loc[branch_id, top10_feats.index[i]] > threshold:
pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
tf = faces.TextFace("-")
events.append(tf)
pfams_with_event.add(node_recon.index[i])
events.append(cf)
#loss events
elif loss_recon.loc[branch_id, top10_feats.index[i]] > threshold:
pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
tf = faces.TextFace("-")
events.append(tf)
pfams_with_event.add(node_recon.index[i])
events.append(cf)
#continuous features
else:
adjusted_color = adjust_kelly_brightness(kelly_colors_hex[i], abs(loss_recon.loc[branch_id, top10_feats.index[i]]), recon_min.loc[top10_feats.index[i]], recon_max.loc[top10_feats.index[i]])
#tf = faces.TextFace(gain_recon.loc[branch_id, top10_feats.index[i]])
if loss_recon.loc[branch_id, top10_feats.index[i]] < 0:
tf = faces.TextFace("-")
else:
tf = faces.TextFace("+")
cf = faces.CircleFace(radius = 8, style = "circle", color = adjusted_color)
pfam2color[top10_feats.index[i]] = kelly_colors_hex[i]
pfams_with_event.add(node_recon.index[i])
events.append(cf)
events.append(tf)
for i in range(len(events)):
n.add_face(events[i], column = i, position = "branch-top")
for n in pt_tree.traverse():
if n.name in node2name:
n.name = node2name[n.name]
#filtered_pfams = filter(lambda i: i in list(pfams_with_event), top10_feats.loc[:,"Pfam_acc"].values)
#print filtered_pfams
#filtered_ids = pt_gt2id.loc[filtered_pfams, 0] - 1
#print filtered_ids
#top10_feats_with_event = top10_feats.loc[filtered_ids,]
#process node annotation
return pt_tree, top10_feats, pfam2color
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("""visualize target list of features""")
parser.add_argument("node_recon", help = "node ancestral character state reconstruction")
parser.add_argument("gain_recon", help = "gain events ancestral character state reconstruction")
parser.add_argument("loss_recon", help = "loss events ancestral character state reconstruction")
parser.add_argument("tree", help = "tree with internal nodes labeled")
parser.add_argument("pfam_mapping", help = "feature mapping/list")
parser.add_argument("feat_list", help = "list of features")
parser.add_argument("--target_node", default = "N1", help = "list of features")
parser.add_argument("phenotype", help = "target phenotype")
parser.add_argument("--are_continuous_features_with_discrete_phenotype", action = 'store_true', help = "set if using continuous features with a discrete phenotype")
parser.add_argument("threshold", type = float, help = "threshold to call genotype/phenotype events")
parser.add_argument("sample_mapping", help = "mapping between sample ids and names")
parser.add_argument("out", help = "output file")
parser.add_argument("--max_feats", type = int, default = 10, help = "visualize at most max_feats features")
parser.add_argument("--miscl", help = "table of misclassified samples")
parser.add_argument("--node_annotation", help = "table of binary features for labeling the nodes")
a = parser.parse_args()
pt_tree, feats, pf2color = get_tree(node_recon = a.node_recon, gain_recon = a.gain_recon, loss_recon = a.loss_recon, pfam_mapping = a.pfam_mapping, tree = a.tree, feat_list = a.feat_list, phenotype = a.phenotype, target_node = a.target_node, threshold = a.threshold, sample_mapping = a.sample_mapping, are_continuous_features_with_discrete_phenotype = a.are_continuous_features_with_discrete_phenotype, max_feats = a.max_feats, miscl = a.miscl, node_annotation = a.node_annotation)
plot_tree(pt_tree, a.target_node, a.out)
plot_legend(feats, a.out, pf2color)
| [
"ete2.Tree",
"pandas.isnull",
"ete2.faces.CircleFace",
"ete2.CircleFace",
"pandas.read_csv",
"argparse.ArgumentParser",
"ete2.TreeStyle",
"ete2.faces.TextFace",
"pylab.figure",
"ete2.faces.add_face_to_node",
"pandas.np.array",
"ete2.AttrFace",
"ete2.NodeStyle"
] | [((1247, 1321), 'ete2.faces.add_face_to_node', 'faces.add_face_to_node', (['name_face', 'node'], {'column': '(0)', 'position': '"""branch-right"""'}), "(name_face, node, column=0, position='branch-right')\n", (1269, 1321), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((1732, 1743), 'ete2.TreeStyle', 'TreeStyle', ([], {}), '()\n', (1741, 1743), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((2473, 2487), 'pylab.figure', 'pylab.figure', ([], {}), '()\n', (2485, 2487), False, 'import pylab\n'), ((2504, 2532), 'pylab.figure', 'pylab.figure', ([], {'figsize': '(9, 6)'}), '(figsize=(9, 6))\n', (2516, 2532), False, 'import pylab\n'), ((3755, 3800), 'pandas.read_csv', 'pd.read_csv', (['feat_list'], {'index_col': '(0)', 'sep': '"""\t"""'}), "(feat_list, index_col=0, sep='\\t')\n", (3766, 3800), True, 'import pandas as pd\n'), ((3819, 3844), 'ete2.Tree', 'ete2.Tree', (['tree'], {'format': '(1)'}), '(tree, format=1)\n', (3828, 3844), False, 'import ete2\n'), ((4005, 4055), 'pandas.read_csv', 'pd.read_csv', (['sample_mapping'], {'index_col': '(0)', 'sep': '"""\t"""'}), "(sample_mapping, index_col=0, sep='\\t')\n", (4016, 4055), True, 'import pandas as pd\n'), ((4125, 4171), 'pandas.read_csv', 'pd.read_csv', (['node_recon'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(node_recon, sep='\\t', index_col=0)\n", (4136, 4171), True, 'import pandas as pd\n'), ((4193, 4239), 'pandas.read_csv', 'pd.read_csv', (['gain_recon'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(gain_recon, sep='\\t', index_col=0)\n", (4204, 4239), True, 'import pandas as pd\n'), ((4377, 4423), 'pandas.read_csv', 'pd.read_csv', (['loss_recon'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(loss_recon, sep='\\t', index_col=0)\n", (4388, 4423), True, 'import pandas as pd\n'), ((10225, 10285), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""visualize target list of features"""'], {}), "('visualize target list of features')\n", (10248, 10285), False, 'import argparse\n'), ((1040, 1056), 'ete2.AttrFace', 'AttrFace', (['"""name"""'], {}), "('name')\n", (1048, 1056), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((1152, 1178), 'ete2.AttrFace', 'AttrFace', (['"""name"""'], {'fsize': '(10)'}), "('name', fsize=10)\n", (1160, 1178), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((3928, 3979), 'pandas.read_csv', 'pd.read_csv', (['node_annotation'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(node_annotation, sep='\\t', index_col=0)\n", (3939, 3979), True, 'import pandas as pd\n'), ((5221, 5262), 'pandas.read_csv', 'pd.read_csv', (['miscl'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(miscl, sep='\\t', index_col=0)\n", (5232, 5262), True, 'import pandas as pd\n'), ((6297, 6313), 'ete2.NodeStyle', 'ete2.NodeStyle', ([], {}), '()\n', (6311, 6313), False, 'import ete2\n'), ((6386, 6399), 'pandas.isnull', 'pd.isnull', (['ns'], {}), '(ns)\n', (6395, 6399), True, 'import pandas as pd\n'), ((1676, 1702), 'pandas.np.array', 'pd.np.array', (['[h, s, v_new]'], {}), '([h, s, v_new])\n', (1687, 1702), True, 'import pandas as pd\n'), ((7370, 7401), 'ete2.faces.TextFace', 'faces.TextFace', (['"""misclassified"""'], {}), "('misclassified')\n", (7384, 7401), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((7861, 7930), 'ete2.faces.CircleFace', 'faces.CircleFace', ([], {'radius': '(8)', 'style': '"""circle"""', 'color': 'kelly_colors_hex[i]'}), "(radius=8, style='circle', color=kelly_colors_hex[i])\n", (7877, 7930), False, 'from ete2 import faces, Tree, AttrFace, 
TreeStyle\n'), ((9314, 9378), 'ete2.faces.CircleFace', 'faces.CircleFace', ([], {'radius': '(8)', 'style': '"""circle"""', 'color': 'adjusted_color'}), "(radius=8, style='circle', color=adjusted_color)\n", (9330, 9378), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((5628, 5644), 'pandas.isnull', 'pd.isnull', (['value'], {}), '(value)\n', (5637, 5644), True, 'import pandas as pd\n'), ((6100, 6156), 'ete2.faces.CircleFace', 'faces.CircleFace', ([], {'radius': '(8)', 'style': '"""circle"""', 'color': '"""grey"""'}), "(radius=8, style='circle', color='grey')\n", (6116, 6156), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((8162, 8181), 'ete2.faces.TextFace', 'faces.TextFace', (['"""-"""'], {}), "('-')\n", (8176, 8181), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((9194, 9213), 'ete2.faces.TextFace', 'faces.TextFace', (['"""-"""'], {}), "('-')\n", (9208, 9213), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((9269, 9288), 'ete2.faces.TextFace', 'faces.TextFace', (['"""+"""'], {}), "('+')\n", (9283, 9288), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((5718, 5772), 'ete2.CircleFace', 'ete2.CircleFace', ([], {'radius': '(8)', 'style': '"""circle"""', 'color': '"""red"""'}), "(radius=8, style='circle', color='red')\n", (5733, 5772), False, 'import ete2\n'), ((8559, 8578), 'ete2.faces.TextFace', 'faces.TextFace', (['"""-"""'], {}), "('-')\n", (8573, 8578), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((5853, 5911), 'ete2.faces.CircleFace', 'faces.CircleFace', ([], {'radius': '(8)', 'style': '"""circle"""', 'color': '"""orange"""'}), "(radius=8, style='circle', color='orange')\n", (5869, 5911), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n'), ((5981, 6038), 'ete2.faces.CircleFace', 'faces.CircleFace', ([], {'radius': '(8)', 'style': '"""circle"""', 'color': '"""green"""'}), "(radius=8, style='circle', color='green')\n", (5997, 6038), False, 'from ete2 import faces, Tree, AttrFace, TreeStyle\n')] |
import attr
from firedrake import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from scipy.linalg import svd
from scipy.sparse.linalg import svds
from scipy.sparse import csr_matrix
from slepc4py import SLEPc
import pandas as pd
from tqdm import tqdm
import os
matplotlib.use('Agg')
@attr.s
class ConditionNumberResult(object):
form = attr.ib()
assembled_form = attr.ib()
condition_number = attr.ib()
sparse_operator = attr.ib()
number_of_dofs = attr.ib()
nnz = attr.ib()
is_operator_symmetric = attr.ib()
bcs = attr.ib(default=list())
def plot_matrix(assembled_form, **kwargs):
"""Provides a plot of a matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
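# Usage sketch (illustrative and kept commented out so nothing extra runs when
# this script is executed; the CG Poisson operator below is only an example):
# mesh = UnitSquareMesh(8, 8, quadrilateral=True)
# V = FunctionSpace(mesh, "CG", 1)
# u, v = TrialFunction(V), TestFunction(V)
# A = assemble(inner(grad(u), grad(v)) * dx, mat_type="aij")
# plot_matrix(A, cmap="binary")
# plt.savefig("cg_sparsity.png")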
def plot_matrix_mixed(assembled_form, **kwargs):
"""Provides a plot of a mixed matrix."""
fig, ax = plt.subplots(1, 1)
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_primal_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
return plot
def plot_matrix_mixed_hybrid_full(a_form, bcs=[], **kwargs):
"""Provides a plot of a full hybrid-mixed matrix."""
fig, ax = plt.subplots(1, 1)
assembled_form = assemble(a_form, bcs=bcs, mat_type="aij")
petsc_mat = assembled_form.M.handle
f0_size = assembled_form.M[0, 0].handle.getSize()
f1_size = assembled_form.M[1, 1].handle.getSize()
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.axhline(y=f0_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] - 0.5, color="k")
ax.axhline(y=f0_size[0] + f1_size[0] - 0.5, color="k")
ax.axvline(x=f0_size[0] + f1_size[0] - 0.5, color="k")
return plot
def plot_matrix_hybrid_multiplier(a_form, trace_index=2, bcs=[], **kwargs):
"""Provides a plot of a condensed hybrid-mixed matrix for single scale problems."""
fig, ax = plt.subplots(1, 1)
_A = Tensor(a_form)
A = _A.blocks
idx = trace_index
S = A[idx, idx] - A[idx, :idx] * A[:idx, :idx].inv * A[:idx, idx]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
Mnp = Mnp.toarray()
# Eliminate rows and columns filled with zero entries
Mnp = Mnp[~(Mnp==0).all(1)]
idx = np.argwhere(np.all(Mnp[..., :] == 0, axis=0))
Mnp = np.delete(Mnp, idx, axis=1)
Am = np.ma.masked_values(Mnp, 0, rtol=1e-13)
# Plot the matrix
plot = ax.matshow(Am, **kwargs)
# Below there is the spy alternative
# plot = plt.spy(Am, **kwargs)
# Remove axis ticks and values
ax.tick_params(length=0)
ax.set_xticklabels([])
ax.set_yticklabels([])
return plot
def filter_real_part_in_array(array: np.ndarray, imag_threshold: float = 1e-5) -> np.ndarray:
"""Utility function to filter real part in a numpy array.
:param array:
Array with real and complex numbers.
:param imag_threshold:
Threshold to cut off imaginary part in complex number.
:return:
Filtered array with only real numbers.
"""
    real_part_array = array.real[abs(array.imag) < imag_threshold]
return real_part_array
def calculate_condition_number(
A,
num_of_factors,
backend: str = "scipy",
use_sparse: bool = False,
zero_tol: float = 1e-5
):
backend = backend.lower()
if backend == "scipy":
size = A.getSize()
Mnp = csr_matrix(A.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
if use_sparse:
singular_values = svds(
A=Mnp,
k=num_of_factors,
which="LM",
maxiter=5000,
return_singular_vectors=False,
solver="lobpcg"
)
else:
M = Mnp.toarray()
singular_values = svd(M, compute_uv=False, check_finite=False)
singular_values = singular_values[singular_values > zero_tol]
condition_number = singular_values.max() / singular_values.min()
elif backend == "slepc":
S = SLEPc.SVD()
S.create()
S.setOperator(A)
S.setType(SLEPc.SVD.Type.LAPACK)
S.setDimensions(nsv=num_of_factors)
S.setTolerances(max_it=5000)
S.setWhichSingularTriplets(SLEPc.SVD.Which.LARGEST)
S.solve()
num_converged_values = S.getConverged()
singular_values_list = list()
if num_converged_values > 0:
for i in range(num_converged_values):
singular_value = S.getValue(i)
singular_values_list.append(singular_value)
else:
raise RuntimeError("SLEPc SVD has not converged.")
singular_values = np.array(singular_values_list)
singular_values = singular_values[singular_values > zero_tol]
condition_number = singular_values.max() / singular_values.min()
else:
raise NotImplementedError("The required method for condition number estimation is currently unavailable.")
return condition_number
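# Illustrative sanity check (a sketch that is never called by the solvers below):
# the SVD-based definition above reduces to kappa = sigma_max / sigma_min, so a
# diagonal matrix diag(4, 1) has condition number 4.
def _condition_number_of_diag_4_1():
    M = np.array([[4.0, 0.0], [0.0, 1.0]])
    singular_values = svd(M, compute_uv=False)
    return singular_values.max() / singular_values.min()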
def solve_poisson_cg(mesh, degree=1, use_quads=False):
# Function space declaration
V = FunctionSpace(mesh, "CG", degree)
# Trial and test functions
u = TrialFunction(V)
v = TestFunction(V)
# Dirichlet BCs
bcs = DirichletBC(V, 0.0, "on_boundary")
# Variational form
a = inner(grad(u), grad(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = V.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_ls(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Stabilization parameters
delta_1 = Constant(1)
delta_2 = Constant(1)
delta_3 = Constant(1)
# Least-squares terms
a = delta_1 * inner(u + grad(p), v + grad(q)) * dx
a += delta_2 * div(u) * div(v) * dx
a += delta_3 * inner(curl(u), curl(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_cgls(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p - q * div(u)) * dx
# Stabilizing terms
a += -0.5 * inner((u + grad(p)), v + grad(q)) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
a += 0.5 * div(u) * div(v) * dx
a += 0.5 * inner(curl(u), curl(v)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_vms(mesh, degree=1):
# Function space declaration
pressure_family = 'CG'
velocity_family = 'CG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
# Stabilizing terms
a += 0.5 * inner(u + grad(p), grad(q) - v) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# L += 0.5 * f * div(v) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_mixed_RT(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
if use_quads:
hdiv_family = 'RTCF'
pressure_family = 'DQ'
else:
hdiv_family = 'RT'
pressure_family = 'DG'
U = FunctionSpace(mesh, hdiv_family, degree + 1)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
bcs = DirichletBC(W[0], sigma_e, "on_boundary")
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
A = assemble(a, bcs=bcs, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dgls(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
L0 = 1
eta_p = L0 * h # method B in the Badia-Codina paper
# eta_p = 1
# eta_p = L0 * L0 # method D in the Badia-Codina paper
eta_u = h / L0 # method B in the Badia-Codina paper
# eta_u = 1
# Nitsche's penalizing term
beta_0 = Constant(1.0)
beta = beta_0 / h
# Mixed classical terms
a = (dot(u, v) - div(v) * p - q * div(u)) * dx
# DG terms
a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS
# Edge stabilizing terms
# ** Badia-Codina based
a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS
a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS
# ** Mesh independent terms
# a += jump(u, n) * jump(v, n) * dS
# a += dot(jump(p, n), jump(q, n)) * dS
# Volumetric stabilizing terms
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += -0.5 * inner(u + grad(p), v + grad(q)) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# ** Badia-Codina based
a += -eta_u * inner(u + grad(p), v + grad(q)) * dx
a += eta_p * div(u) * div(v) * dx
a += eta_p * inner(curl(u), curl(v)) * dx
# Weakly imposed boundary conditions
a += dot(v, n) * p * ds - q * dot(u, n) * ds
    a += beta * p * q * ds  # may decrease convergence rates
# ** The terms below are based on ASGS Badia-Codina (2010), it is not a classical Nitsche's method
a += (eta_p / h) * dot(u, n) * dot(v, n) * ds
a += (eta_u / h) * dot(p * n, q * n) * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dvms(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
L0 = 1
eta_p = L0 * h # method B in the Badia-Codina paper
# eta_p = L0 * L0 # method D in the Badia-Codina paper
eta_u = h / L0 # method B in the Badia-Codina paper
# Mixed classical terms
a = (dot(u, v) - div(v) * p + q * div(u)) * dx
# DG terms
a += jump(v, n) * avg(p) * dS - avg(q) * jump(u, n) * dS
# Edge stabilizing terms
# ** Badia-Codina based
a += (avg(eta_p) / h_avg) * (jump(u, n) * jump(v, n)) * dS
a += (avg(eta_u) / h_avg) * dot(jump(p, n), jump(q, n)) * dS
# ** Mesh independent (original)
# a += jump(u, n) * jump(v, n) * dS # not considered in the original paper
# a += dot(jump(p, n), jump(q, n)) * dS
# Volumetric stabilizing terms
# a += 0.5 * inner(u + grad(p), grad(q) - v) * dx
# a += 0.5 * h * h * div(u) * div(v) * dx
# a += 0.5 * h * h * inner(curl(u), curl(v)) * dx
# L += 0.5 * h * h * f * div(v) * dx
# a += 0.5 * div(u) * div(v) * dx
# a += 0.5 * inner(curl(u), curl(v)) * dx
# L += 0.5 * f * div(v) * dx
# ** Badia-Codina based
a += eta_u * inner(u + grad(p), grad(q) - v) * dx
a += eta_p * div(u) * div(v) * dx
# Weakly imposed boundary conditions
a += dot(v, n) * p * ds - q * dot(u, n) * ds
# ** The terms below are based on ASGS Badia-Codina (2010), it is not a classical Nitsche's method
a += (eta_p / h) * dot(u, n) * dot(v, n) * ds
    a += (eta_u / h) * dot(p * n, q * n) * ds  # may decrease convergence rates
# ** Classical Nitsche
    # a += beta * p * q * ds  # may decrease convergence rates (Nitsche)
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_sipg(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
V = FunctionSpace(mesh, pressure_family, degree)
# Trial and test functions
p = TrialFunction(V)
q = TestFunction(V)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Edge stabilizing parameter
beta0 = Constant(1e1)
beta = beta0 / h
# Symmetry term. Choose if the method is SIPG (-1) or NIPG (1)
s = Constant(-1)
# Classical volumetric terms
a = inner(grad(p), grad(q)) * dx
L = f * q * dx
# DG edge terms
a += s * dot(jump(p, n), avg(grad(q))) * dS - dot(avg(grad(p)), jump(q, n)) * dS
# Edge stabilizing terms
a += beta("+") * dot(jump(p, n), jump(q, n)) * dS
# Weak boundary conditions
a += s * dot(p * n, grad(q)) * ds - dot(grad(p), q * n) * ds
a += beta * p * q * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = V.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_dls(mesh, degree=1):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
W = U * V
# Trial and test functions
u, p = TrialFunctions(W)
v, q = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Dirichlet BCs
# bcs = DirichletBC(W[0], sigma_e, "on_boundary", method="geometric")
# Average cell size and mesh dependent stabilization
h_avg = (h("+") + h("-")) / 2.0
# Jump stabilizing parameters based on Badia-Codina stabilized dG method
# L0 = 1
# eta_p = L0 * h_avg # method B in the Badia-Codina paper
eta_p = 1
# eta_p = L0 * L0 # method D in the Badia-Codina paper
# eta_u = h_avg / L0 # method B in the Badia-Codina paper
eta_u = 1
# eta_u_bc = h / L0 # method B in the Badia-Codina paper
eta_u_bc = 1
# Least-Squares weights
delta = Constant(1.0)
# delta = h
delta_0 = delta
delta_1 = delta
delta_2 = delta
delta_3 = 1 / h
delta_4 = 1 / h
# Least-squares terms
a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
a += delta_1 * div(u) * div(v) * dx
a += delta_2 * inner(curl(u), curl(v)) * dx
# Edge stabilizing terms
# ** Badia-Codina based (better results) **
a += eta_u * avg(delta_3) * (jump(u, n) * jump(v, n)) * dS
a += eta_p * avg(delta_4) * dot(jump(p, n), jump(q, n)) * dS
    a += eta_u_bc * delta_3 * p * q * ds  # may decrease convergence rates
a += eta_u_bc * delta_4 * dot(u, n) * dot(v, n) * ds
# ** Mesh independent **
# a += jump(u, n) * jump(v, n) * dS
# a += dot(jump(p, n), jump(q, n)) * dS
# a += p * q * ds
A = assemble(a, mat_type="aij")
petsc_mat = A.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-12)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = W.dim()
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=A,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric
)
return result
def solve_poisson_sdhm(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# BCs
u_projected = sigma_e
p_boundaries = p_exact
bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e-18)
# beta = beta_0 / h
beta = beta_0
# Stabilization parameters
delta_0 = Constant(-1)
delta_1 = Constant(-0.5) * h * h
delta_2 = Constant(0.5) * h * h
delta_3 = Constant(0.5) * h * h
# Mixed classical terms
a = (dot(u, v) - div(v) * p + delta_0 * q * div(u)) * dx
L = delta_0 * f * q * dx
# Stabilizing terms
a += delta_1 * inner(u + grad(p), v + grad(q)) * dx
a += delta_2 * div(u) * div(v) * dx
a += delta_3 * inner(curl(u), curl(v)) * dx
L += delta_2 * f * div(v) * dx
# Hybridization terms
a += lambda_h("+") * dot(v, n)("+") * dS + mu_h("+") * dot(u, n)("+") * dS
a += beta("+") * (lambda_h("+") - p("+")) * (mu_h("+") - q("+")) * dS
# Weakly imposed BC
a += (p_boundaries * dot(v, n) + mu_h * (dot(u, n) - dot(u_projected, n))) * ds
a += beta * (lambda_h - p_boundaries) * mu_h * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bcs
)
return result
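# Note on the Slate block expression above (and reused by the hybrid solvers
# below): S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2] is the Schur
# complement of the multiplier block, i.e. the statically condensed operator
# acting on lambda_h alone; it is this condensed matrix whose condition number
# is estimated.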
def solve_poisson_hdg(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
bc_multiplier = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e0)
beta = beta_0 / h
# beta = beta_0
# Numerical flux trace
u_hat = u + beta * (p - lambda_h) * n
# HDG classical form
a = (dot(u, v) - div(v) * p) * dx + lambda_h("+") * jump(v, n) * dS
a += -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
L = f * q * dx
# Transmission condition
a += jump(u_hat, n) * mu_h("+") * dS
# Weakly imposed BC
a += lambda_h * dot(v, n) * ds
a += dot(u_hat, n) * q * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_cgh(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
trace_family = "HDiv Trace"
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
T = FunctionSpace(mesh, trace_family, degree)
W = V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
p, lambda_h = TrialFunctions(W)
q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0e0)
beta = beta_0 / h
# beta = beta_0
# Numerical flux trace
u = -grad(p)
u_hat = u + beta * (p - lambda_h) * n
# HDG classical form
a = -dot(u, grad(q)) * dx + jump(u_hat, n) * q("+") * dS
L = f * q * dx
# Transmission condition
a += jump(u_hat, n) * mu_h("+") * dS
# Weakly imposed BC
a += dot(u_hat, n) * q * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_ldgc(
mesh,
degree=1,
is_multiplier_continuous=True
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
primal_family = "DQ" if use_quads else "DG"
V = FunctionSpace(mesh, primal_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
trace_family = "HDiv Trace"
T = FunctionSpace(mesh, trace_family, degree)
W = V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
p, lambda_h = TrialFunctions(W)
q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
# Forcing function
f_expression = div(-grad(p_exact))
f = Function(V).interpolate(f_expression)
# Dirichlet BCs
p_boundaries = Constant(0.0)
bc_multiplier = DirichletBC(W.sub(1), p_exact, "on_boundary")
# Hybridization parameter
s = Constant(-1.0)
beta = Constant(32.0)
h = CellDiameter(mesh)
h_avg = avg(h)
# Classical term
a = dot(grad(p), grad(q)) * dx
L = f * q * dx
# Hybridization terms
a += s * dot(grad(q), n)("+") * (p("+") - lambda_h("+")) * dS
a += -dot(grad(p), n)("+") * (q("+") - mu_h("+")) * dS
a += (beta / h_avg) * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
# Boundary terms
# a += -dot(vel_projected, n) * v * ds # How to set this bc??
# a += (beta / h) * (p- p_boundaries) * q * ds # is this necessary?
L += s * dot(grad(q), n) * p_boundaries * ds
F = a - L
a_form = lhs(F)
_A = Tensor(a_form)
A = _A.blocks
S = A[1, 1] - A[1, :1] * A[:1, :1].inv * A[:1, 1]
Smat = assemble(S, bcs=bc_multiplier)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bc_multiplier
)
return result
def solve_poisson_lsh(
mesh,
degree=1,
is_multiplier_continuous=False
):
# Function space declaration
use_quads = str(mesh.ufl_cell()) == "quadrilateral"
pressure_family = 'DQ' if use_quads else 'DG'
velocity_family = 'DQ' if use_quads else 'DG'
U = VectorFunctionSpace(mesh, velocity_family, degree)
V = FunctionSpace(mesh, pressure_family, degree)
if is_multiplier_continuous:
LagrangeElement = FiniteElement("Lagrange", mesh.ufl_cell(), degree)
C0TraceElement = LagrangeElement["facet"]
T = FunctionSpace(mesh, C0TraceElement)
else:
trace_family = "HDiv Trace"
T = FunctionSpace(mesh, trace_family, degree)
W = U * V * T
# Trial and test functions
# solution = Function(W)
# u, p, lambda_h = split(solution)
u, p, lambda_h = TrialFunctions(W)
v, q, mu_h = TestFunctions(W)
# Mesh entities
n = FacetNormal(mesh)
h = CellDiameter(mesh)
x, y = SpatialCoordinate(mesh)
# Exact solution
p_exact = sin(2 * pi * x) * sin(2 * pi * y)
exact_solution = Function(V).interpolate(p_exact)
exact_solution.rename("Exact pressure", "label")
sigma_e = Function(U, name='Exact velocity')
sigma_e.project(-grad(p_exact))
# BCs
bcs = DirichletBC(W.sub(2), p_exact, "on_boundary")
# Hybridization parameter
beta_0 = Constant(1.0)
beta = beta_0 / h
beta_avg = beta_0 / h("+")
# Stabilizing parameter
# delta_0 = Constant(1)
# delta_1 = Constant(1)
# delta_2 = Constant(1)
# delta_3 = Constant(1)
# delta_4 = Constant(1)
# delta_5 = Constant(1)
# LARGE_NUMBER = Constant(1e0)
delta = h * h
# delta = Constant(1)
# delta = h
delta_0 = delta
delta_1 = delta
delta_2 = delta
delta_3 = delta
delta_4 = delta
# delta_4 = LARGE_NUMBER / h
delta_5 = delta
# Numerical flux trace
u_hat = u + beta * (p - lambda_h) * n
v_hat = v + beta * (q - mu_h) * n
# Flux least-squares
# a = (
# (inner(u, v) - q * div(u) - p * div(v) + inner(grad(p), grad(q)))
# * delta_1
# * dx
# )
# # These terms below are unsymmetric
# a += delta_1 * jump(u_hat, n=n) * q("+") * dS
# a += delta_1("+") * dot(u_hat, n) * q * ds
# # a += delta_1 * dot(u, n) * q * ds
# # L = -delta_1 * dot(u_projected, n) * q * ds
# a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
# a += delta_1 * lambda_h * dot(v, n) * ds
# # L = delta_1 * p_exact * dot(v, n) * ds
# Flux Least-squares as in DG
a = delta_0 * inner(u + grad(p), v + grad(q)) * dx
# Classical mixed Darcy eq. first-order terms as stabilizing terms
a += delta_1 * (dot(u, v) - div(v) * p) * dx
a += delta_1("+") * lambda_h("+") * jump(v, n=n) * dS
a += delta_1 * lambda_h * dot(v, n) * ds
# Mass balance least-square
a += delta_2 * div(u) * div(v) * dx
# L = delta_2 * f * div(v) * dx
# Irrotational least-squares
a += delta_3 * inner(curl(u), curl(v)) * dx
# Hybridization terms
a += mu_h("+") * jump(u_hat, n=n) * dS
a += delta_4("+") * (p("+") - lambda_h("+")) * (q("+") - mu_h("+")) * dS
# a += delta_4 * (p - lambda_h) * (q - mu_h) * ds
# a += delta_5 * (dot(u, n)("+") - dot(u_hat, n)("+")) * (dot(v, n)("+") - dot(v_hat, n)("+")) * dS
# a += delta_5 * (dot(u, n) - dot(u_hat, n)) * (dot(v, n) - dot(v_hat, n)) * ds
# Weakly imposed BC from hybridization
# a += mu_h * (lambda_h - p_boundaries) * ds
# a += mu_h * lambda_h * ds
# ###
# a += (
# (mu_h - q) * (lambda_h - p_boundaries) * ds
# ) # maybe this is not a good way to impose BC, but this necessary
_A = Tensor(a)
A = _A.blocks
S = A[2, 2] - A[2, :2] * A[:2, :2].inv * A[:2, 2]
Smat = assemble(S, bcs=bcs)
petsc_mat = Smat.M.handle
is_symmetric = petsc_mat.isSymmetric(tol=1e-8)
size = petsc_mat.getSize()
Mnp = csr_matrix(petsc_mat.getValuesCSR()[::-1], shape=size)
Mnp.eliminate_zeros()
nnz = Mnp.nnz
number_of_dofs = Mnp.shape[0]
num_of_factors = int(number_of_dofs) - 1
condition_number = calculate_condition_number(petsc_mat, num_of_factors)
result = ConditionNumberResult(
form=a,
assembled_form=Smat,
condition_number=condition_number,
sparse_operator=Mnp,
number_of_dofs=number_of_dofs,
nnz=nnz,
is_operator_symmetric=is_symmetric,
bcs=bcs
)
return result
def hp_refinement_cond_number_calculation(
solver,
min_degree=1,
max_degree=4,
numel_xy=(5, 10, 15, 20, 25),
quadrilateral=True,
name="",
**kwargs
):
results_dict = {
"Element": list(),
"Number of Elements": list(),
"Degree": list(),
"Symmetric": list(),
"nnz": list(),
"dofs": list(),
"h": list(),
"Condition Number": list(),
}
element_kind = "Quad" if quadrilateral else "Tri"
pbar = tqdm(range(min_degree, max_degree))
for degree in pbar:
for n in numel_xy:
pbar.set_description(f"Processing {name} - degree = {degree} - N = {n}")
mesh = UnitSquareMesh(n, n, quadrilateral=quadrilateral)
result = solver(mesh, degree=degree)
current_cell_size = mesh.cell_sizes.dat.data_ro.min() if not quadrilateral else 1 / n
results_dict["Element"].append(element_kind)
results_dict["Number of Elements"].append(n * n)
results_dict["Degree"].append(degree)
results_dict["Symmetric"].append(result.is_operator_symmetric)
results_dict["nnz"].append(result.nnz)
results_dict["dofs"].append(result.number_of_dofs)
results_dict["h"].append(current_cell_size)
results_dict["Condition Number"].append(result.condition_number)
os.makedirs("./cond_number_results/results_%s" % name, exist_ok=True)
df_cond_number = pd.DataFrame(data=results_dict)
path_to_save_results = "./cond_number_results/results_%s/cond_numbers.csv" % name
df_cond_number.to_csv(path_to_save_results)
return df_cond_number
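# Illustrative sketch (hypothetical helper, assuming pandas is imported as `pd`, as its
# use via pd.DataFrame above suggests): the CSV written by
# hp_refinement_cond_number_calculation can be read back for post-processing.
def _load_cond_number_results(name="lsh"):
    # Path mirrors the one used in hp_refinement_cond_number_calculation.
    return pd.read_csv("./cond_number_results/results_%s/cond_numbers.csv" % name)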
# Solver options
solvers_options = {
# "cg": solve_poisson_cg,
# "cgls": solve_poisson_cgls,
# "dgls": solve_poisson_dgls,
# "sdhm": solve_poisson_sdhm,
# "ls": solve_poisson_ls,
# "dls": solve_poisson_dls,
"lsh": solve_poisson_lsh,
# "vms": solve_poisson_vms,
# "dvms": solve_poisson_dvms,
# "mixed_RT": solve_poisson_mixed_RT,
# "hdg": solve_poisson_hdg,
# "cgh": solve_poisson_cgh,
# "ldgc": solve_poisson_ldgc,
# "sipg": solve_poisson_sipg,
}
degree = 1
last_degree = 1
for current_solver in solvers_options:
# Setting the output file name
name = f"{current_solver}"
# Selecting the solver and its kwargs
solver = solvers_options[current_solver]
# Performing the convergence study
hp_refinement_cond_number_calculation(
solver,
min_degree=degree,
max_degree=degree + last_degree,
quadrilateral=True,
name=name
)
# N = 5
# mesh = UnitSquareMesh(N, N, quadrilateral=True)
# result = solve_poisson_lsh(mesh, degree=1)
# print(f'Is symmetric? {result.is_operator_symmetric}')
# print(f'nnz: {result.nnz}')
# print(f'DoFs: {result.number_of_dofs}')
# print(f'Condition Number: {result.condition_number}')
# # Plotting the resulting matrix
# matplotlib.use('TkAgg')
# import copy
# my_cmap = copy.copy(plt.cm.get_cmap("winter"))
# my_cmap.set_bad(color="lightgray")
# # plot_matrix_primal_hybrid_full(result.form, result.bcs, cmap=my_cmap)
# # plot_matrix_mixed_hybrid_full(result.form, result.bcs, cmap=my_cmap)
# plot_matrix_hybrid_multiplier(result.form, trace_index=2, bcs=result.bcs, cmap=my_cmap)
# # plot_matrix(result.assembled_form, cmap=my_cmap)
# # plot_matrix_mixed(result.assembled_form, cmap=my_cmap)
# plt.tight_layout()
# plt.savefig("sparse_pattern.png")
# plt.show() | [
"numpy.ma.masked_values",
"slepc4py.SLEPc.SVD",
"os.makedirs",
"matplotlib.use",
"numpy.delete",
"numpy.array",
"scipy.sparse.linalg.svds",
"scipy.linalg.svd",
"pandas.DataFrame",
"numpy.all",
"matplotlib.pyplot.subplots",
"attr.ib"
] | [((287, 308), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (301, 308), False, 'import matplotlib\n'), ((367, 376), 'attr.ib', 'attr.ib', ([], {}), '()\n', (374, 376), False, 'import attr\n'), ((398, 407), 'attr.ib', 'attr.ib', ([], {}), '()\n', (405, 407), False, 'import attr\n'), ((431, 440), 'attr.ib', 'attr.ib', ([], {}), '()\n', (438, 440), False, 'import attr\n'), ((463, 472), 'attr.ib', 'attr.ib', ([], {}), '()\n', (470, 472), False, 'import attr\n'), ((494, 503), 'attr.ib', 'attr.ib', ([], {}), '()\n', (501, 503), False, 'import attr\n'), ((514, 523), 'attr.ib', 'attr.ib', ([], {}), '()\n', (521, 523), False, 'import attr\n'), ((552, 561), 'attr.ib', 'attr.ib', ([], {}), '()\n', (559, 561), False, 'import attr\n'), ((698, 716), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (710, 716), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1088), 'numpy.delete', 'np.delete', (['Mnp', 'idx'], {'axis': '(1)'}), '(Mnp, idx, axis=1)\n', (1070, 1088), True, 'import numpy as np\n'), ((1098, 1137), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Mnp', '(0)'], {'rtol': '(1e-13)'}), '(Mnp, 0, rtol=1e-13)\n', (1117, 1137), True, 'import numpy as np\n'), ((1443, 1461), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1455, 1461), True, 'import matplotlib.pyplot as plt\n'), ((1861, 1888), 'numpy.delete', 'np.delete', (['Mnp', 'idx'], {'axis': '(1)'}), '(Mnp, idx, axis=1)\n', (1870, 1888), True, 'import numpy as np\n'), ((1898, 1937), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Mnp', '(0)'], {'rtol': '(1e-13)'}), '(Mnp, 0, rtol=1e-13)\n', (1917, 1937), True, 'import numpy as np\n'), ((2361, 2379), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2373, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2843, 2870), 'numpy.delete', 'np.delete', (['Mnp', 'idx'], {'axis': '(1)'}), '(Mnp, idx, axis=1)\n', (2852, 2870), True, 'import numpy as np\n'), ((2880, 2919), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Mnp', '(0)'], {'rtol': '(1e-13)'}), '(Mnp, 0, rtol=1e-13)\n', (2899, 2919), True, 'import numpy as np\n'), ((3342, 3360), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (3354, 3360), True, 'import matplotlib.pyplot as plt\n'), ((3878, 3905), 'numpy.delete', 'np.delete', (['Mnp', 'idx'], {'axis': '(1)'}), '(Mnp, idx, axis=1)\n', (3887, 3905), True, 'import numpy as np\n'), ((3915, 3954), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Mnp', '(0)'], {'rtol': '(1e-13)'}), '(Mnp, 0, rtol=1e-13)\n', (3934, 3954), True, 'import numpy as np\n'), ((4541, 4559), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (4553, 4559), True, 'import matplotlib.pyplot as plt\n'), ((5061, 5088), 'numpy.delete', 'np.delete', (['Mnp', 'idx'], {'axis': '(1)'}), '(Mnp, idx, axis=1)\n', (5070, 5088), True, 'import numpy as np\n'), ((5098, 5137), 'numpy.ma.masked_values', 'np.ma.masked_values', (['Mnp', '(0)'], {'rtol': '(1e-13)'}), '(Mnp, 0, rtol=1e-13)\n', (5117, 5137), True, 'import numpy as np\n'), ((44728, 44797), 'os.makedirs', 'os.makedirs', (["('./cond_number_results/results_%s' % name)"], {'exist_ok': '(True)'}), "('./cond_number_results/results_%s' % name, exist_ok=True)\n", (44739, 44797), False, 'import os\n'), ((44819, 44850), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'results_dict'}), '(data=results_dict)\n', (44831, 44850), True, 'import pandas as pd\n'), ((1017, 1049), 'numpy.all', 'np.all', 
(['(Mnp[..., :] == 0)'], {'axis': '(0)'}), '(Mnp[..., :] == 0, axis=0)\n', (1023, 1049), True, 'import numpy as np\n'), ((1817, 1849), 'numpy.all', 'np.all', (['(Mnp[..., :] == 0)'], {'axis': '(0)'}), '(Mnp[..., :] == 0, axis=0)\n', (1823, 1849), True, 'import numpy as np\n'), ((2799, 2831), 'numpy.all', 'np.all', (['(Mnp[..., :] == 0)'], {'axis': '(0)'}), '(Mnp[..., :] == 0, axis=0)\n', (2805, 2831), True, 'import numpy as np\n'), ((3834, 3866), 'numpy.all', 'np.all', (['(Mnp[..., :] == 0)'], {'axis': '(0)'}), '(Mnp[..., :] == 0, axis=0)\n', (3840, 3866), True, 'import numpy as np\n'), ((5017, 5049), 'numpy.all', 'np.all', (['(Mnp[..., :] == 0)'], {'axis': '(0)'}), '(Mnp[..., :] == 0, axis=0)\n', (5023, 5049), True, 'import numpy as np\n'), ((6257, 6364), 'scipy.sparse.linalg.svds', 'svds', ([], {'A': 'Mnp', 'k': 'num_of_factors', 'which': '"""LM"""', 'maxiter': '(5000)', 'return_singular_vectors': '(False)', 'solver': '"""lobpcg"""'}), "(A=Mnp, k=num_of_factors, which='LM', maxiter=5000,\n return_singular_vectors=False, solver='lobpcg')\n", (6261, 6364), False, 'from scipy.sparse.linalg import svds\n'), ((6550, 6594), 'scipy.linalg.svd', 'svd', (['M'], {'compute_uv': '(False)', 'check_finite': '(False)'}), '(M, compute_uv=False, check_finite=False)\n', (6553, 6594), False, 'from scipy.linalg import svd\n'), ((6781, 6792), 'slepc4py.SLEPc.SVD', 'SLEPc.SVD', ([], {}), '()\n', (6790, 6792), False, 'from slepc4py import SLEPc\n'), ((7422, 7452), 'numpy.array', 'np.array', (['singular_values_list'], {}), '(singular_values_list)\n', (7430, 7452), True, 'import numpy as np\n')] |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, Email, ValidationError
from models import User
class RegistrationForm(FlaskForm):
email = StringField('Your Email Address', validators=[InputRequired(), Email()])
username = StringField('Enter your username', validators=[InputRequired()])
password = PasswordField('Password', validators=[InputRequired()])
submit = SubmitField('Sign Up')
def validate_username(self, username):
existing_username = User.query.filter_by(username=username.data).first()
if existing_username:
raise ValidationError("The username already exists")
class LoginForm(FlaskForm):
username = StringField("Your email address", validators=[InputRequired()])
password = PasswordField("<PASSWORD>:", validators=[InputRequired()])
submit = SubmitField("Sign In")
| [
"wtforms.validators.Email",
"models.User.query.filter_by",
"wtforms.validators.ValidationError",
"wtforms.SubmitField",
"wtforms.validators.InputRequired"
] | [((471, 493), 'wtforms.SubmitField', 'SubmitField', (['"""Sign Up"""'], {}), "('Sign Up')\n", (482, 493), False, 'from wtforms import StringField, PasswordField, SubmitField\n'), ((910, 932), 'wtforms.SubmitField', 'SubmitField', (['"""Sign In"""'], {}), "('Sign In')\n", (921, 932), False, 'from wtforms import StringField, PasswordField, SubmitField\n'), ((667, 713), 'wtforms.validators.ValidationError', 'ValidationError', (['"""The username already exists"""'], {}), "('The username already exists')\n", (682, 713), False, 'from wtforms.validators import InputRequired, Email, ValidationError\n'), ((280, 295), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (293, 295), False, 'from wtforms.validators import InputRequired, Email, ValidationError\n'), ((297, 304), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (302, 304), False, 'from wtforms.validators import InputRequired, Email, ValidationError\n'), ((369, 384), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (382, 384), False, 'from wtforms.validators import InputRequired, Email, ValidationError\n'), ((440, 455), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (453, 455), False, 'from wtforms.validators import InputRequired, Email, ValidationError\n'), ((566, 610), 'models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'username.data'}), '(username=username.data)\n', (586, 610), False, 'from models import User\n'), ((805, 820), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (818, 820), False, 'from wtforms.validators import InputRequired, Email, ValidationError\n'), ((879, 894), 'wtforms.validators.InputRequired', 'InputRequired', ([], {}), '()\n', (892, 894), False, 'from wtforms.validators import InputRequired, Email, ValidationError\n')] |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Creates a security policy with the default values"
class Input:
NAME = "name"
class Output:
ID = "id"
class CreateSecurityPolicyInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"name": {
"type": "string",
"title": "Name",
"description": "The name of the security policy that needs to be created",
"order": 1
}
},
"required": [
"name"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class CreateSecurityPolicyOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"id": {
"type": "string",
"title": "ID",
"description": "ID of the new policy",
"order": 1
}
},
"required": [
"id"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| [
"json.loads"
] | [((288, 591), 'json.loads', 'json.loads', (['"""\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "name": {\n "type": "string",\n "title": "Name",\n "description": "The name of the security policy that needs to be created",\n "order": 1\n }\n },\n "required": [\n "name"\n ]\n}\n """'], {}), '(\n """\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "name": {\n "type": "string",\n "title": "Name",\n "description": "The name of the security policy that needs to be created",\n "order": 1\n }\n },\n "required": [\n "name"\n ]\n}\n """\n )\n', (298, 591), False, 'import json\n'), ((729, 990), 'json.loads', 'json.loads', (['"""\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "id": {\n "type": "string",\n "title": "ID",\n "description": "ID of the new policy",\n "order": 1\n }\n },\n "required": [\n "id"\n ]\n}\n """'], {}), '(\n """\n {\n "type": "object",\n "title": "Variables",\n "properties": {\n "id": {\n "type": "string",\n "title": "ID",\n "description": "ID of the new policy",\n "order": 1\n }\n },\n "required": [\n "id"\n ]\n}\n """\n )\n', (739, 990), False, 'import json\n')] |
import collections
import logging
import urllib.parse
from structlog import wrap_logger
from secure_message.constants import MESSAGE_BY_ID_ENDPOINT, MESSAGE_LIST_ENDPOINT, MESSAGE_QUERY_LIMIT
from secure_message.services.service_toggles import party, internal_user_service
logger = wrap_logger(logging.getLogger(__name__))
MessageArgs = collections.namedtuple(
'MessageArgs',
'page limit business_id surveys cc label desc ce is_closed my_conversations new_respondent_conversations all_conversation_types unread_conversations')
def get_options(args): # NOQA pylint:disable=too-complex
"""extract options from request , allow label to be set by caller
:param args: contains search arguments. Not all end points support all args
:returns: MessageArgs named tuple containing the args for the search
business_id If set , restricts search to conversations regarding this specific party id
surveys If set allows the count to be restricted by a list of survey_ids
cc If set , allows the count to be restricted by a particular case
ce If set, alows the count to be restricted by a particular collection exercise
is_closed If set to 'true' only counts closed conversations, else only open conversations
my_conversations If set to 'true only counts my conversations.
I.e conversations where the current user id is the to actor id
new_respondent_conversations If set to 'true'only counts conversations where the to actor is set to 'GROUP'
all_conversation_types If set 'true', overrides is_closed, my_conversations and new_respondent_conversations
and returns 4 counts 1 for each of , open , closed, my_conversations and new_respondent_conversations
page If set requests the specific page of information to return
limit If set it sets the maximum number of results to return
desc If present, requests the information in descending order
"""
fields = {'page': 1, 'limit': MESSAGE_QUERY_LIMIT, 'business_id': None, 'surveys': None,
'desc': True, 'cc': None, 'label': None, 'ce': None, 'is_closed': False,
'my_conversations': False, 'new_respondent_conversations': False, 'all_conversation_types': False,
'unread_conversations': False}
for field in ['cc', 'ce', 'business_id', 'label']:
if args.get(field):
fields[field] = str(args.get(field))
fields['surveys'] = args.getlist('survey')
for field in ['limit', 'page']:
if args.get(field):
fields[field] = int(args.get(field))
if args.get('desc') == 'false':
fields['desc'] = False
if args.get('is_closed') == 'true':
fields['is_closed'] = True
if args.get('my_conversations') == 'true':
fields['my_conversations'] = True
if args.get('new_respondent_conversations') == 'true':
fields['new_respondent_conversations'] = True
if args.get('all_conversation_types') == 'true':
fields['all_conversation_types'] = True
if args.get('unread_conversations') == 'true':
fields['unread_conversations'] = True
return MessageArgs(page=fields['page'], limit=fields['limit'], business_id=fields['business_id'],
surveys=fields['surveys'], cc=fields['cc'], label=fields['label'],
desc=fields['desc'], ce=fields['ce'], is_closed=fields['is_closed'],
my_conversations=fields['my_conversations'],
new_respondent_conversations=fields['new_respondent_conversations'],
all_conversation_types=fields['all_conversation_types'],
unread_conversations=fields['unread_conversations'])
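# Illustrative sketch (hypothetical caller): get_options expects a mapping that supports
# .get() and .getlist(), such as Flask's request.args, e.g. inside a view:
#
#     message_args = get_options(request.args)
#     # message_args.limit, message_args.business_id, ... are then used for the search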
def set_conversation_type_args(existing_args, is_closed=False, my_conversations=False, new_conversations=False,
all_types=False, unread_conversations=False):
"""Returns a new set of args based on the existing args which are a named tuple,
but allow the conversation type only to be changed"""
return MessageArgs(page=existing_args.page,
limit=existing_args.limit,
business_id=existing_args.business_id,
surveys=existing_args.surveys,
cc=existing_args.cc,
label=existing_args.label,
desc=existing_args.desc,
ce=existing_args.ce,
is_closed=is_closed,
my_conversations=my_conversations,
new_respondent_conversations=new_conversations,
all_conversation_types=all_types,
unread_conversations=unread_conversations)
def generate_string_query_args(args):
params = {}
for field in args._fields:
if field in ['page']:
continue
value = getattr(args, field)
if value:
params[field] = value
return urllib.parse.urlencode(params)
def process_paginated_list(paginated_list, host_url, user, message_args, endpoint=MESSAGE_LIST_ENDPOINT, body_summary=True):
"""used to change a pagination object to json format with links"""
messages = []
string_query_args = generate_string_query_args(message_args)
for message in paginated_list.items:
msg = message.serialize(user, body_summary=body_summary)
msg['_links'] = {"self": {"href": f"{host_url}{MESSAGE_BY_ID_ENDPOINT}/{msg['msg_id']}"}}
messages.append(msg)
links = {'first': {"href": f"{host_url}{endpoint}"},
'self': {"href": f"{host_url}{endpoint}?{string_query_args}&page={message_args.page}"}}
if paginated_list.has_next:
links['next'] = {
"href": f"{host_url}{endpoint}?{string_query_args}&page={message_args.page + 1}"}
if paginated_list.has_prev:
links['prev'] = {
"href": f"{host_url}{endpoint}?{string_query_args}&page={message_args.page - 1}"}
return messages, links
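# Illustrative sketch of the links structure produced above (taken from the f-strings in
# process_paginated_list; `query` stands for the encoded string query args):
#
#     links = {
#         "first": {"href": f"{host_url}{endpoint}"},
#         "self":  {"href": f"{host_url}{endpoint}?{query}&page=2"},
#         "next":  {"href": f"{host_url}{endpoint}?{query}&page=3"},  # only when has_next
#         "prev":  {"href": f"{host_url}{endpoint}?{query}&page=1"},  # only when has_prev
#     }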
def add_to_details(messages):
"""Adds a @msg_to key to every message in a list of messages.
Every msg_to uuid is resolved to include details of the user.
If the call for the internal user id fails, an exception will be thrown.
If the external user id cannot be found in the list that we got from the party service. There
won't be a @msg_to value returned in the payload. The API documentation notes that these elements
aren't guaranteed to be provided so we're not breaking the contract by doing this.
Note: Several of these lines of code could be combined into a more succinct view, spreading them out
is deliberate so that log stack traces are better able to identify the cause of log errors
"""
external_user_details = {}
for user in party.get_users_details(get_external_user_uuid_list(messages)):
external_user_details[user['id']] = user
for message in messages:
try:
msg_to = message["msg_to"][0]
from_internal = message["from_internal"]
if not from_internal:
msg_to_details = internal_user_service.get_user_details(msg_to)
message.update({"@msg_to": [msg_to_details]})
else:
msg_to_details = external_user_details.get(msg_to)
if msg_to_details:
message.update({'@msg_to': [msg_to_details]})
else:
logger.info("No details found for the message recipient", msg_to=msg_to)
except IndexError:
logger.exception("Exception adding to details", msg_to=msg_to, from_internal=from_internal)
raise
return messages
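# Illustrative sketch of the enrichment performed by add_to_details (field values are
# placeholders; the details dicts come from the party / internal user services):
#
#     before: {"msg_to": ["<user-uuid>"], "from_internal": False, ...}
#     after:  {"msg_to": ["<user-uuid>"], "from_internal": False,
#              "@msg_to": [{"id": "<user-uuid>", ...}], ...}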
def add_from_details(messages):
"""Adds a @msg_from key to every message in a list of messages.
    Every msg_from uuid is resolved to include details of the user.
If the call for the internal user id fails, an exception will be thrown.
If the external user id cannot be found in the list that we got from the party service. There
won't be a @msg_from value returned in the payload. The API documentation notes that these elements
aren't guaranteed to be provided so we're not breaking the contract by doing this.
"""
external_user_details = {}
for user in party.get_users_details(get_external_user_uuid_list(messages)):
external_user_details[user['id']] = user
for message in messages:
try:
msg_from = message["msg_from"]
from_internal = message["from_internal"]
if from_internal:
message.update({"@msg_from": internal_user_service.get_user_details(msg_from)})
else:
if external_user_details.get(message['msg_from']):
message.update({'@msg_from': external_user_details.get(msg_from)})
except IndexError:
logger.exception("Exception adding from details message", msg_from=msg_from, from_internal=from_internal)
raise
return messages
def get_external_user_uuid_list(messages):
"""Compiles a list of all unique the external user (respondent) uuids from a list of messages"""
external_user_uuids = set()
external_msgs = [message for message in messages if message['from_internal'] is False]
for message in external_msgs:
external_user_uuids.add(message["msg_from"])
internal_messages = [message for message in messages if message['from_internal'] is True]
for uuid in internal_messages:
external_user_uuids.add(uuid["msg_to"][0])
return external_user_uuids
def add_business_details(messages):
"""Adds a @business_details key to every message in a list of messages."""
business_ids = set()
for message in messages:
business_ids.add(message['business_id'])
business_details = party.get_business_details(business_ids)
for message in messages:
message['@business_details'] = next((business for business in business_details if business["id"] == message['business_id']), None)
return messages
def add_users_and_business_details(messages):
"""Add both user and business details to messages based on data from party service"""
if not messages:
raise ValueError('messages is a required parameter and must not be empty')
messages = add_to_details(messages)
messages = add_from_details(messages)
logger.info("Successfully added to and from details")
messages = add_business_details(messages)
logger.info("Successfully added business details")
return messages
| [
"logging.getLogger",
"secure_message.services.service_toggles.internal_user_service.get_user_details",
"secure_message.services.service_toggles.party.get_business_details",
"collections.namedtuple"
] | [((340, 537), 'collections.namedtuple', 'collections.namedtuple', (['"""MessageArgs"""', '"""page limit business_id surveys cc label desc ce is_closed my_conversations new_respondent_conversations all_conversation_types unread_conversations"""'], {}), "('MessageArgs',\n 'page limit business_id surveys cc label desc ce is_closed my_conversations new_respondent_conversations all_conversation_types unread_conversations'\n )\n", (362, 537), False, 'import collections\n'), ((297, 324), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (314, 324), False, 'import logging\n'), ((9846, 9886), 'secure_message.services.service_toggles.party.get_business_details', 'party.get_business_details', (['business_ids'], {}), '(business_ids)\n', (9872, 9886), False, 'from secure_message.services.service_toggles import party, internal_user_service\n'), ((7125, 7171), 'secure_message.services.service_toggles.internal_user_service.get_user_details', 'internal_user_service.get_user_details', (['msg_to'], {}), '(msg_to)\n', (7163, 7171), False, 'from secure_message.services.service_toggles import party, internal_user_service\n'), ((8625, 8673), 'secure_message.services.service_toggles.internal_user_service.get_user_details', 'internal_user_service.get_user_details', (['msg_from'], {}), '(msg_from)\n', (8663, 8673), False, 'from secure_message.services.service_toggles import party, internal_user_service\n')] |
import hashlib
import mimetypes
from urllib.parse import unquote
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.http import HttpResponseRedirect
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.fields import CreationDateTimeField, ModificationDateTimeField
from great_components.mixins import GA360Mixin
from modelcluster.contrib.taggit import ClusterTaggableManager
from modelcluster.models import ClusterableModel, ParentalKey
from taggit.managers import TaggableManager
from taggit.models import ItemBase, TagBase, TaggedItemBase
from wagtail.admin.edit_handlers import (
FieldPanel,
InlinePanel,
MultiFieldPanel,
ObjectList,
PageChooserPanel,
StreamFieldPanel,
TabbedInterface,
)
from wagtail.contrib.redirects.models import Redirect
from wagtail.contrib.settings.models import BaseSetting, register_setting
from wagtail.core import blocks
from wagtail.core.blocks.stream_block import StreamBlockValidationError
from wagtail.core.fields import RichTextField, StreamField
from wagtail.core.models import Orderable, Page
from wagtail.images import get_image_model_string
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.images.models import AbstractImage, AbstractRendition, Image
from wagtail.snippets.models import register_snippet
from wagtail.utils.decorators import cached_classmethod
from wagtailmedia.models import Media
from core import blocks as core_blocks, mixins
from core.case_study_index import delete_cs_index, update_cs_index
from core.constants import BACKLINK_QUERYSTRING_NAME, RICHTEXT_FEATURES__MINIMAL
from core.context import get_context_provider
from core.utils import PageTopicHelper, get_first_lesson
from exportplan.core.data import (
SECTION_SLUGS as EXPORTPLAN_SLUGS,
SECTIONS as EXPORTPLAN_URL_MAP,
)
# If we make a Redirect appear as a Snippet, we can sync it via Wagtail-Transfer
register_snippet(Redirect)
class GreatMedia(Media):
transcript = models.TextField(
        verbose_name=_('Transcript'), blank=False, null=True  # left null because it was an existing field
)
subtitles_en = models.TextField(
verbose_name=_('English subtitles'),
null=True,
blank=True,
help_text='English-language subtitles for this video, in VTT format',
)
admin_form_fields = Media.admin_form_fields + (
'transcript',
'subtitles_en',
)
@property
def sources(self):
return [
{
'src': self.url,
'type': mimetypes.guess_type(self.filename)[0] or 'application/octet-stream',
'transcript': self.transcript,
}
]
@property
def subtitles(self):
output = []
# TO COME: support for more than just English
if self.subtitles_en:
output.append(
{
'srclang': 'en',
'label': 'English',
'url': reverse('core:subtitles-serve', args=[self.id, 'en']),
'default': False,
},
)
return output
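# Illustrative sketch of the payloads built by GreatMedia above (values are examples):
#
#     media.sources   -> [{"src": media.url,
#                          "type": <guessed MIME type or "application/octet-stream">,
#                          "transcript": media.transcript}]
#     media.subtitles -> [{"srclang": "en", "label": "English",
#                          "url": reverse("core:subtitles-serve", args=[media.id, "en"]),
#                          "default": False}]  # only when subtitles_en is set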
class AbstractObjectHash(models.Model):
class Meta:
abstract = True
content_hash = models.CharField(max_length=1000)
@staticmethod
def generate_content_hash(field_file):
filehash = hashlib.md5()
field_file.open()
filehash.update(field_file.read())
field_file.close()
return filehash.hexdigest()
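# Illustrative sketch (hypothetical usage): the hash above is an MD5 digest of the stored
# file's bytes, e.g. to keep a DocumentHash row in sync with its document:
#
#     content_hash = AbstractObjectHash.generate_content_hash(document.file)
#     DocumentHash.objects.update_or_create(document=document,
#                                           defaults={"content_hash": content_hash})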
class DocumentHash(AbstractObjectHash):
document = models.ForeignKey(
'wagtaildocs.Document', null=True, blank=True, on_delete=models.CASCADE, related_name='+'
)
class ImageHash(AbstractObjectHash):
image = models.ForeignKey('wagtailimages.Image', null=True, blank=True, on_delete=models.CASCADE, related_name='+')
class AltTextImage(AbstractImage):
alt_text = models.CharField(max_length=255, blank=True)
admin_form_fields = Image.admin_form_fields + ('alt_text',)
class Rendition(AbstractRendition):
image = models.ForeignKey(AltTextImage, on_delete=models.CASCADE, related_name='renditions')
class Meta:
unique_together = ('image', 'filter_spec', 'focal_point_key')
@property
def alt(self):
return self.image.alt_text
@register_snippet
class Tour(ClusterableModel):
page = models.OneToOneField('wagtailcore.Page', on_delete=models.CASCADE, related_name='tour')
title = models.CharField(max_length=255)
body = models.CharField(max_length=255)
button_text = models.CharField(max_length=255)
panels = [
PageChooserPanel('page'),
FieldPanel('title'),
FieldPanel('body'),
FieldPanel('button_text'),
MultiFieldPanel([InlinePanel('steps')], heading='Steps'),
]
def __str__(self):
return self.page.title
class TourStep(Orderable):
title = models.CharField(max_length=255)
body = models.CharField(max_length=255)
position = models.CharField(max_length=255)
selector = models.CharField(max_length=255)
tour = ParentalKey(Tour, on_delete=models.CASCADE, related_name='steps')
panels = [
FieldPanel('title'),
FieldPanel('body'),
FieldPanel('position'),
FieldPanel('selector'),
]
@register_snippet
class Product(models.Model):
name = models.CharField(max_length=255)
panels = [
FieldPanel('name'),
]
def __str__(self):
return self.name
@register_snippet
class Region(models.Model):
name = models.CharField(max_length=100, unique=True)
panels = [FieldPanel('name')]
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@register_snippet
class Country(models.Model):
name = models.CharField(max_length=255)
slug = models.SlugField(max_length=100, unique=True)
region = models.ForeignKey(Region, null=True, blank=True, on_delete=models.SET_NULL)
panels = [
FieldPanel('name'),
FieldPanel('region'),
]
class Meta:
verbose_name_plural = 'Countries'
ordering = ('name',)
def save(self, *args, **kwargs):
# Automatically set slug on save, if not already set
if not self.slug:
self.slug = slugify(self.name)
super().save(*args, **kwargs)
def __str__(self):
return self.name
@register_snippet
class Tag(models.Model):
name = models.CharField(max_length=100, unique=True)
panels = [FieldPanel('name')]
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@register_snippet
class IndustryTag(models.Model):
name = models.CharField(max_length=100, unique=True)
icon = models.ForeignKey(
AltTextImage,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
panels = [FieldPanel('name'), ImageChooserPanel('icon')]
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class TimeStampedModel(models.Model):
"""Modified version of django_extensions.db.models.TimeStampedModel
Unfortunately, because null=True needed to be added to create and
modified fields, inheritance causes issues with field clash.
"""
created = CreationDateTimeField('created', null=True)
modified = ModificationDateTimeField('modified', null=True)
def save(self, **kwargs):
self.update_modified = kwargs.pop('update_modified', getattr(self, 'update_modified', True))
super().save(**kwargs)
class Meta:
get_latest_by = 'modified'
ordering = (
'-modified',
'-created',
)
abstract = True
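# Illustrative note (an assumption based on django-extensions' ModificationDateTimeField,
# which checks the instance's `update_modified` attribute): calling
# save(update_modified=False) on a TimeStampedModel subclass should leave `modified`
# untouched while still persisting other changes.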
# Content models
class CMSGenericPage(
mixins.EnableTourMixin,
mixins.AuthenticatedUserRequired,
mixins.WagtailGA360Mixin,
GA360Mixin,
Page,
):
"""
Generic page, freely inspired by Codered page
"""
class Meta:
abstract = True
# Do not allow this page type to be created in wagtail admin
is_creatable = False
template_choices = []
###############
# Layout fields
###############
template = models.CharField(
max_length=255,
choices=None,
)
#########
# Panels
##########
layout_panels = [FieldPanel('template')]
settings_panels = [FieldPanel('slug')] + Page.settings_panels
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
field = self._meta.get_field('template')
field.choices = self.template_choices
field.required = True
@cached_classmethod
def get_edit_handler(cls): # NOQA N805
panels = [
ObjectList(cls.content_panels, heading='Content'),
ObjectList(cls.layout_panels, heading='Layout'),
ObjectList(cls.settings_panels, heading='Settings', classname='settings'),
]
return TabbedInterface(panels).bind_to(model=cls)
def get_template(self, request, *args, **kwargs):
return self.template
def get_context(self, request, *args, **kwargs):
context = super().get_context(request)
self.set_ga360_payload(
page_id=self.id,
business_unit=settings.GA360_BUSINESS_UNIT,
site_section=str(self.url or '/').split('/')[1],
)
self.add_ga360_data_to_payload(request)
context['ga360'] = self.ga360_payload
provider = get_context_provider(request=request, page=self)
if provider:
context.update(provider.get_context_data(request=request, page=self))
return context
class LandingPage(CMSGenericPage):
parent_page_types = [
'domestic.DomesticHomePage', # TODO: once we've restructured, remove this permission
'domestic.GreatDomesticHomePage',
]
subpage_types = [
'core.ListPage',
'core.InterstitialPage',
'domestic.DomesticDashboard',
]
template_choices = (
('learn/landing_page.html', 'Learn'),
('core/generic_page.html', 'Generic'),
)
################
# Content fields
################
description = RichTextField()
button = StreamField([('button', core_blocks.ButtonBlock(icon='cog'))], null=True, blank=True)
image = models.ForeignKey(
get_image_model_string(), null=True, blank=True, on_delete=models.SET_NULL, related_name='+'
)
body = StreamField(
[
('section', core_blocks.SectionBlock()),
('title', core_blocks.TitleBlock()),
('text', blocks.RichTextBlock(icon='openquote', helptext='Add a textblock')),
('image', core_blocks.ImageBlock()),
],
null=True,
blank=True,
)
components = StreamField(
[
('route', core_blocks.RouteSectionBlock()),
],
null=True,
blank=True,
)
#########
# Panels
#########
content_panels = CMSGenericPage.content_panels + [
FieldPanel('description'),
StreamFieldPanel('button'),
ImageChooserPanel('image'),
StreamFieldPanel('components'),
StreamFieldPanel('body'),
]
class InterstitialPage(CMSGenericPage):
parent_page_types = ['core.LandingPage']
template_choices = (('learn/interstitial.html', 'Learn'),)
################
# Content fields
################
button = StreamField([('button', core_blocks.ButtonBlock(icon='cog'))], null=True, blank=True)
#########
# Panels
#########
content_panels = CMSGenericPage.content_panels + [
StreamFieldPanel('button'),
]
class ListPage(CMSGenericPage):
parent_page_types = ['core.LandingPage']
subpage_types = ['core.CuratedListPage']
template_choices = (('learn/automated_list_page.html', 'Learn'),)
record_read_progress = models.BooleanField(
default=False,
help_text='Should we record when a user views a page in this collection?',
)
class Meta:
verbose_name = 'Automated list page'
verbose_name_plural = 'Automated list pages'
def get_context(self, request, *args, **kwargs):
from core.helpers import get_high_level_completion_progress
from domestic.helpers import get_lesson_completion_status
context = super().get_context(request)
if request.user.is_authenticated:
completion_status = get_lesson_completion_status(request.user)
context['high_level_completion_progress'] = get_high_level_completion_progress(
completion_status=completion_status,
)
return context
################
# Content fields
################
description = RichTextField()
button_label = models.CharField(max_length=100)
#########
# Panels
#########
settings_panels = CMSGenericPage.settings_panels + [FieldPanel('record_read_progress')]
content_panels = CMSGenericPage.content_panels + [FieldPanel('description'), FieldPanel('button_label')]
class CuratedListPage(CMSGenericPage):
parent_page_types = ['core.ListPage']
subpage_types = [
'core.TopicPage',
]
template_choices = (('learn/curated_list_page.html', 'Learn'),)
################
# Content fields
################
heading = RichTextField()
image = models.ForeignKey(
get_image_model_string(), null=True, blank=True, on_delete=models.SET_NULL, related_name='+'
)
########
# Panels
########
content_panels = CMSGenericPage.content_panels + [
FieldPanel('heading'),
ImageChooserPanel('image'),
]
def get_topics(self, live=True) -> models.QuerySet:
qs = TopicPage.objects.live().specific().descendant_of(self)
if live:
qs = qs.live()
return qs
@cached_property
def count_topics(self):
return self.get_topics().count()
@cached_property
def count_detail_pages(self):
count = 0
for topic in self.get_topics():
count += DetailPage.objects.live().descendant_of(topic).count()
return count
def get_context(self, request, *args, **kwargs):
from core.helpers import (
get_high_level_completion_progress,
get_module_completion_progress,
)
from domestic.helpers import get_lesson_completion_status
context = super().get_context(request)
# Give the template a simple way to link back to the parent
# learning module (ListPage)
context['parent_page_url'] = self.get_parent().url
if request.user.is_authenticated:
# get this once, so we don't waste the network call to get the data twice
completion_status = get_lesson_completion_status(request.user)
context['module_completion_progress'] = get_module_completion_progress(
completion_status=completion_status,
module_page=self,
)
context['high_level_completion_progress'] = get_high_level_completion_progress(
completion_status=completion_status,
)
return context
def hero_singular_validation(value):
if value and len(value) > 1:
raise StreamBlockValidationError(
non_block_errors=ValidationError('Only one image or video allowed in Hero section', code='invalid'),
)
class TopicPage(mixins.AuthenticatedUserRequired, Page):
"""Structural page to allow for cleaner mapping of lessons (`DetailPage`s)
to modules (`CuratedListPage`s).
    Not intended to be viewed by end users, so will redirect to the parent
module if accessed.
Also, for the above reason, mixins.WagtailGA360Mixin and GA360Mixin
are not used."""
parent_page_types = ['core.CuratedListPage']
subpage_types = [
'core.DetailPage',
'core.LessonPlaceholderPage',
]
# `title` comes from Page superclass and that's all we need here
def _redirect_to_parent_module(self):
return HttpResponseRedirect(self.get_parent().url)
def serve_preview(self, request, mode_name='dummy'):
# It doesn't matter what is passed as mode_name - we always redirect
return self._redirect_to_parent_module()
def serve(self, request):
return self._redirect_to_parent_module()
class LessonPlaceholderPage(mixins.AuthenticatedUserRequired, Page):
"""Structural page to allow for configuring and representing very simple
to modules (`CuratedListPage`s).
Not intented to be viewed by end users, so will redirect to the parent
module if accessed.
Also, for the above reason, mixins.WagtailGA360Mixin and GA360Mixin
are not used."""
parent_page_types = ['core.TopicPage']
subpage_types = [] # No child pages allowed for placeholders
# `title` comes from Page superclass and that's all we need here
def _redirect_to_parent_module(self):
dest = CuratedListPage.objects.ancestor_of(self).first().url
return HttpResponseRedirect(dest)
def serve_preview(self, request, mode_name='dummy'):
# It doesn't matter what is passed as mode_name - we always redirect
return self._redirect_to_parent_module()
def serve(self, request):
return self._redirect_to_parent_module()
class DetailPage(CMSGenericPage):
estimated_read_duration = models.DurationField(null=True, blank=True)
parent_page_types = [
'core.CuratedListPage', # TEMPORARY: remove after topics refactor migration has run
'core.TopicPage',
]
template_choices = (('learn/detail_page.html', 'Learn'),)
class Meta:
verbose_name = 'Detail page'
verbose_name_plural = 'Detail pages'
################
# Content fields
################
hero = StreamField(
[
('Image', core_blocks.ImageBlock(template='core/includes/_hero_image.html')),
('Video', core_blocks.SimpleVideoBlock(template='core/includes/_hero_video.html')),
],
null=True,
blank=True,
validators=[hero_singular_validation],
)
objective = StreamField(
[
(
'paragraph',
blocks.RichTextBlock(options={'class': 'objectives'}),
),
('ListItem', core_blocks.Item()),
]
)
body = StreamField(
[
(
'paragraph',
blocks.StructBlock(
[('paragraph', blocks.RichTextBlock())],
template='core/struct_paragraph_block.html',
icon='fa-font',
),
),
(
'video',
blocks.StructBlock(
[('video', core_blocks.VideoBlock())],
template='core/struct_video_block.html',
icon='fa-play',
),
),
('case_study', core_blocks.CaseStudyStaticBlock(icon='fa-book')),
(
'Step',
core_blocks.StepByStepBlock(icon='cog'),
),
(
'fictional_example',
blocks.StructBlock(
[('fiction_body', blocks.RichTextBlock(icon='openquote'))],
template='learn/fictional_company_example.html',
icon='fa-commenting-o',
),
),
(
'ITA_Quote',
core_blocks.ITAQuoteBlock(icon='fa-quote-left'),
),
(
'pros_cons',
blocks.StructBlock(
[
(
'pros',
blocks.StreamBlock(
[
(
'item',
core_blocks.Item(icon='fa-arrow-right'),
)
]
),
),
(
'cons',
blocks.StreamBlock(
[
(
'item',
core_blocks.Item(icon='fa-arrow-right'),
)
]
),
),
],
template='learn/pros_and_cons.html',
icon='fa-arrow-right',
),
),
('choose_do_not_choose', core_blocks.ChooseDoNotChooseBlock()),
(
'image',
core_blocks.ImageBlock(
template='core/includes/_image_full_width.html',
help_text='Image displayed within a full-page-width block',
),
),
(
'video',
core_blocks.SimpleVideoBlock(
template='core/includes/_video_full_width.html',
help_text='Video displayed within a full-page-width block',
),
),
]
)
recap = StreamField(
[
(
'recap_item',
blocks.StructBlock(
[
('title', blocks.CharBlock(icon='fa-header')),
(
'item',
blocks.StreamBlock(
[
(
'item',
core_blocks.Item(),
)
]
),
),
],
template='learn/recap.html',
icon='fa-commenting-o',
),
)
]
)
#########
# Panels
##########
content_panels = Page.content_panels + [
StreamFieldPanel('hero'),
StreamFieldPanel('objective'),
StreamFieldPanel('body'),
StreamFieldPanel('recap'),
]
def handle_page_view(self, request):
if request.user.is_authenticated:
# checking if the page should record read progress
# checking if the page is already marked as read
list_page = (
ListPage.objects.ancestor_of(self)
.filter(record_read_progress=True)
.exclude(page_views_list__sso_id=request.user.pk, page_views_list__page=self)
.first()
)
if list_page:
PageView.objects.get_or_create(
page=self,
list_page=list_page,
sso_id=request.user.pk,
)
def serve(self, request, *args, **kwargs):
self.handle_page_view(request)
return super().serve(request, **kwargs)
@cached_property
def topic_title(self):
return self.get_parent().title
@cached_property
def module(self):
"""Gets the learning module this lesson belongs to"""
return CuratedListPage.objects.live().specific().ancestor_of(self).first()
@cached_property
def _export_plan_url_map(self):
"""Return a lookup dictionary of URL Slugs->title for all the
Export Plan sections we have."""
return {url: values['title'] for url, values in EXPORTPLAN_URL_MAP.items()}
def _get_backlink(self, request):
"""Try to extract a backlink (used for a link to the export plan) from the
querystring on the request that brought us to this view.
Only accepts backlinks that we KNOW are for the export plan, else ignore it."""
backlink_path = request.GET.get(BACKLINK_QUERYSTRING_NAME, '')
if backlink_path is not None:
backlink_path = unquote(backlink_path)
if len(backlink_path.split('/')) > 2 and (
backlink_path.split('/')[3] in EXPORTPLAN_SLUGS and '://' not in backlink_path
):
# The check for '://' will stop us accepting a backlink which
# features a full URL as its OWN querystring param (eg a crafted attack
# URL), but that's an acceptable limitation here and is very unlikely
# to happen.
return backlink_path
return None # safe default
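    # Illustrative summary of the validation above (BACKLINK_QUERYSTRING_NAME is the
    # querystring key; examples only):
    #   - a path whose fourth '/'-separated segment is one of EXPORTPLAN_SLUGS and which
    #     contains no '://' is returned as the backlink;
    #   - anything else (absolute URLs, unknown slugs, missing parameter) yields None.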
def _get_backlink_title(self, backlink_path):
"""For a given backlink, see if we can get a title that goes with it.
For now, this is limited only to Export Plan pages/links.
"""
# We have to re-arrange EXPORT_PLAN_SECTION_TITLES_URLS after import
# because it features lazily-evaluated URLs that aren't ready when
# models are imported
if backlink_path and len(backlink_path.split('/')) > 3:
_path = backlink_path.split('/')[3]
return self._export_plan_url_map.get(_path)
def get_context(self, request, *args, **kwargs):
context = super().get_context(request)
context['refresh_on_market_change'] = True
# Prepare backlink to the export plan if we detect one and can validate it
_backlink = self._get_backlink(request)
if _backlink:
context['backlink'] = _backlink
context['backlink_title'] = self._get_backlink_title(_backlink)
if isinstance(self.get_parent().specific, TopicPage):
# In a conditional because a DetailPage currently MAY be used as
# a child of another page type...
page_topic_helper = PageTopicHelper(self)
next_lesson = page_topic_helper.get_next_lesson()
context['current_lesson'] = self
context['current_module'] = page_topic_helper.module
if page_topic_helper:
topic_page = page_topic_helper.get_page_topic()
if topic_page:
context['current_topic'] = topic_page
context['page_topic'] = topic_page.title
if next_lesson:
context['next_lesson'] = next_lesson
else:
next_module = self.module.get_next_sibling()
if not next_module:
return context
context['next_module'] = next_module.specific
context['next_lesson'] = get_first_lesson(next_module)
return context
class PageView(TimeStampedModel):
page = models.ForeignKey(DetailPage, on_delete=models.CASCADE, related_name='page_views')
list_page = models.ForeignKey(ListPage, on_delete=models.CASCADE, related_name='page_views_list')
sso_id = models.TextField()
class Meta:
ordering = ['page__pk']
unique_together = ['page', 'sso_id']
# TODO: deprecate and remove
class ContentModuleTag(TaggedItemBase):
content_object = ParentalKey('core.ContentModule', on_delete=models.CASCADE, related_name='tagged_items')
# TODO: deprecate and remove
@register_snippet
class ContentModule(ClusterableModel):
title = models.CharField(max_length=255)
content = RichTextField()
tags = TaggableManager(through=ContentModuleTag, blank=True)
panels = [
FieldPanel('title'),
FieldPanel('content'),
FieldPanel('tags'),
]
def __str__(self):
return self.title
class PersonalisationHSCodeTag(TagBase):
"""Custom tag for personalisation.
Tag value will be a HS6, HS4 or HS2 code"""
# free_tagging = False # DISABLED until tag data only comes via data migration
class Meta:
verbose_name = 'HS Code tag for personalisation'
verbose_name_plural = 'HS Code tags for personalisation'
class PersonalisationCountryTag(TagBase):
"""Custom tag for personalisation.
Tag value will be an ISO-2 Country code ('DE')
"""
free_tagging = False
class Meta:
verbose_name = 'Country tag for personalisation'
verbose_name_plural = 'Country tags for personalisation'
class PersonalisationRegionTag(TagBase):
"""Custom tag for personalisation.
Tag value will be a geographical string ('Europe')
"""
free_tagging = False
class Meta:
verbose_name = 'Region tag for personalisation'
verbose_name_plural = 'Region tags for personalisation'
class PersonalisationTradingBlocTag(TagBase):
"""Custom tag for personalisation.
Tag value will be an Trading blocs
"""
free_tagging = False
class Meta:
verbose_name = 'Trading bloc tag for personalisation'
verbose_name_plural = 'Trading bloc tags for personalisation'
# If you're wondering what's going on here:
# https://docs.wagtail.io/en/stable/reference/pages/model_recipes.html#custom-tag-models
class HSCodeTaggedCaseStudy(ItemBase):
tag = models.ForeignKey(
PersonalisationHSCodeTag, related_name='hscode_tagged_case_studies', on_delete=models.CASCADE
)
content_object = ParentalKey(to='core.CaseStudy', on_delete=models.CASCADE, related_name='hs_code_tagged_items')
class CountryTaggedCaseStudy(ItemBase):
tag = models.ForeignKey(
PersonalisationCountryTag, related_name='country_tagged_case_studies', on_delete=models.CASCADE
)
content_object = ParentalKey(to='core.CaseStudy', on_delete=models.CASCADE, related_name='country_tagged_items')
class RegionTaggedCaseStudy(ItemBase):
tag = models.ForeignKey(
PersonalisationRegionTag, related_name='region_tagged_case_studies', on_delete=models.CASCADE
)
content_object = ParentalKey(to='core.CaseStudy', on_delete=models.CASCADE, related_name='region_tagged_items')
class TradingBlocTaggedCaseStudy(ItemBase):
tag = models.ForeignKey(
PersonalisationTradingBlocTag, related_name='trading_bloc_tagged_case_studies', on_delete=models.CASCADE
)
content_object = ParentalKey(
to='core.CaseStudy', on_delete=models.CASCADE, related_name='trading_bloc_tagged_items'
)
def _high_level_validation(value, error_messages):
TEXT_BLOCK = 'text' # noqa N806
MEDIA_BLOCK = 'media' # noqa N806
QUOTE_BLOCK = 'quote' # noqa N806
# we need to be strict about presence and ordering of these nodes
if [node.block_type for node in value if node.block_type != QUOTE_BLOCK] != [MEDIA_BLOCK, TEXT_BLOCK]:
error_messages.append(
(
'This block must contain one Media section (with one or '
'two items in it) and/or a Quote section, then one Text section following it.'
)
)
return error_messages
def _low_level_validation(value, error_messages):
# Check content of media node, which should be present here
MEDIA_BLOCK = 'media' # noqa N806
VIDEO_BLOCK = 'video' # noqa N806
for node in value:
if node.block_type == MEDIA_BLOCK:
subnode_block_types = [subnode.block_type for subnode in node.value]
if len(subnode_block_types) == 2:
if set(subnode_block_types) == {VIDEO_BLOCK}:
# Two videos: not allowed
error_messages.append('Only one video may be used in a case study.')
elif subnode_block_types[1] == VIDEO_BLOCK:
# implicitly, [0] must be an image
# video after image: not allowed
error_messages.append('The video must come before a still image.')
return error_messages
def case_study_body_validation(value):
"""Ensure the case study has exactly both a media node and a text node
and that the media node has the following content:
* One image, only
* One video, only
* One video + One image
    * (video must come first so that it is displayed first)
* Two images
"""
error_messages = []
if value:
error_messages = _high_level_validation(value, error_messages)
error_messages = _low_level_validation(value, error_messages)
if error_messages:
raise StreamBlockValidationError(
non_block_errors=ValidationError('; '.join(error_messages), code='invalid'),
)
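# Illustrative summary of the combinations accepted by case_study_body_validation
# (derived from the checks above; quote blocks are ignored by the ordering check):
#
#     media(image) + text           -> valid
#     media(video) + text           -> valid
#     media(video, image) + text    -> valid (video first)
#     media(image, video) + text    -> invalid (video must come before the image)
#     media(video, video) + text    -> invalid (only one video allowed)
#     text before media, or a missing media/text section -> invalid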
class MagnaPageChooserPanel(PageChooserPanel):
show_label = False
field_template = 'admin/wagtailadmin/edit_handlers/field_panel_field.html'
def render_as_field(self):
instance_obj = self.get_chosen_item()
context = {
'field': self.bound_field,
self.object_type_name: instance_obj,
'is_chosen': bool(instance_obj), # DEPRECATED - passed to templates for backwards compatibility only
# Added obj_type on base class method render_as_field
'obj_type': instance_obj.specific.__class__.__name__ if instance_obj else None,
}
return mark_safe(render_to_string(self.field_template, context))
class CaseStudyRelatedPages(Orderable):
case_study = ParentalKey(
'core.CaseStudy',
related_name='related_pages',
on_delete=models.SET_NULL,
null=True,
blank=True,
)
page = models.ForeignKey(
'wagtailcore.Page',
on_delete=models.CASCADE,
related_name='+',
)
panels = [
MagnaPageChooserPanel('page', [DetailPage, CuratedListPage, TopicPage]),
]
class Meta:
unique_together = ['case_study', 'page']
@register_snippet
class CaseStudy(ClusterableModel):
"""Dedicated snippet for use as a case study. Supports personalised
selection via its tags.
The decision about the appropriate Case Study block to show will happen
when the page attempts to render the relevant CaseStudyBlock.
Note that this is rendered via Wagtail's ModelAdmin, so appears in the sidebar,
but we have to keep it registered as a Snippet to be able to transfer it
with Wagtail-Transfer
"""
title = models.CharField(
max_length=255,
blank=False,
verbose_name='Internal case study title',
)
# old name company_name
summary_context = models.CharField(max_length=255, blank=False, default='How we did it')
# old name summary
lead_title = models.TextField(blank=False) # Deliberately not rich-text / no formatting
body = StreamField(
[
(
'media',
blocks.StreamBlock(
[
('video', core_blocks.SimpleVideoBlock(template='core/includes/_case_study_video.html')),
('image', core_blocks.ImageBlock()),
],
min_num=1,
max_num=2,
),
),
(
'text',
blocks.RichTextBlock(
features=RICHTEXT_FEATURES__MINIMAL,
),
),
(
'quote',
core_blocks.CaseStudyQuoteBlock(),
),
],
validators=[case_study_body_validation],
help_text=(
'This block must contain one Media section (with one or two items in it) '
'and/or Quote sections, then one Text section.'
),
)
# We are keeping the personalisation-relevant tags in separate
# fields to aid lookup and make the UX easier for editors
hs_code_tags = ClusterTaggableManager(through='core.HSCodeTaggedCaseStudy', blank=True, verbose_name='HS-code tags')
country_code_tags = ClusterTaggableManager(
through='core.CountryTaggedCaseStudy', blank=True, verbose_name='Country tags'
)
region_code_tags = ClusterTaggableManager(
through='core.RegionTaggedCaseStudy', blank=True, verbose_name='Region tags'
)
trading_bloc_code_tags = ClusterTaggableManager(
through='core.TradingBlocTaggedCaseStudy', blank=True, verbose_name='Trading bloc tags'
)
created = CreationDateTimeField('created', null=True)
modified = ModificationDateTimeField('modified', null=True)
panels = [
MultiFieldPanel(
[
FieldPanel('title'),
FieldPanel('lead_title'),
FieldPanel('summary_context'),
StreamFieldPanel('body'),
],
heading='Case Study content',
),
MultiFieldPanel(
[
FieldPanel('hs_code_tags'),
FieldPanel('country_code_tags'),
FieldPanel('region_code_tags'),
FieldPanel('trading_bloc_code_tags'),
],
heading='Case Study tags for Personalisation',
),
MultiFieldPanel(
[
InlinePanel('related_pages', label='Related pages'),
],
heading='Related Lesson, Topic & Module, also used for Personalisation',
),
]
def __str__(self):
display_name = self.title if self.title else self.summary_context
return f'{display_name}'
def save(self, **kwargs):
# When we create a new CS need to call create to obtain an ID for indexing
self.update_modified = kwargs.pop('update_modified', getattr(self, 'update_modified', True))
super().save(**kwargs)
update_cs_index(self)
def delete(self, **kwargs):
delete_cs_index(self.id)
super().delete(**kwargs)
def get_cms_standalone_view_url(self):
return reverse('cms_extras:case-study-view', args=[self.id])
class Meta:
verbose_name_plural = 'Case studies'
get_latest_by = 'modified'
ordering = (
'-modified',
'-created',
)
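# Illustrative query sketch: the personalisation tag fields above are taggit-style
# managers, so candidate case studies can be narrowed with ordinary tag lookups.
# The tag values used here ('123456', 'Germany') are hypothetical examples only.
#
#   candidates = CaseStudy.objects.filter(hs_code_tags__name='123456')
#   candidates = candidates | CaseStudy.objects.filter(country_code_tags__name='Germany')
#   candidates = candidates.distinct().order_by('-modified')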
@register_setting
class CaseStudyScoringSettings(BaseSetting):
threshold = models.DecimalField(
help_text='This is the minimum score which a case study needs to have to be '
        'considered before being presented to users.',
default=10,
decimal_places=3,
max_digits=5,
)
lesson = models.DecimalField(
help_text="Score given when user's lesson is tagged in the case study.",
default=8,
decimal_places=3,
max_digits=5,
)
topic = models.DecimalField(
help_text="Score given when user's lesson's topic is tagged in the case study "
        'unless there is also a lesson match.',
default=4,
decimal_places=3,
max_digits=5,
)
module = models.DecimalField(
help_text="Score given when the user's lesson's module is tagged in the case study "
        'unless there is also a lesson or topic match.',
default=2,
decimal_places=3,
max_digits=5,
)
product_hs6 = models.DecimalField(
help_text='Score given when any case study HS6 tag matches the complete HS6 code of '
"any of the user's products",
default=8,
decimal_places=3,
max_digits=5,
)
product_hs4 = models.DecimalField(
help_text="Score given when any case study HS4 tag matches the first 4 digits of any of the user's products "
'unless there is an HS6 match.',
default=4,
decimal_places=3,
max_digits=5,
)
product_hs2 = models.DecimalField(
help_text="Score given when any case study HS2 tag matches the first 2 digits of any of the user's products "
'unless there is an HS6 or HS4 match.',
default=2,
decimal_places=3,
max_digits=5,
)
country_exact = models.DecimalField(
help_text="Score given when any case study country tag exactly matches one of the user's export markets.",
default=4,
decimal_places=3,
max_digits=5,
)
country_region = models.DecimalField(
help_text="Score given when any case study region tag matches the region of any of the user's export markets "
'unless there is an exact country match.',
default=2,
decimal_places=3,
max_digits=5,
)
trading_blocs = models.DecimalField(
        help_text='Score given when any case study trading bloc tag matches any trading bloc that any of '
"the user's export markets falls into unless there is an exact country or region match.",
default=2,
decimal_places=3,
max_digits=5,
)
product_tab = [MultiFieldPanel([FieldPanel('product_hs6'), FieldPanel('product_hs4'), FieldPanel('product_hs2')])]
market_tab = [
MultiFieldPanel([FieldPanel('country_exact'), FieldPanel('country_region'), FieldPanel('trading_blocs')])
]
lesson_tab = [MultiFieldPanel([FieldPanel('lesson'), FieldPanel('topic'), FieldPanel('module')])]
threshold_tab = [
MultiFieldPanel(
[
FieldPanel('threshold'),
]
)
]
edit_handler = TabbedInterface(
[
ObjectList(product_tab, heading='Product'),
ObjectList(market_tab, heading='Market'),
ObjectList(lesson_tab, heading='Lesson'),
ObjectList(threshold_tab, heading='Threshold'),
]
)
class Meta:
verbose_name = 'Case Study Scoring'
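# Illustrative usage sketch: as a registered Wagtail setting, the scoring weights
# are read per site rather than instantiated directly. `site` is a hypothetical
# wagtailcore Site instance; the exact accessor available (for_site / for_request)
# depends on the Wagtail version in use.
#
#   scoring = CaseStudyScoringSettings.for_site(site)
#   if case_study_score >= scoring.threshold:
#       ...  # the case study clears the minimum-score bar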
| [
"core.case_study_index.update_cs_index",
"django.db.models.TextField",
"wagtail.admin.edit_handlers.PageChooserPanel",
"exportplan.core.data.SECTIONS.items",
"django.core.exceptions.ValidationError",
"core.blocks.VideoBlock",
"django.urls.reverse",
"wagtail.admin.edit_handlers.FieldPanel",
"wagtail.admin.edit_handlers.ObjectList",
"core.blocks.CaseStudyStaticBlock",
"mimetypes.guess_type",
"django.template.loader.render_to_string",
"django.http.HttpResponseRedirect",
"core.blocks.TitleBlock",
"core.blocks.CaseStudyQuoteBlock",
"django.db.models.ForeignKey",
"core.blocks.ChooseDoNotChooseBlock",
"core.utils.PageTopicHelper",
"core.blocks.ITAQuoteBlock",
"core.context.get_context_provider",
"django.db.models.CharField",
"urllib.parse.unquote",
"django.db.models.OneToOneField",
"wagtail.admin.edit_handlers.TabbedInterface",
"django.utils.translation.ugettext_lazy",
"wagtail.core.blocks.CharBlock",
"hashlib.md5",
"core.blocks.ImageBlock",
"core.blocks.RouteSectionBlock",
"django.db.models.DurationField",
"core.helpers.get_high_level_completion_progress",
"core.blocks.SimpleVideoBlock",
"core.blocks.Item",
"django.db.models.BooleanField",
"taggit.managers.TaggableManager",
"wagtail.admin.edit_handlers.StreamFieldPanel",
"django_extensions.db.fields.CreationDateTimeField",
"wagtail.admin.edit_handlers.InlinePanel",
"core.utils.get_first_lesson",
"core.helpers.get_module_completion_progress",
"modelcluster.models.ParentalKey",
"django.utils.text.slugify",
"wagtail.images.edit_handlers.ImageChooserPanel",
"core.case_study_index.delete_cs_index",
"wagtail.core.blocks.RichTextBlock",
"wagtail.snippets.models.register_snippet",
"wagtail.images.get_image_model_string",
"core.blocks.SectionBlock",
"core.blocks.StepByStepBlock",
"domestic.helpers.get_lesson_completion_status",
"django.db.models.SlugField",
"wagtail.core.fields.RichTextField",
"django.db.models.DecimalField",
"django_extensions.db.fields.ModificationDateTimeField",
"core.blocks.ButtonBlock",
"modelcluster.contrib.taggit.ClusterTaggableManager"
] | [((2206, 2232), 'wagtail.snippets.models.register_snippet', 'register_snippet', (['Redirect'], {}), '(Redirect)\n', (2222, 2232), False, 'from wagtail.snippets.models import register_snippet\n'), ((3527, 3560), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1000)'}), '(max_length=1000)\n', (3543, 3560), False, 'from django.db import models\n'), ((3845, 3958), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""wagtaildocs.Document"""'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.CASCADE', 'related_name': '"""+"""'}), "('wagtaildocs.Document', null=True, blank=True, on_delete=\n models.CASCADE, related_name='+')\n", (3862, 3958), False, 'from django.db import models\n'), ((4019, 4131), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""wagtailimages.Image"""'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.CASCADE', 'related_name': '"""+"""'}), "('wagtailimages.Image', null=True, blank=True, on_delete=\n models.CASCADE, related_name='+')\n", (4036, 4131), False, 'from django.db import models\n'), ((4179, 4223), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)'}), '(max_length=255, blank=True)\n', (4195, 4223), False, 'from django.db import models\n'), ((4339, 4428), 'django.db.models.ForeignKey', 'models.ForeignKey', (['AltTextImage'], {'on_delete': 'models.CASCADE', 'related_name': '"""renditions"""'}), "(AltTextImage, on_delete=models.CASCADE, related_name=\n 'renditions')\n", (4356, 4428), False, 'from django.db import models\n'), ((4641, 4732), 'django.db.models.OneToOneField', 'models.OneToOneField', (['"""wagtailcore.Page"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""tour"""'}), "('wagtailcore.Page', on_delete=models.CASCADE,\n related_name='tour')\n", (4661, 4732), False, 'from django.db import models\n'), ((4741, 4773), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (4757, 4773), False, 'from django.db import models\n'), ((4785, 4817), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (4801, 4817), False, 'from django.db import models\n'), ((4836, 4868), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (4852, 4868), False, 'from django.db import models\n'), ((5179, 5211), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5195, 5211), False, 'from django.db import models\n'), ((5223, 5255), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5239, 5255), False, 'from django.db import models\n'), ((5271, 5303), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5287, 5303), False, 'from django.db import models\n'), ((5319, 5351), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5335, 5351), False, 'from django.db import models\n'), ((5363, 5428), 'modelcluster.models.ParentalKey', 'ParentalKey', (['Tour'], {'on_delete': 'models.CASCADE', 'related_name': '"""steps"""'}), "(Tour, on_delete=models.CASCADE, related_name='steps')\n", (5374, 5428), False, 'from modelcluster.models import ClusterableModel, ParentalKey\n'), ((5632, 5664), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (5648, 5664), False, 'from django.db import models\n'), 
((5823, 5868), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (5839, 5868), False, 'from django.db import models\n'), ((6059, 6091), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (6075, 6091), False, 'from django.db import models\n'), ((6103, 6148), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (6119, 6148), False, 'from django.db import models\n'), ((6162, 6237), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Region'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.SET_NULL'}), '(Region, null=True, blank=True, on_delete=models.SET_NULL)\n', (6179, 6237), False, 'from django.db import models\n'), ((6718, 6763), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (6734, 6763), False, 'from django.db import models\n'), ((6958, 7003), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'unique': '(True)'}), '(max_length=100, unique=True)\n', (6974, 7003), False, 'from django.db import models\n'), ((7015, 7119), 'django.db.models.ForeignKey', 'models.ForeignKey', (['AltTextImage'], {'null': '(True)', 'blank': '(True)', 'on_delete': 'models.SET_NULL', 'related_name': '"""+"""'}), "(AltTextImage, null=True, blank=True, on_delete=models.\n SET_NULL, related_name='+')\n", (7032, 7119), False, 'from django.db import models\n'), ((7591, 7634), 'django_extensions.db.fields.CreationDateTimeField', 'CreationDateTimeField', (['"""created"""'], {'null': '(True)'}), "('created', null=True)\n", (7612, 7634), False, 'from django_extensions.db.fields import CreationDateTimeField, ModificationDateTimeField\n'), ((7650, 7698), 'django_extensions.db.fields.ModificationDateTimeField', 'ModificationDateTimeField', (['"""modified"""'], {'null': '(True)'}), "('modified', null=True)\n", (7675, 7698), False, 'from django_extensions.db.fields import CreationDateTimeField, ModificationDateTimeField\n'), ((8486, 8532), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'choices': 'None'}), '(max_length=255, choices=None)\n', (8502, 8532), False, 'from django.db import models\n'), ((10485, 10500), 'wagtail.core.fields.RichTextField', 'RichTextField', ([], {}), '()\n', (10498, 10500), False, 'from wagtail.core.fields import RichTextField, StreamField\n'), ((12182, 12296), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Should we record when a user views a page in this collection?"""'}), "(default=False, help_text=\n 'Should we record when a user views a page in this collection?')\n", (12201, 12296), False, 'from django.db import models\n'), ((13048, 13063), 'wagtail.core.fields.RichTextField', 'RichTextField', ([], {}), '()\n', (13061, 13063), False, 'from wagtail.core.fields import RichTextField, StreamField\n'), ((13083, 13115), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (13099, 13115), False, 'from django.db import models\n'), ((13642, 13657), 'wagtail.core.fields.RichTextField', 'RichTextField', ([], {}), '()\n', (13655, 13657), False, 'from wagtail.core.fields import RichTextField, StreamField\n'), ((17723, 17766), 'django.db.models.DurationField', 'models.DurationField', ([], {'null': '(True)', 'blank': '(True)'}), 
'(null=True, blank=True)\n', (17743, 17766), False, 'from django.db import models\n'), ((27063, 27150), 'django.db.models.ForeignKey', 'models.ForeignKey', (['DetailPage'], {'on_delete': 'models.CASCADE', 'related_name': '"""page_views"""'}), "(DetailPage, on_delete=models.CASCADE, related_name=\n 'page_views')\n", (27080, 27150), False, 'from django.db import models\n'), ((27162, 27252), 'django.db.models.ForeignKey', 'models.ForeignKey', (['ListPage'], {'on_delete': 'models.CASCADE', 'related_name': '"""page_views_list"""'}), "(ListPage, on_delete=models.CASCADE, related_name=\n 'page_views_list')\n", (27179, 27252), False, 'from django.db import models\n'), ((27261, 27279), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (27277, 27279), False, 'from django.db import models\n'), ((27466, 27559), 'modelcluster.models.ParentalKey', 'ParentalKey', (['"""core.ContentModule"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""tagged_items"""'}), "('core.ContentModule', on_delete=models.CASCADE, related_name=\n 'tagged_items')\n", (27477, 27559), False, 'from modelcluster.models import ClusterableModel, ParentalKey\n'), ((27655, 27687), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (27671, 27687), False, 'from django.db import models\n'), ((27702, 27717), 'wagtail.core.fields.RichTextField', 'RichTextField', ([], {}), '()\n', (27715, 27717), False, 'from wagtail.core.fields import RichTextField, StreamField\n'), ((27729, 27782), 'taggit.managers.TaggableManager', 'TaggableManager', ([], {'through': 'ContentModuleTag', 'blank': '(True)'}), '(through=ContentModuleTag, blank=True)\n', (27744, 27782), False, 'from taggit.managers import TaggableManager\n'), ((29407, 29524), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PersonalisationHSCodeTag'], {'related_name': '"""hscode_tagged_case_studies"""', 'on_delete': 'models.CASCADE'}), "(PersonalisationHSCodeTag, related_name=\n 'hscode_tagged_case_studies', on_delete=models.CASCADE)\n", (29424, 29524), False, 'from django.db import models\n'), ((29555, 29655), 'modelcluster.models.ParentalKey', 'ParentalKey', ([], {'to': '"""core.CaseStudy"""', 'on_delete': 'models.CASCADE', 'related_name': '"""hs_code_tagged_items"""'}), "(to='core.CaseStudy', on_delete=models.CASCADE, related_name=\n 'hs_code_tagged_items')\n", (29566, 29655), False, 'from modelcluster.models import ClusterableModel, ParentalKey\n'), ((29703, 29822), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PersonalisationCountryTag'], {'related_name': '"""country_tagged_case_studies"""', 'on_delete': 'models.CASCADE'}), "(PersonalisationCountryTag, related_name=\n 'country_tagged_case_studies', on_delete=models.CASCADE)\n", (29720, 29822), False, 'from django.db import models\n'), ((29853, 29953), 'modelcluster.models.ParentalKey', 'ParentalKey', ([], {'to': '"""core.CaseStudy"""', 'on_delete': 'models.CASCADE', 'related_name': '"""country_tagged_items"""'}), "(to='core.CaseStudy', on_delete=models.CASCADE, related_name=\n 'country_tagged_items')\n", (29864, 29953), False, 'from modelcluster.models import ClusterableModel, ParentalKey\n'), ((30000, 30117), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PersonalisationRegionTag'], {'related_name': '"""region_tagged_case_studies"""', 'on_delete': 'models.CASCADE'}), "(PersonalisationRegionTag, related_name=\n 'region_tagged_case_studies', on_delete=models.CASCADE)\n", (30017, 30117), False, 'from django.db import models\n'), ((30148, 
30247), 'modelcluster.models.ParentalKey', 'ParentalKey', ([], {'to': '"""core.CaseStudy"""', 'on_delete': 'models.CASCADE', 'related_name': '"""region_tagged_items"""'}), "(to='core.CaseStudy', on_delete=models.CASCADE, related_name=\n 'region_tagged_items')\n", (30159, 30247), False, 'from modelcluster.models import ClusterableModel, ParentalKey\n'), ((30299, 30427), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PersonalisationTradingBlocTag'], {'related_name': '"""trading_bloc_tagged_case_studies"""', 'on_delete': 'models.CASCADE'}), "(PersonalisationTradingBlocTag, related_name=\n 'trading_bloc_tagged_case_studies', on_delete=models.CASCADE)\n", (30316, 30427), False, 'from django.db import models\n'), ((30458, 30563), 'modelcluster.models.ParentalKey', 'ParentalKey', ([], {'to': '"""core.CaseStudy"""', 'on_delete': 'models.CASCADE', 'related_name': '"""trading_bloc_tagged_items"""'}), "(to='core.CaseStudy', on_delete=models.CASCADE, related_name=\n 'trading_bloc_tagged_items')\n", (30469, 30563), False, 'from modelcluster.models import ClusterableModel, ParentalKey\n'), ((33519, 33633), 'modelcluster.models.ParentalKey', 'ParentalKey', (['"""core.CaseStudy"""'], {'related_name': '"""related_pages"""', 'on_delete': 'models.SET_NULL', 'null': '(True)', 'blank': '(True)'}), "('core.CaseStudy', related_name='related_pages', on_delete=\n models.SET_NULL, null=True, blank=True)\n", (33530, 33633), False, 'from modelcluster.models import ClusterableModel, ParentalKey\n'), ((33687, 33772), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""wagtailcore.Page"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""+"""'}), "('wagtailcore.Page', on_delete=models.CASCADE,\n related_name='+')\n", (33704, 33772), False, 'from django.db import models\n'), ((34475, 34567), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(False)', 'verbose_name': '"""Internal case study title"""'}), "(max_length=255, blank=False, verbose_name=\n 'Internal case study title')\n", (34491, 34567), False, 'from django.db import models\n'), ((34645, 34715), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(False)', 'default': '"""How we did it"""'}), "(max_length=255, blank=False, default='How we did it')\n", (34661, 34715), False, 'from django.db import models\n'), ((34756, 34785), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(False)'}), '(blank=False)\n', (34772, 34785), False, 'from django.db import models\n'), ((35922, 36027), 'modelcluster.contrib.taggit.ClusterTaggableManager', 'ClusterTaggableManager', ([], {'through': '"""core.HSCodeTaggedCaseStudy"""', 'blank': '(True)', 'verbose_name': '"""HS-code tags"""'}), "(through='core.HSCodeTaggedCaseStudy', blank=True,\n verbose_name='HS-code tags')\n", (35944, 36027), False, 'from modelcluster.contrib.taggit import ClusterTaggableManager\n'), ((36049, 36155), 'modelcluster.contrib.taggit.ClusterTaggableManager', 'ClusterTaggableManager', ([], {'through': '"""core.CountryTaggedCaseStudy"""', 'blank': '(True)', 'verbose_name': '"""Country tags"""'}), "(through='core.CountryTaggedCaseStudy', blank=True,\n verbose_name='Country tags')\n", (36071, 36155), False, 'from modelcluster.contrib.taggit import ClusterTaggableManager\n'), ((36189, 36293), 'modelcluster.contrib.taggit.ClusterTaggableManager', 'ClusterTaggableManager', ([], {'through': '"""core.RegionTaggedCaseStudy"""', 'blank': '(True)', 'verbose_name': '"""Region tags"""'}), 
"(through='core.RegionTaggedCaseStudy', blank=True,\n verbose_name='Region tags')\n", (36211, 36293), False, 'from modelcluster.contrib.taggit import ClusterTaggableManager\n'), ((36333, 36449), 'modelcluster.contrib.taggit.ClusterTaggableManager', 'ClusterTaggableManager', ([], {'through': '"""core.TradingBlocTaggedCaseStudy"""', 'blank': '(True)', 'verbose_name': '"""Trading bloc tags"""'}), "(through='core.TradingBlocTaggedCaseStudy', blank=\n True, verbose_name='Trading bloc tags')\n", (36355, 36449), False, 'from modelcluster.contrib.taggit import ClusterTaggableManager\n'), ((36474, 36517), 'django_extensions.db.fields.CreationDateTimeField', 'CreationDateTimeField', (['"""created"""'], {'null': '(True)'}), "('created', null=True)\n", (36495, 36517), False, 'from django_extensions.db.fields import CreationDateTimeField, ModificationDateTimeField\n'), ((36533, 36581), 'django_extensions.db.fields.ModificationDateTimeField', 'ModificationDateTimeField', (['"""modified"""'], {'null': '(True)'}), "('modified', null=True)\n", (36558, 36581), False, 'from django_extensions.db.fields import CreationDateTimeField, ModificationDateTimeField\n'), ((38294, 38490), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'help_text': '"""This is the minimum score which a case study needs to have to be considered before being presented to users. """', 'default': '(10)', 'decimal_places': '(3)', 'max_digits': '(5)'}), "(help_text=\n 'This is the minimum score which a case study needs to have to be considered before being presented to users. '\n , default=10, decimal_places=3, max_digits=5)\n", (38313, 38490), False, 'from django.db import models\n'), ((38544, 38689), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'help_text': '"""Score given when user\'s lesson is tagged in the case study."""', 'default': '(8)', 'decimal_places': '(3)', 'max_digits': '(5)'}), '(help_text=\n "Score given when user\'s lesson is tagged in the case study.", default=\n 8, decimal_places=3, max_digits=5)\n', (38563, 38689), False, 'from django.db import models\n'), ((38731, 38918), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'help_text': '"""Score given when user\'s lesson\'s topic is tagged in the case study unless there is also lesson match."""', 'default': '(4)', 'decimal_places': '(3)', 'max_digits': '(5)'}), '(help_text=\n "Score given when user\'s lesson\'s topic is tagged in the case study unless there is also lesson match."\n , default=4, decimal_places=3, max_digits=5)\n', (38750, 38918), False, 'from django.db import models\n'), ((38972, 39173), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'help_text': '"""Score given when the user\'s lesson\'s module is tagged in the case study unless there is also lesson or topic match."""', 'default': '(2)', 'decimal_places': '(3)', 'max_digits': '(5)'}), '(help_text=\n "Score given when the user\'s lesson\'s module is tagged in the case study unless there is also lesson or topic match."\n , default=2, decimal_places=3, max_digits=5)\n', (38991, 39173), False, 'from django.db import models\n'), ((39232, 39417), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'help_text': '"""Score given when any case study HS6 tag matches the complete HS6 code of any of the user\'s products"""', 'default': '(8)', 'decimal_places': '(3)', 'max_digits': '(5)'}), '(help_text=\n "Score given when any case study HS6 tag matches the complete HS6 code of any of the user\'s products"\n , default=8, decimal_places=3, 
max_digits=5)\n', (39251, 39417), False, 'from django.db import models\n'), ((39476, 39688), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'help_text': '"""Score given when any case study HS4 tag matches the first 4 digits of any of the user\'s products unless there is an HS6 match."""', 'default': '(4)', 'decimal_places': '(3)', 'max_digits': '(5)'}), '(help_text=\n "Score given when any case study HS4 tag matches the first 4 digits of any of the user\'s products unless there is an HS6 match."\n , default=4, decimal_places=3, max_digits=5)\n', (39495, 39688), False, 'from django.db import models\n'), ((39747, 39966), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'help_text': '"""Score given when any case study HS2 tag matches the first 2 digits of any of the user\'s products unless there is an HS6 or HS4 match."""', 'default': '(2)', 'decimal_places': '(3)', 'max_digits': '(5)'}), '(help_text=\n "Score given when any case study HS2 tag matches the first 2 digits of any of the user\'s products unless there is an HS6 or HS4 match."\n , default=2, decimal_places=3, max_digits=5)\n', (39766, 39966), False, 'from django.db import models\n'), ((40027, 40206), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'help_text': '"""Score given when any case study country tag exactly matches one of the user\'s export markets."""', 'default': '(4)', 'decimal_places': '(3)', 'max_digits': '(5)'}), '(help_text=\n "Score given when any case study country tag exactly matches one of the user\'s export markets."\n , default=4, decimal_places=3, max_digits=5)\n', (40046, 40206), False, 'from django.db import models\n'), ((40257, 40480), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'help_text': '"""Score given when any case study region tag matches the region of any of the user\'s export markets unless there is an exact country match."""', 'default': '(2)', 'decimal_places': '(3)', 'max_digits': '(5)'}), '(help_text=\n "Score given when any case study region tag matches the region of any of the user\'s export markets unless there is an exact country match."\n , default=2, decimal_places=3, max_digits=5)\n', (40276, 40480), False, 'from django.db import models\n'), ((40542, 40804), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'help_text': '"""Score given when any case study trading bloc tag matches the any trading bloc that any of the user\'s export markets falls into unless there is an exact country or region match."""', 'default': '(2)', 'decimal_places': '(3)', 'max_digits': '(5)'}), '(help_text=\n "Score given when any case study trading bloc tag matches the any trading bloc that any of the user\'s export markets falls into unless there is an exact country or region match."\n , default=2, decimal_places=3, max_digits=5)\n', (40561, 40804), False, 'from django.db import models\n'), ((3642, 3655), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (3653, 3655), False, 'import hashlib\n'), ((4893, 4917), 'wagtail.admin.edit_handlers.PageChooserPanel', 'PageChooserPanel', (['"""page"""'], {}), "('page')\n", (4909, 4917), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((4927, 4946), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {}), "('title')\n", (4937, 4946), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, 
TabbedInterface\n'), ((4956, 4974), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""body"""'], {}), "('body')\n", (4966, 4974), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((4984, 5009), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""button_text"""'], {}), "('button_text')\n", (4994, 5009), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((5453, 5472), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {}), "('title')\n", (5463, 5472), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((5482, 5500), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""body"""'], {}), "('body')\n", (5492, 5500), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((5510, 5532), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""position"""'], {}), "('position')\n", (5520, 5532), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((5542, 5564), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""selector"""'], {}), "('selector')\n", (5552, 5564), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((5689, 5707), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""name"""'], {}), "('name')\n", (5699, 5707), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((5884, 5902), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""name"""'], {}), "('name')\n", (5894, 5902), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((6262, 6280), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""name"""'], {}), "('name')\n", (6272, 6280), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((6290, 6310), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""region"""'], {}), "('region')\n", (6300, 6310), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((6779, 6797), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""name"""'], {}), "('name')\n", (6789, 6797), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((7177, 7195), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""name"""'], {}), "('name')\n", (7187, 7195), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((7197, 7222), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""icon"""'], {}), "('icon')\n", (7214, 7222), 
False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((8620, 8642), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""template"""'], {}), "('template')\n", (8630, 8642), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((9775, 9823), 'core.context.get_context_provider', 'get_context_provider', ([], {'request': 'request', 'page': 'self'}), '(request=request, page=self)\n', (9795, 9823), False, 'from core.context import get_context_provider\n'), ((10639, 10663), 'wagtail.images.get_image_model_string', 'get_image_model_string', ([], {}), '()\n', (10661, 10663), False, 'from wagtail.images import get_image_model_string\n'), ((13697, 13721), 'wagtail.images.get_image_model_string', 'get_image_model_string', ([], {}), '()\n', (13719, 13721), False, 'from wagtail.images import get_image_model_string\n'), ((17366, 17392), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['dest'], {}), '(dest)\n', (17386, 17392), False, 'from django.http import HttpResponseRedirect\n'), ((27807, 27826), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {}), "('title')\n", (27817, 27826), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((27836, 27857), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""content"""'], {}), "('content')\n", (27846, 27857), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((27867, 27885), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""tags"""'], {}), "('tags')\n", (27877, 27885), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((37802, 37823), 'core.case_study_index.update_cs_index', 'update_cs_index', (['self'], {}), '(self)\n', (37817, 37823), False, 'from core.case_study_index import delete_cs_index, update_cs_index\n'), ((37865, 37889), 'core.case_study_index.delete_cs_index', 'delete_cs_index', (['self.id'], {}), '(self.id)\n', (37880, 37889), False, 'from core.case_study_index import delete_cs_index, update_cs_index\n'), ((37982, 38035), 'django.urls.reverse', 'reverse', (['"""cms_extras:case-study-view"""'], {'args': '[self.id]'}), "('cms_extras:case-study-view', args=[self.id])\n", (37989, 38035), False, 'from django.urls import reverse\n'), ((2317, 2332), 'django.utils.translation.ugettext_lazy', '_', (['"""Transcript"""'], {}), "('Transcript')\n", (2318, 2332), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2465, 2487), 'django.utils.translation.ugettext_lazy', '_', (['"""English subtitles"""'], {}), "('English subtitles')\n", (2466, 2487), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6555, 6573), 'django.utils.text.slugify', 'slugify', (['self.name'], {}), '(self.name)\n', (6562, 6573), False, 'from django.utils.text import slugify\n'), ((8667, 8685), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""slug"""'], {}), "('slug')\n", (8677, 8685), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((9019, 9068), 'wagtail.admin.edit_handlers.ObjectList', 'ObjectList', 
(['cls.content_panels'], {'heading': '"""Content"""'}), "(cls.content_panels, heading='Content')\n", (9029, 9068), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((9082, 9129), 'wagtail.admin.edit_handlers.ObjectList', 'ObjectList', (['cls.layout_panels'], {'heading': '"""Layout"""'}), "(cls.layout_panels, heading='Layout')\n", (9092, 9129), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((9143, 9216), 'wagtail.admin.edit_handlers.ObjectList', 'ObjectList', (['cls.settings_panels'], {'heading': '"""Settings"""', 'classname': '"""settings"""'}), "(cls.settings_panels, heading='Settings', classname='settings')\n", (9153, 9216), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((11328, 11353), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""description"""'], {}), "('description')\n", (11338, 11353), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((11363, 11389), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""button"""'], {}), "('button')\n", (11379, 11389), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((11399, 11425), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""image"""'], {}), "('image')\n", (11416, 11425), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((11435, 11465), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""components"""'], {}), "('components')\n", (11451, 11465), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((11475, 11499), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""body"""'], {}), "('body')\n", (11491, 11499), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((11925, 11951), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""button"""'], {}), "('button')\n", (11941, 11951), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((12741, 12783), 'domestic.helpers.get_lesson_completion_status', 'get_lesson_completion_status', (['request.user'], {}), '(request.user)\n', (12769, 12783), False, 'from domestic.helpers import get_lesson_completion_status\n'), ((12840, 12911), 'core.helpers.get_high_level_completion_progress', 'get_high_level_completion_progress', ([], {'completion_status': 'completion_status'}), '(completion_status=completion_status)\n', (12874, 12911), False, 'from core.helpers import get_high_level_completion_progress, get_module_completion_progress\n'), ((13214, 13248), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""record_read_progress"""'], {}), "('record_read_progress')\n", (13224, 13248), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, 
PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((13304, 13329), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""description"""'], {}), "('description')\n", (13314, 13329), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((13331, 13357), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""button_label"""'], {}), "('button_label')\n", (13341, 13357), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((13899, 13920), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""heading"""'], {}), "('heading')\n", (13909, 13920), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((13930, 13956), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""image"""'], {}), "('image')\n", (13947, 13956), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((15084, 15126), 'domestic.helpers.get_lesson_completion_status', 'get_lesson_completion_status', (['request.user'], {}), '(request.user)\n', (15112, 15126), False, 'from domestic.helpers import get_lesson_completion_status\n'), ((15179, 15268), 'core.helpers.get_module_completion_progress', 'get_module_completion_progress', ([], {'completion_status': 'completion_status', 'module_page': 'self'}), '(completion_status=completion_status,\n module_page=self)\n', (15209, 15268), False, 'from core.helpers import get_high_level_completion_progress, get_module_completion_progress\n'), ((15368, 15439), 'core.helpers.get_high_level_completion_progress', 'get_high_level_completion_progress', ([], {'completion_status': 'completion_status'}), '(completion_status=completion_status)\n', (15402, 15439), False, 'from core.helpers import get_high_level_completion_progress, get_module_completion_progress\n'), ((22546, 22570), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""hero"""'], {}), "('hero')\n", (22562, 22570), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((22580, 22609), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""objective"""'], {}), "('objective')\n", (22596, 22609), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((22619, 22643), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""body"""'], {}), "('body')\n", (22635, 22643), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((22653, 22678), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""recap"""'], {}), "('recap')\n", (22669, 22678), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((24442, 24464), 'urllib.parse.unquote', 'unquote', (['backlink_path'], {}), '(backlink_path)\n', (24449, 24464), False, 'from urllib.parse import unquote\n'), ((26186, 26207), 'core.utils.PageTopicHelper', 'PageTopicHelper', (['self'], {}), '(self)\n', (26201, 26207), False, 'from core.utils 
import PageTopicHelper, get_first_lesson\n'), ((33412, 33458), 'django.template.loader.render_to_string', 'render_to_string', (['self.field_template', 'context'], {}), '(self.field_template, context)\n', (33428, 33458), False, 'from django.template.loader import render_to_string\n'), ((41398, 41440), 'wagtail.admin.edit_handlers.ObjectList', 'ObjectList', (['product_tab'], {'heading': '"""Product"""'}), "(product_tab, heading='Product')\n", (41408, 41440), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((41454, 41494), 'wagtail.admin.edit_handlers.ObjectList', 'ObjectList', (['market_tab'], {'heading': '"""Market"""'}), "(market_tab, heading='Market')\n", (41464, 41494), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((41508, 41548), 'wagtail.admin.edit_handlers.ObjectList', 'ObjectList', (['lesson_tab'], {'heading': '"""Lesson"""'}), "(lesson_tab, heading='Lesson')\n", (41518, 41548), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((41562, 41608), 'wagtail.admin.edit_handlers.ObjectList', 'ObjectList', (['threshold_tab'], {'heading': '"""Threshold"""'}), "(threshold_tab, heading='Threshold')\n", (41572, 41608), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((5036, 5056), 'wagtail.admin.edit_handlers.InlinePanel', 'InlinePanel', (['"""steps"""'], {}), "('steps')\n", (5047, 5056), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((9244, 9267), 'wagtail.admin.edit_handlers.TabbedInterface', 'TabbedInterface', (['panels'], {}), '(panels)\n', (9259, 9267), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((10538, 10573), 'core.blocks.ButtonBlock', 'core_blocks.ButtonBlock', ([], {'icon': '"""cog"""'}), "(icon='cog')\n", (10561, 10573), True, 'from core import blocks as core_blocks, mixins\n'), ((10797, 10823), 'core.blocks.SectionBlock', 'core_blocks.SectionBlock', ([], {}), '()\n', (10821, 10823), True, 'from core import blocks as core_blocks, mixins\n'), ((10848, 10872), 'core.blocks.TitleBlock', 'core_blocks.TitleBlock', ([], {}), '()\n', (10870, 10872), True, 'from core import blocks as core_blocks, mixins\n'), ((10896, 10962), 'wagtail.core.blocks.RichTextBlock', 'blocks.RichTextBlock', ([], {'icon': '"""openquote"""', 'helptext': '"""Add a textblock"""'}), "(icon='openquote', helptext='Add a textblock')\n", (10916, 10962), False, 'from wagtail.core import blocks\n'), ((10987, 11011), 'core.blocks.ImageBlock', 'core_blocks.ImageBlock', ([], {}), '()\n', (11009, 11011), True, 'from core import blocks as core_blocks, mixins\n'), ((11133, 11164), 'core.blocks.RouteSectionBlock', 'core_blocks.RouteSectionBlock', ([], {}), '()\n', (11162, 11164), True, 'from core import blocks as core_blocks, mixins\n'), ((11758, 11793), 'core.blocks.ButtonBlock', 'core_blocks.ButtonBlock', ([], {'icon': '"""cog"""'}), "(icon='cog')\n", (11781, 11793), True, 'from core import blocks as core_blocks, mixins\n'), ((15637, 15724), 
'django.core.exceptions.ValidationError', 'ValidationError', (['"""Only one image or video allowed in Hero section"""'], {'code': '"""invalid"""'}), "('Only one image or video allowed in Hero section', code=\n 'invalid')\n", (15652, 15724), False, 'from django.core.exceptions import ValidationError\n'), ((18199, 18264), 'core.blocks.ImageBlock', 'core_blocks.ImageBlock', ([], {'template': '"""core/includes/_hero_image.html"""'}), "(template='core/includes/_hero_image.html')\n", (18221, 18264), True, 'from core import blocks as core_blocks, mixins\n'), ((18289, 18360), 'core.blocks.SimpleVideoBlock', 'core_blocks.SimpleVideoBlock', ([], {'template': '"""core/includes/_hero_video.html"""'}), "(template='core/includes/_hero_video.html')\n", (18317, 18360), True, 'from core import blocks as core_blocks, mixins\n'), ((18564, 18617), 'wagtail.core.blocks.RichTextBlock', 'blocks.RichTextBlock', ([], {'options': "{'class': 'objectives'}"}), "(options={'class': 'objectives'})\n", (18584, 18617), False, 'from wagtail.core import blocks\n'), ((18659, 18677), 'core.blocks.Item', 'core_blocks.Item', ([], {}), '()\n', (18675, 18677), True, 'from core import blocks as core_blocks, mixins\n'), ((19297, 19345), 'core.blocks.CaseStudyStaticBlock', 'core_blocks.CaseStudyStaticBlock', ([], {'icon': '"""fa-book"""'}), "(icon='fa-book')\n", (19329, 19345), True, 'from core import blocks as core_blocks, mixins\n'), ((19402, 19441), 'core.blocks.StepByStepBlock', 'core_blocks.StepByStepBlock', ([], {'icon': '"""cog"""'}), "(icon='cog')\n", (19429, 19441), True, 'from core import blocks as core_blocks, mixins\n'), ((19831, 19878), 'core.blocks.ITAQuoteBlock', 'core_blocks.ITAQuoteBlock', ([], {'icon': '"""fa-quote-left"""'}), "(icon='fa-quote-left')\n", (19856, 19878), True, 'from core import blocks as core_blocks, mixins\n'), ((21072, 21108), 'core.blocks.ChooseDoNotChooseBlock', 'core_blocks.ChooseDoNotChooseBlock', ([], {}), '()\n', (21106, 21108), True, 'from core import blocks as core_blocks, mixins\n'), ((21166, 21301), 'core.blocks.ImageBlock', 'core_blocks.ImageBlock', ([], {'template': '"""core/includes/_image_full_width.html"""', 'help_text': '"""Image displayed within a full-page-width block"""'}), "(template='core/includes/_image_full_width.html',\n help_text='Image displayed within a full-page-width block')\n", (21188, 21301), True, 'from core import blocks as core_blocks, mixins\n'), ((21428, 21575), 'core.blocks.SimpleVideoBlock', 'core_blocks.SimpleVideoBlock', ([], {'template': '"""core/includes/_video_full_width.html"""', 'help_text': '"""Video displayed within a full-page-width block"""'}), "(template=\n 'core/includes/_video_full_width.html', help_text=\n 'Video displayed within a full-page-width block')\n", (21456, 21575), True, 'from core import blocks as core_blocks, mixins\n'), ((24000, 24026), 'exportplan.core.data.SECTIONS.items', 'EXPORTPLAN_URL_MAP.items', ([], {}), '()\n', (24024, 24026), True, 'from exportplan.core.data import SECTION_SLUGS as EXPORTPLAN_SLUGS, SECTIONS as EXPORTPLAN_URL_MAP\n'), ((26963, 26992), 'core.utils.get_first_lesson', 'get_first_lesson', (['next_module'], {}), '(next_module)\n', (26979, 26992), False, 'from core.utils import PageTopicHelper, get_first_lesson\n'), ((35311, 35368), 'wagtail.core.blocks.RichTextBlock', 'blocks.RichTextBlock', ([], {'features': 'RICHTEXT_FEATURES__MINIMAL'}), '(features=RICHTEXT_FEATURES__MINIMAL)\n', (35331, 35368), False, 'from wagtail.core import blocks\n'), ((35479, 35512), 'core.blocks.CaseStudyQuoteBlock', 
'core_blocks.CaseStudyQuoteBlock', ([], {}), '()\n', (35510, 35512), True, 'from core import blocks as core_blocks, mixins\n'), ((36653, 36672), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""title"""'], {}), "('title')\n", (36663, 36672), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((36690, 36714), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""lead_title"""'], {}), "('lead_title')\n", (36700, 36714), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((36732, 36761), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""summary_context"""'], {}), "('summary_context')\n", (36742, 36761), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((36779, 36803), 'wagtail.admin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', (['"""body"""'], {}), "('body')\n", (36795, 36803), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((36928, 36954), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""hs_code_tags"""'], {}), "('hs_code_tags')\n", (36938, 36954), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((36972, 37003), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""country_code_tags"""'], {}), "('country_code_tags')\n", (36982, 37003), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((37021, 37051), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""region_code_tags"""'], {}), "('region_code_tags')\n", (37031, 37051), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((37069, 37105), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""trading_bloc_code_tags"""'], {}), "('trading_bloc_code_tags')\n", (37079, 37105), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((37247, 37298), 'wagtail.admin.edit_handlers.InlinePanel', 'InlinePanel', (['"""related_pages"""'], {'label': '"""Related pages"""'}), "('related_pages', label='Related pages')\n", (37258, 37298), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((40882, 40907), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""product_hs6"""'], {}), "('product_hs6')\n", (40892, 40907), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((40909, 40934), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""product_hs4"""'], {}), "('product_hs4')\n", (40919, 40934), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((40936, 40961), 
'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""product_hs2"""'], {}), "('product_hs2')\n", (40946, 40961), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((41009, 41036), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""country_exact"""'], {}), "('country_exact')\n", (41019, 41036), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((41038, 41066), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""country_region"""'], {}), "('country_region')\n", (41048, 41066), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((41068, 41095), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""trading_blocs"""'], {}), "('trading_blocs')\n", (41078, 41095), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((41139, 41159), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""lesson"""'], {}), "('lesson')\n", (41149, 41159), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((41161, 41180), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""topic"""'], {}), "('topic')\n", (41171, 41180), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((41182, 41202), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""module"""'], {}), "('module')\n", (41192, 41202), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((41284, 41307), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""threshold"""'], {}), "('threshold')\n", (41294, 41307), False, 'from wagtail.admin.edit_handlers import FieldPanel, InlinePanel, MultiFieldPanel, ObjectList, PageChooserPanel, StreamFieldPanel, TabbedInterface\n'), ((3277, 3330), 'django.urls.reverse', 'reverse', (['"""core:subtitles-serve"""'], {'args': "[self.id, 'en']"}), "('core:subtitles-serve', args=[self.id, 'en'])\n", (3284, 3330), False, 'from django.urls import reverse\n'), ((2843, 2878), 'mimetypes.guess_type', 'mimetypes.guess_type', (['self.filename'], {}), '(self.filename)\n', (2863, 2878), False, 'import mimetypes\n'), ((18844, 18866), 'wagtail.core.blocks.RichTextBlock', 'blocks.RichTextBlock', ([], {}), '()\n', (18864, 18866), False, 'from wagtail.core import blocks\n'), ((19111, 19135), 'core.blocks.VideoBlock', 'core_blocks.VideoBlock', ([], {}), '()\n', (19133, 19135), True, 'from core import blocks as core_blocks, mixins\n'), ((19583, 19621), 'wagtail.core.blocks.RichTextBlock', 'blocks.RichTextBlock', ([], {'icon': '"""openquote"""'}), "(icon='openquote')\n", (19603, 19621), False, 'from wagtail.core import blocks\n'), ((21828, 21862), 'wagtail.core.blocks.CharBlock', 'blocks.CharBlock', ([], {'icon': '"""fa-header"""'}), "(icon='fa-header')\n", (21844, 21862), False, 'from wagtail.core import blocks\n'), ((34997, 35074), 'core.blocks.SimpleVideoBlock', 'core_blocks.SimpleVideoBlock', ([], {'template': 
'"""core/includes/_case_study_video.html"""'}), "(template='core/includes/_case_study_video.html')\n", (35025, 35074), True, 'from core import blocks as core_blocks, mixins\n'), ((35111, 35135), 'core.blocks.ImageBlock', 'core_blocks.ImageBlock', ([], {}), '()\n', (35133, 35135), True, 'from core import blocks as core_blocks, mixins\n'), ((20266, 20305), 'core.blocks.Item', 'core_blocks.Item', ([], {'icon': '"""fa-arrow-right"""'}), "(icon='fa-arrow-right')\n", (20282, 20305), True, 'from core import blocks as core_blocks, mixins\n'), ((20707, 20746), 'core.blocks.Item', 'core_blocks.Item', ([], {'icon': '"""fa-arrow-right"""'}), "(icon='fa-arrow-right')\n", (20723, 20746), True, 'from core import blocks as core_blocks, mixins\n'), ((22135, 22153), 'core.blocks.Item', 'core_blocks.Item', ([], {}), '()\n', (22151, 22153), True, 'from core import blocks as core_blocks, mixins\n')] |
# -*- coding: utf-8 -*-
##########################################################################
#
# Copyright (c) 2022 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
"""
Data transformers
"""
import numpy as np
import numbers
import collections
import random
import math
import cv2
from . import functional as F
from easymia.core.abstract_transforms import AbstractTransform
from easymia.libs import manager
@manager.TRANSFORMS.add_component
class Compose(AbstractTransform):
"""
Do transformation on input data with corresponding pre-processing and augmentation operations.
The shape of input data to all operations is [height, width, channels].
Args:
        transforms (list): A list containing data pre-processing or augmentation operations. An empty list means the images are only read, with no transformation applied.
to_rgb (bool, optional): If converting image to RGB color space. Default: True.
Raises:
TypeError: When 'transforms' is not a list.
ValueError: when the length of 'transforms' is less than 1.
"""
def __init__(self, mode, transforms):
if not isinstance(transforms, list):
raise TypeError('The transforms must be a list!')
self.transforms = transforms
super().__init__(mode)
    def __call__(self, im):
"""
Args:
im (np.ndarray): It is either image path or image object.
Returns:
(np.array). Image after transformation.
"""
for op in self.transforms:
im = op(im)
return im
@manager.TRANSFORMS.add_component
class RandomHorizontalFlip(AbstractTransform):
"""Horizontally flip the given PIL Image randomly with a given probability.
Args:
        prob (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, mode, prob=0.5):
"""
init
"""
self.prob = prob
super().__init__(mode)
    def __call__(self, img):
"""
Args:
img (numpy ndarray): Image to be flipped.
Returns:
numpy ndarray: Randomly flipped image.
"""
if random.random() < self.prob:
return F.hflip(img)
return img
@manager.TRANSFORMS.add_component
class RandomVerticalFlip(AbstractTransform):
"""Vertically flip the given PIL Image randomly with a given probability.
Args:
        prob (float): probability of the image being flipped. Default value is 0.5
"""
def __init__(self, mode, prob=0.5):
"""
init
"""
self.prob = prob
super().__init__(mode)
    def __call__(self, img):
"""
Args:
img (numpy ndarray): Image to be flipped.
Returns:
numpy ndarray: Randomly flipped image.
"""
if random.random() < self.prob:
return F.vflip(img)
return img
@manager.TRANSFORMS.add_component
class RandomResizedCrop(AbstractTransform):
"""Crop the given numpy ndarray to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: cv2.INTER_CUBIC
"""
def __init__(self, mode, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=cv2.INTER_CUBIC):
"""
init
"""
self.size = (size, size)
self.interpolation = interpolation
self.scale = scale
self.ratio = ratio
super().__init__(mode)
def get_params(self, img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (numpy ndarray): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
params_ret = collections.namedtuple('params_ret', ['i', 'j', 'h', 'w'])
for attempt in range(10):
area = img.shape[0] * img.shape[1]
target_area = random.uniform(*scale) * area
aspect_ratio = random.uniform(*ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5:
w, h = h, w
if w <= img.shape[1] and h <= img.shape[0]:
i = random.randint(0, img.shape[0] - h)
j = random.randint(0, img.shape[1] - w)
return params_ret(i, j, h, w)
# Fallback
w = min(img.shape[0], img.shape[1])
i = (img.shape[0] - w) // 2
j = (img.shape[1] - w) // 2
return params_ret(i, j, w, w)
    def __call__(self, img):
"""
Args:
img (numpy ndarray): Image to be cropped and resized.
Returns:
numpy ndarray: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)
@manager.TRANSFORMS.add_component
class RandomRotation(AbstractTransform):
"""Rotate the image by angle.
Args:
degrees (sequence or float or int): Range of degrees to select from.
If degrees is a number instead of sequence like (min, max), the range of degrees
will be (-degrees, +degrees).
resample ({cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4}, optional):
An optional resampling filter. See `filters`_ for more information.
If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
expand (bool, optional): Optional expansion flag.
If true, expands the output to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the input image.
Note that the expand flag assumes rotation around the center and no translation.
center (2-tuple, optional): Optional center of rotation.
Origin is the upper left corner.
Default is the center of the image.
"""
def __init__(self, mode, degrees, center=None):
"""
init
"""
if isinstance(degrees, numbers.Number):
if degrees < 0:
raise ValueError(
"If degrees is a single number, it must be positive.")
self.degrees = (-degrees, degrees)
else:
if len(degrees) != 2:
raise ValueError(
"If degrees is a sequence, it must be of len 2.")
self.degrees = degrees
self.center = center
super().__init__(mode)
@staticmethod
def get_params(degrees):
"""Get parameters for ``rotate`` for a random rotation.
Returns:
sequence: params to be passed to ``rotate`` for random rotation.
"""
angle = random.uniform(degrees[0], degrees[1])
return angle
    def __call__(self, img):
"""
img (numpy ndarray): Image to be rotated.
Returns:
numpy ndarray: Rotated image.
"""
angle = self.get_params(self.degrees)
return F.rotate(img, angle, self.center)
@manager.TRANSFORMS.add_component
class Resize(AbstractTransform):
"""Resize the input numpy ndarray to the given size.
Args:
size (sequence or int): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is
            ``cv2.INTER_LINEAR``, bilinear interpolation
"""
def __init__(self, mode, size, interpolation=cv2.INTER_LINEAR):
"""
resize
"""
# assert isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)
if isinstance(size, int):
self.size = (size, size)
elif isinstance(size, collections.abc.Iterable) and len(size) == 2:
if type(size) == list:
size = tuple(size)
self.size = size
else:
raise ValueError('Unknown inputs for size: {}'.format(size))
self.interpolation = interpolation
super().__init__(mode)
    def __call__(self, img):
"""
Args:
img (numpy ndarray): Image to be scaled.
Returns:
numpy ndarray: Rescaled image.
"""
return F.resize(img, self.size, self.interpolation) | [
"random.uniform",
"collections.namedtuple",
"math.sqrt",
"random.random",
"random.randint"
] | [((4959, 5017), 'collections.namedtuple', 'collections.namedtuple', (['"""params_ret"""', "['i', 'j', 'h', 'w']"], {}), "('params_ret', ['i', 'j', 'h', 'w'])\n", (4981, 5017), False, 'import collections\n'), ((8071, 8109), 'random.uniform', 'random.uniform', (['degrees[0]', 'degrees[1]'], {}), '(degrees[0], degrees[1])\n', (8085, 8109), False, 'import random\n'), ((2737, 2752), 'random.random', 'random.random', ([], {}), '()\n', (2750, 2752), False, 'import random\n'), ((3417, 3432), 'random.random', 'random.random', ([], {}), '()\n', (3430, 3432), False, 'import random\n'), ((5182, 5204), 'random.uniform', 'random.uniform', (['*ratio'], {}), '(*ratio)\n', (5196, 5204), False, 'import random\n'), ((5125, 5147), 'random.uniform', 'random.uniform', (['*scale'], {}), '(*scale)\n', (5139, 5147), False, 'import random\n'), ((5354, 5369), 'random.random', 'random.random', ([], {}), '()\n', (5367, 5369), False, 'import random\n'), ((5482, 5517), 'random.randint', 'random.randint', (['(0)', '(img.shape[0] - h)'], {}), '(0, img.shape[0] - h)\n', (5496, 5517), False, 'import random\n'), ((5538, 5573), 'random.randint', 'random.randint', (['(0)', '(img.shape[1] - w)'], {}), '(0, img.shape[1] - w)\n', (5552, 5573), False, 'import random\n'), ((5232, 5269), 'math.sqrt', 'math.sqrt', (['(target_area * aspect_ratio)'], {}), '(target_area * aspect_ratio)\n', (5241, 5269), False, 'import math\n'), ((5298, 5335), 'math.sqrt', 'math.sqrt', (['(target_area / aspect_ratio)'], {}), '(target_area / aspect_ratio)\n', (5307, 5335), False, 'import math\n')] |
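A minimal, self-contained sketch of the callable-transform pattern used in the record above -- each transform is an object applied as op(im) inside a compose loop. The class names here are hypothetical and only numpy/cv2 are assumed, so treat it as an illustration rather than the easymia API:

import random
import cv2
import numpy as np

class HFlip:
    """Flip along the width axis with probability `prob`."""
    def __init__(self, prob=0.5):
        self.prob = prob
    def __call__(self, im):
        return im[:, ::-1, :] if random.random() < self.prob else im

class ResizeTo:
    """Resize to a fixed (width, height) with bilinear interpolation."""
    def __init__(self, size):
        self.size = size
    def __call__(self, im):
        return cv2.resize(im, self.size, interpolation=cv2.INTER_LINEAR)

pipeline = [HFlip(0.5), ResizeTo((64, 64))]
img = np.random.randint(0, 256, (128, 96, 3), dtype=np.uint8)
for op in pipeline:
    img = op(img)
print(img.shape)  # (64, 64, 3)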
# -*- coding: utf-8 -*-
#
# This file is part of menRva.
# Copyright (C) 2018-present NU,FSM,GHSL.
#
# menRva is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test terms views.py"""
from cd2h_repo_project.modules.terms.views import serialize_terms_for_edit_ui
def test_serialize_terms_for_edit_ui(create_record):
deposit = create_record(
{
'terms': [
{'source': 'MeSH', 'value': 'Cognitive Neuroscience'},
{'source': 'FAST', 'value': 'Border terrier'}
]
},
published=False
)
serialized_deposit = serialize_terms_for_edit_ui(deposit)
assert 'terms' not in serialized_deposit
assert serialized_deposit['mesh_terms'] == [
{
'data': {'source': 'MeSH', 'value': 'Cognitive Neuroscience'}
}
]
assert serialized_deposit['fast_terms'] == [
{
'data': {'source': 'FAST', 'value': 'Border terrier'}
}
]
def test_serialize_terms_for_edit_ui_no_terms(create_record):
deposit = create_record(published=False)
serialized_deposit = serialize_terms_for_edit_ui(deposit)
assert 'terms' not in serialized_deposit
assert serialized_deposit['mesh_terms'] == []
assert serialized_deposit['fast_terms'] == []
| [
"cd2h_repo_project.modules.terms.views.serialize_terms_for_edit_ui"
] | [((679, 715), 'cd2h_repo_project.modules.terms.views.serialize_terms_for_edit_ui', 'serialize_terms_for_edit_ui', (['deposit'], {}), '(deposit)\n', (706, 715), False, 'from cd2h_repo_project.modules.terms.views import serialize_terms_for_edit_ui\n'), ((1187, 1223), 'cd2h_repo_project.modules.terms.views.serialize_terms_for_edit_ui', 'serialize_terms_for_edit_ui', (['deposit'], {}), '(deposit)\n', (1214, 1223), False, 'from cd2h_repo_project.modules.terms.views import serialize_terms_for_edit_ui\n')] |
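The expected behaviour asserted above can be captured in a small stand-alone sketch; the function below is inferred from the test assertions only and is not the cd2h_repo_project implementation:

def serialize_terms_sketch(record):
    """Split a record's `terms` list into `mesh_terms` / `fast_terms` entries."""
    out = {k: v for k, v in record.items() if k != 'terms'}
    terms = record.get('terms', [])
    out['mesh_terms'] = [{'data': t} for t in terms if t.get('source') == 'MeSH']
    out['fast_terms'] = [{'data': t} for t in terms if t.get('source') == 'FAST']
    return out

deposit = {'terms': [{'source': 'MeSH', 'value': 'Cognitive Neuroscience'},
                     {'source': 'FAST', 'value': 'Border terrier'}]}
print(serialize_terms_sketch(deposit))  # one MeSH entry, one FAST entry, no 'terms' key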
# -*- coding: utf-8 -*-
"""Classes (Python) to compute the Bandit UCB (Upper Confidence Bound) arm allocation and choosing the arm to pull next.
See :mod:`moe.bandit.bandit_interface` for further details on bandit.
"""
import copy
from abc import abstractmethod
from moe.bandit.bandit_interface import BanditInterface
from moe.bandit.utils import get_winning_arm_names_from_payoff_arm_name_list, get_equal_arm_allocations
class UCBInterface(BanditInterface):
r"""Implementation of the constructor of UCB (Upper Confidence Bound) and method allocate_arms. The method get_ucb_payoff is implemented in subclass.
A class to encapsulate the computation of bandit UCB.
The Algorithm: http://moodle.technion.ac.il/pluginfile.php/192340/mod_resource/content/0/UCB.pdf
To inherit this class, a subclass needs to implement get_ucb_payoff
(see :func:`moe.bandit.ucb.ucb1.UCB1.get_ucb_payoff` for an example), everything else is already implemented.
See :mod:`moe.bandit.bandit_interface` docs for further details.
"""
def __init__(
self,
historical_info,
subtype=None,
):
"""Construct a UCB object.
:param historical_info: a dictionary of arms sampled
:type historical_info: dictionary of (str, SampleArm()) pairs (see :class:`moe.bandit.data_containers.SampleArm` for more details)
:param subtype: subtype of the UCB bandit algorithm (default: None)
:type subtype: str
"""
self._historical_info = copy.deepcopy(historical_info)
self._subtype = subtype
@staticmethod
def get_unsampled_arm_names(arms_sampled):
r"""Compute the set of unsampled arm names based on the given ``arms_sampled``..
Throws an exception when arms_sampled is empty.
:param arms_sampled: a dictionary of arm name to :class:`moe.bandit.data_containers.SampleArm`
:type arms_sampled: dictionary of (str, SampleArm()) pairs
:return: set of names of the unsampled arms
:rtype: frozenset(str)
:raise: ValueError when ``arms_sampled`` are empty.
"""
if not arms_sampled:
raise ValueError('arms_sampled is empty!')
unsampled_arm_name_list = [name for name, sampled_arm in arms_sampled.iteritems() if sampled_arm.total == 0]
return frozenset(unsampled_arm_name_list)
@abstractmethod
def get_ucb_payoff(self, sampled_arm, number_sampled):
r"""Compute the expected upper confidence bound payoff using the UCB subtype formula.
See definition in subclasses for details.
:param sampled_arm: a sampled arm
:type sampled_arm: :class:`moe.bandit.data_containers.SampleArm`
:param number_sampled: the overall number of pulls so far
:type number_sampled: int
:return: ucb payoff
:rtype: float64
:raise: ValueError when ``sampled_arm`` is empty.
"""
pass
def allocate_arms(self):
r"""Compute the allocation to each arm given ``historical_info``, running bandit ``subtype`` endpoint.
Computes the allocation to each arm based on the given subtype, and, historical info.
Works with k-armed bandits (k >= 1).
The Algorithm: http://moodle.technion.ac.il/pluginfile.php/192340/mod_resource/content/0/UCB.pdf
If there is at least one unsampled arm, this method will choose to pull the unsampled arm
(randomly choose an unsampled arm if there are multiple unsampled arms).
If all arms are pulled at least once, this method will pull the optimal arm
(best expected upper confidence bound payoff).
See :func:`moe.bandit.ucb.ucb_interface.UCBInterface.get_ucb_payoff` for details on how to compute the expected upper confidence bound payoff (expected UCB payoff)
In case of a tie, the method will split the allocation among the optimal arms.
For example, if we have three arms (arm1, arm2, and arm3) with expected UCB payoff 0.5, 0.5, and 0.1 respectively.
We split the allocation between the optimal arms arm1 and arm2.
``{arm1: 0.5, arm2: 0.5, arm3: 0.0}``
:return: the dictionary of (arm, allocation) key-value pairs
:rtype: a dictionary of (str, float64) pairs
:raise: ValueError when ``sample_arms`` are empty.
"""
arms_sampled = self._historical_info.arms_sampled
if not arms_sampled:
raise ValueError('sample_arms are empty!')
return get_equal_arm_allocations(arms_sampled, self.get_winning_arm_names(arms_sampled))
def get_winning_arm_names(self, arms_sampled):
r"""Compute the set of winning arm names based on the given ``arms_sampled``..
Throws an exception when arms_sampled is empty.
:param arms_sampled: a dictionary of arm name to :class:`moe.bandit.data_containers.SampleArm`
:type arms_sampled: dictionary of (str, SampleArm()) pairs
:return: set of names of the winning arms
:rtype: frozenset(str)
:raise: ValueError when ``arms_sampled`` are empty.
"""
if not arms_sampled:
raise ValueError('arms_sampled is empty!')
# If there exists an unsampled arm, return the names of the unsampled arms
unsampled_arm_names = self.get_unsampled_arm_names(arms_sampled)
if unsampled_arm_names:
return unsampled_arm_names
number_sampled = sum([sampled_arm.total for sampled_arm in arms_sampled.itervalues()])
ucb_payoff_arm_name_list = [(self.get_ucb_payoff(sampled_arm, number_sampled), arm_name) for arm_name, sampled_arm in arms_sampled.iteritems()]
return get_winning_arm_names_from_payoff_arm_name_list(ucb_payoff_arm_name_list)
| [
"moe.bandit.utils.get_winning_arm_names_from_payoff_arm_name_list",
"copy.deepcopy"
] | [((1529, 1559), 'copy.deepcopy', 'copy.deepcopy', (['historical_info'], {}), '(historical_info)\n', (1542, 1559), False, 'import copy\n'), ((5703, 5776), 'moe.bandit.utils.get_winning_arm_names_from_payoff_arm_name_list', 'get_winning_arm_names_from_payoff_arm_name_list', (['ucb_payoff_arm_name_list'], {}), '(ucb_payoff_arm_name_list)\n', (5750, 5776), False, 'from moe.bandit.utils import get_winning_arm_names_from_payoff_arm_name_list, get_equal_arm_allocations\n')] |
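The allocation logic above reduces to computing an upper-confidence payoff per arm and pulling the argmax. A worked example using the classic UCB1 payoff (empirical mean plus sqrt(2 ln N / n)) -- an assumed subtype formula, since the exact one lives in the moe subclasses:

import math

arms = {"arm1": (45, 100), "arm2": (30, 60), "arm3": (2, 10)}  # name -> (wins, pulls)
total_pulls = sum(pulls for _, pulls in arms.values())

def ucb1_payoff(wins, pulls, total):
    # empirical mean plus an exploration bonus that shrinks as the arm is pulled more
    return wins / pulls + math.sqrt(2.0 * math.log(total) / pulls)

payoffs = {name: ucb1_payoff(w, n, total_pulls) for name, (w, n) in arms.items()}
print(payoffs)
print("pull next:", max(payoffs, key=payoffs.get))  # ties would split the allocation evenly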
import numpy as np
from yt.geometry.selection_routines import GridSelector
from yt.utilities.io_handler import BaseIOHandler
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.on_demand_imports import _h5py as h5py
_convert_mass = ("particle_mass", "mass")
_particle_position_names = {}
class IOHandlerPackedHDF5(BaseIOHandler):
_dataset_type = "enzo_packed_3d"
_base = slice(None)
_field_dtype = "float64"
def _read_field_names(self, grid):
if grid.filename is None:
return []
f = h5py.File(grid.filename, mode="r")
try:
group = f["/Grid%08i" % grid.id]
except KeyError:
group = f
fields = []
dtypes = set()
add_io = "io" in grid.ds.particle_types
add_dm = "DarkMatter" in grid.ds.particle_types
for name, v in group.items():
# NOTE: This won't work with 1D datasets or references.
# For all versions of Enzo I know about, we can assume all floats
# are of the same size. So, let's grab one.
if not hasattr(v, "shape") or v.dtype == "O":
continue
elif len(v.dims) == 1:
if grid.ds.dimensionality == 1:
fields.append(("enzo", str(name)))
elif add_io:
fields.append(("io", str(name)))
elif add_dm:
fields.append(("DarkMatter", str(name)))
else:
fields.append(("enzo", str(name)))
dtypes.add(v.dtype)
if len(dtypes) == 1:
# Now, if everything we saw was the same dtype, we can go ahead and
# set it here. We do this because it is a HUGE savings for 32 bit
# floats, since our numpy copying/casting is way faster than
# h5py's, for some reason I don't understand. This does *not* need
# to be correct -- it will get fixed later -- it just needs to be
# okay for now.
self._field_dtype = list(dtypes)[0]
f.close()
return fields
@property
def _read_exception(self):
return (KeyError,)
def _read_particle_coords(self, chunks, ptf):
yield from self._read_particle_fields(chunks, ptf, None)
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
for chunk in chunks: # These should be organized by grid filename
f = None
for g in chunk.objs:
if g.filename is None:
continue
if f is None:
# print("Opening (read) %s" % g.filename)
f = h5py.File(g.filename, mode="r")
nap = sum(g.NumberOfActiveParticles.values())
if g.NumberOfParticles == 0 and nap == 0:
continue
ds = f.get("/Grid%08i" % g.id)
for ptype, field_list in sorted(ptf.items()):
if ptype == "io":
if g.NumberOfParticles == 0:
continue
pds = ds
elif ptype == "DarkMatter":
if g.NumberOfActiveParticles[ptype] == 0:
continue
pds = ds
elif not g.NumberOfActiveParticles[ptype]:
continue
else:
for pname in ["Active Particles", "Particles"]:
pds = ds.get(f"{pname}/{ptype}")
if pds is not None:
break
else:
raise RuntimeError(
"Could not find active particle group in data."
)
pn = _particle_position_names.get(ptype, r"particle_position_%s")
x, y, z = (
np.asarray(pds.get(pn % ax)[()], dtype="=f8") for ax in "xyz"
)
if selector is None:
# This only ever happens if the call is made from
# _read_particle_coords.
yield ptype, (x, y, z)
continue
mask = selector.select_points(x, y, z, 0.0)
if mask is None:
continue
for field in field_list:
data = np.asarray(pds.get(field)[()], "=f8")
if field in _convert_mass:
data *= g.dds.prod(dtype="f8")
yield (ptype, field), data[mask]
if f:
f.close()
def io_iter(self, chunks, fields):
h5_dtype = self._field_dtype
for chunk in chunks:
fid = None
filename = -1
for obj in chunk.objs:
if obj.filename is None:
continue
if obj.filename != filename:
# Note one really important thing here: even if we do
# implement LRU caching in the _read_obj_field function,
# we'll still be doing file opening and whatnot. This is a
# problem, but one we can return to.
if fid is not None:
fid.close()
fid = h5py.h5f.open(
obj.filename.encode("latin-1"), h5py.h5f.ACC_RDONLY
)
filename = obj.filename
for field in fields:
nodal_flag = self.ds.field_info[field].nodal_flag
dims = obj.ActiveDimensions[::-1] + nodal_flag[::-1]
data = np.empty(dims, dtype=h5_dtype)
yield field, obj, self._read_obj_field(obj, field, (fid, data))
if fid is not None:
fid.close()
def _read_obj_field(self, obj, field, fid_data):
if fid_data is None:
fid_data = (None, None)
fid, data = fid_data
if fid is None:
close = True
fid = h5py.h5f.open(obj.filename.encode("latin-1"), h5py.h5f.ACC_RDONLY)
else:
close = False
if data is None:
data = np.empty(obj.ActiveDimensions[::-1], dtype=self._field_dtype)
ftype, fname = field
try:
node = "/Grid%08i/%s" % (obj.id, fname)
dg = h5py.h5d.open(fid, node.encode("latin-1"))
except KeyError:
if fname == "Dark_Matter_Density":
data[:] = 0
return data.T
raise
dg.read(h5py.h5s.ALL, h5py.h5s.ALL, data)
# I don't know why, but on some installations of h5py this works, but
# on others, nope. Doesn't seem to be a version thing.
# dg.close()
if close:
fid.close()
return data.T
class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
_dataset_type = "enzo_packed_3d_gz"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
NGZ = self.ds.parameters.get("NumberOfGhostZones", 3)
self._base = (slice(NGZ, -NGZ), slice(NGZ, -NGZ), slice(NGZ, -NGZ))
def _read_obj_field(self, *args, **kwargs):
return super()._read_obj_field(*args, **kwargs)[self._base]
class IOHandlerInMemory(BaseIOHandler):
_dataset_type = "enzo_inline"
def __init__(self, ds, ghost_zones=3):
self.ds = ds
import enzo
self.enzo = enzo
self.grids_in_memory = enzo.grid_data
self.old_grids_in_memory = enzo.old_grid_data
self.my_slice = (
slice(ghost_zones, -ghost_zones),
slice(ghost_zones, -ghost_zones),
slice(ghost_zones, -ghost_zones),
)
BaseIOHandler.__init__(self, ds)
def _read_field_names(self, grid):
fields = []
add_io = "io" in grid.ds.particle_types
for name, v in self.grids_in_memory[grid.id].items():
# NOTE: This won't work with 1D datasets or references.
if not hasattr(v, "shape") or v.dtype == "O":
continue
elif v.ndim == 1:
if grid.ds.dimensionality == 1:
fields.append(("enzo", str(name)))
elif add_io:
fields.append(("io", str(name)))
else:
fields.append(("enzo", str(name)))
return fields
def _read_fluid_selection(self, chunks, selector, fields, size):
rv = {}
# Now we have to do something unpleasant
chunks = list(chunks)
if isinstance(selector, GridSelector):
if not (len(chunks) == len(chunks[0].objs) == 1):
raise RuntimeError
g = chunks[0].objs[0]
for ftype, fname in fields:
rv[(ftype, fname)] = self.grids_in_memory[g.id][fname].swapaxes(0, 2)
return rv
if size is None:
size = sum(g.count(selector) for chunk in chunks for g in chunk.objs)
for field in fields:
ftype, fname = field
fsize = size
rv[field] = np.empty(fsize, dtype="float64")
ng = sum(len(c.objs) for c in chunks)
mylog.debug(
"Reading %s cells of %s fields in %s grids",
size,
[f2 for f1, f2 in fields],
ng,
)
ind = 0
for chunk in chunks:
for g in chunk.objs:
# We want a *hard error* here.
# if g.id not in self.grids_in_memory: continue
for field in fields:
ftype, fname = field
data_view = self.grids_in_memory[g.id][fname][
self.my_slice
].swapaxes(0, 2)
nd = g.select(selector, data_view, rv[field], ind)
ind += nd
assert ind == fsize
return rv
def _read_particle_coords(self, chunks, ptf):
chunks = list(chunks)
for chunk in chunks: # These should be organized by grid filename
for g in chunk.objs:
if g.id not in self.grids_in_memory:
continue
nap = sum(g.NumberOfActiveParticles.values())
if g.NumberOfParticles == 0 and nap == 0:
continue
for ptype in sorted(ptf):
x, y, z = (
self.grids_in_memory[g.id]["particle_position_x"],
self.grids_in_memory[g.id]["particle_position_y"],
self.grids_in_memory[g.id]["particle_position_z"],
)
yield ptype, (x, y, z)
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
for chunk in chunks: # These should be organized by grid filename
for g in chunk.objs:
if g.id not in self.grids_in_memory:
continue
nap = sum(g.NumberOfActiveParticles.values())
if g.NumberOfParticles == 0 and nap == 0:
continue
for ptype, field_list in sorted(ptf.items()):
x, y, z = (
self.grids_in_memory[g.id]["particle_position_x"],
self.grids_in_memory[g.id]["particle_position_y"],
self.grids_in_memory[g.id]["particle_position_z"],
)
mask = selector.select_points(x, y, z, 0.0)
if mask is None:
continue
for field in field_list:
data = self.grids_in_memory[g.id][field]
if field in _convert_mass:
data = data * g.dds.prod(dtype="f8")
yield (ptype, field), data[mask]
class IOHandlerPacked2D(IOHandlerPackedHDF5):
_dataset_type = "enzo_packed_2d"
_particle_reader = False
def _read_data_set(self, grid, field):
f = h5py.File(grid.filename, mode="r")
ds = f["/Grid%08i/%s" % (grid.id, field)][:]
f.close()
return ds.transpose()[:, :, None]
def _read_fluid_selection(self, chunks, selector, fields, size):
rv = {}
# Now we have to do something unpleasant
chunks = list(chunks)
if isinstance(selector, GridSelector):
if not (len(chunks) == len(chunks[0].objs) == 1):
raise RuntimeError
g = chunks[0].objs[0]
f = h5py.File(g.filename, mode="r")
gds = f.get("/Grid%08i" % g.id)
for ftype, fname in fields:
rv[(ftype, fname)] = np.atleast_3d(gds.get(fname)[()].transpose())
f.close()
return rv
if size is None:
size = sum(g.count(selector) for chunk in chunks for g in chunk.objs)
for field in fields:
ftype, fname = field
fsize = size
rv[field] = np.empty(fsize, dtype="float64")
ng = sum(len(c.objs) for c in chunks)
mylog.debug(
"Reading %s cells of %s fields in %s grids",
size,
[f2 for f1, f2 in fields],
ng,
)
ind = 0
for chunk in chunks:
f = None
for g in chunk.objs:
if f is None:
# print("Opening (count) %s" % g.filename)
f = h5py.File(g.filename, mode="r")
gds = f.get("/Grid%08i" % g.id)
if gds is None:
gds = f
for field in fields:
ftype, fname = field
ds = np.atleast_3d(gds.get(fname)[()].transpose())
nd = g.select(selector, ds, rv[field], ind) # caches
ind += nd
f.close()
return rv
class IOHandlerPacked1D(IOHandlerPackedHDF5):
_dataset_type = "enzo_packed_1d"
_particle_reader = False
def _read_data_set(self, grid, field):
f = h5py.File(grid.filename, mode="r")
ds = f["/Grid%08i/%s" % (grid.id, field)][:]
f.close()
return ds.transpose()[:, None, None]
| [
"yt.utilities.on_demand_imports._h5py.File",
"yt.utilities.io_handler.BaseIOHandler.__init__",
"numpy.empty",
"yt.utilities.logger.ytLogger.debug"
] | [((550, 584), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['grid.filename'], {'mode': '"""r"""'}), "(grid.filename, mode='r')\n", (559, 584), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n'), ((7940, 7972), 'yt.utilities.io_handler.BaseIOHandler.__init__', 'BaseIOHandler.__init__', (['self', 'ds'], {}), '(self, ds)\n', (7962, 7972), False, 'from yt.utilities.io_handler import BaseIOHandler\n'), ((9396, 9493), 'yt.utilities.logger.ytLogger.debug', 'mylog.debug', (['"""Reading %s cells of %s fields in %s grids"""', 'size', '[f2 for f1, f2 in fields]', 'ng'], {}), "('Reading %s cells of %s fields in %s grids', size, [f2 for f1,\n f2 in fields], ng)\n", (9407, 9493), True, 'from yt.utilities.logger import ytLogger as mylog\n'), ((12245, 12279), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['grid.filename'], {'mode': '"""r"""'}), "(grid.filename, mode='r')\n", (12254, 12279), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n'), ((13300, 13397), 'yt.utilities.logger.ytLogger.debug', 'mylog.debug', (['"""Reading %s cells of %s fields in %s grids"""', 'size', '[f2 for f1, f2 in fields]', 'ng'], {}), "('Reading %s cells of %s fields in %s grids', size, [f2 for f1,\n f2 in fields], ng)\n", (13311, 13397), True, 'from yt.utilities.logger import ytLogger as mylog\n'), ((14269, 14303), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['grid.filename'], {'mode': '"""r"""'}), "(grid.filename, mode='r')\n", (14278, 14303), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n'), ((6390, 6451), 'numpy.empty', 'np.empty', (['obj.ActiveDimensions[::-1]'], {'dtype': 'self._field_dtype'}), '(obj.ActiveDimensions[::-1], dtype=self._field_dtype)\n', (6398, 6451), True, 'import numpy as np\n'), ((9309, 9341), 'numpy.empty', 'np.empty', (['fsize'], {'dtype': '"""float64"""'}), "(fsize, dtype='float64')\n", (9317, 9341), True, 'import numpy as np\n'), ((12752, 12783), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['g.filename'], {'mode': '"""r"""'}), "(g.filename, mode='r')\n", (12761, 12783), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n'), ((13213, 13245), 'numpy.empty', 'np.empty', (['fsize'], {'dtype': '"""float64"""'}), "(fsize, dtype='float64')\n", (13221, 13245), True, 'import numpy as np\n'), ((2705, 2736), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['g.filename'], {'mode': '"""r"""'}), "(g.filename, mode='r')\n", (2714, 2736), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n'), ((5857, 5887), 'numpy.empty', 'np.empty', (['dims'], {'dtype': 'h5_dtype'}), '(dims, dtype=h5_dtype)\n', (5865, 5887), True, 'import numpy as np\n'), ((13669, 13700), 'yt.utilities.on_demand_imports._h5py.File', 'h5py.File', (['g.filename'], {'mode': '"""r"""'}), "(g.filename, mode='r')\n", (13678, 13700), True, 'from yt.utilities.on_demand_imports import _h5py as h5py\n')] |
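The packed-HDF5 layout the handlers above read ("/Grid%08i/<field>") can be reproduced with plain h5py, independent of yt; a small round-trip sketch:

import numpy as np
import h5py

with h5py.File("packed_grid_demo.h5", "w") as f:
    # one group per grid, one dataset per field, as in the Enzo packed format
    f.create_dataset("Grid%08i/Density" % 1, data=np.arange(8.0).reshape(2, 2, 2))

with h5py.File("packed_grid_demo.h5", "r") as f:
    grid = f["Grid%08i" % 1]
    density = grid["Density"][()]   # read the whole dataset into a numpy array
    print(sorted(grid.keys()), density.shape)  # ['Density'] (2, 2, 2)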
#!/usr/bin/env python3
# Crypto trading bot using binance api
# Author: LeonardoM011<<EMAIL>>
# Created on 2021-02-05 21:56
# Set constants here:
DELTA_TIME = 300 # Time window for setting up a new trade (in seconds)
# ----------------------
# Imports:
import os
import sys
import time as t
import datetime
# Adding python-binance to path and importing python-binance
sys.path.insert(1, "../deps/binance")
from binance.client import Client
from fun import *
import candles as can
# Globals:
client = None
# Main program loop
def start():
hour_repeated = -1
try:
while True:
time = datetime.datetime.now()
hour = time.hour
minute = time.minute
open_trade = client.futures_get_open_orders()
if minute < 10:
if not open_trade and hour_repeated != hour:
candles = client.futures_klines(symbol="BTCUSDT", interval=Client.KLINE_INTERVAL_1HOUR, contractType="PERPETUAL")
info = can.get_candle_info(candles[:-1])
candle_side = can.get_side(info)
if candle_side:
output.print_info('Initiating trade...')
#current_price = client.futures_mark_price(symbol="BTCUSDT", contractType="PERPETUAL")['markPrice']
close_price = candles
client.futures_create_order(symbol="BTCUSDT", side=candle_side, type=Client.ORDER_TYPE_MARKET, quantity=0.001)
client.futures_create_order(symbol="BTCUSDT", side=can.flip_side(candle_side), type=Client.ORDER_TYPE_STOP_LOSS_LIMIT, quantity=0.001, price=57975.0, stopPrice=57976.0, workingType="MARK_PRICE")
hour_repeated = hour
t.sleep(300)
except KeyboardInterrupt:
print('Program canceled...')
def connect():
while True:
api_key = get_api_key("BINANCE_API_KEY")
api_secret = get_api_key("BINANCE_API_SECRET_KEY")
output.print_info('Connecting to binance...')
global client
client = Client(api_key, api_secret)
if check_connectivity(client):
output.print_ok('Successfully connected to binance.')
if check_account_status(client):
output.print_ok('Successfully connected using api keys.')
return
output.print_failed('Cannot connect to binance with api keys.')
def main():
output.print_ok('Starting kobe trading bot...')
connect()
start()
#try:
# client.get_all_orders()
#except BinanceAPIException as e:
# print e.status_code
# print e.message
# datetime.datetime.now().year
#btcusdt_price = requests.get("https://api.binance.com/api/v3/ticker/price?symbol=BTCUSDT")
#if (btcusdt_price.status_code != 200):
# print("Error connecting to api server to get price")
# return
#print("Successfully connected and got price")
#while(True):
# btcusdt_price = requests.get("https://api.binance.com/api/v3/ticker/price?symbol=BTCUSDT")
# print("BTC/USDT: {}".format(btcusdt_price.json()['price']))
# time.sleep(1.0)
#btcusdtindex = find_index_of('symbol', 'BTCUSDT', client.get_all_tickers())
#while (True):
# print(client.get_all_tickers()[btcusdtindex])
# time.sleep(5.0)
# client.futures_create_order(symbol="BTCUSDT", side="SELL", type="STOP", quantity=0.001, price=57975.0, stopPrice=57976.0, workingType="MARK_PRICE")
# client.futures_create_order(symbol="BTCUSDT", side="BUY", type="MARKET", quantity=0.001)
if __name__ == "__main__":
main() | [
"binance.client.Client",
"sys.path.insert",
"time.sleep",
"datetime.datetime.now",
"candles.flip_side",
"candles.get_candle_info",
"candles.get_side"
] | [((402, 439), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../deps/binance"""'], {}), "(1, '../deps/binance')\n", (417, 439), False, 'import sys\n'), ((2173, 2200), 'binance.client.Client', 'Client', (['api_key', 'api_secret'], {}), '(api_key, api_secret)\n', (2179, 2200), False, 'from binance.client import Client\n'), ((660, 683), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (681, 683), False, 'import datetime\n'), ((1839, 1851), 'time.sleep', 't.sleep', (['(300)'], {}), '(300)\n', (1846, 1851), True, 'import time as t\n'), ((1061, 1094), 'candles.get_candle_info', 'can.get_candle_info', (['candles[:-1]'], {}), '(candles[:-1])\n', (1080, 1094), True, 'import candles as can\n'), ((1130, 1148), 'candles.get_side', 'can.get_side', (['info'], {}), '(info)\n', (1142, 1148), True, 'import candles as can\n'), ((1636, 1662), 'candles.flip_side', 'can.flip_side', (['candle_side'], {}), '(candle_side)\n', (1649, 1662), True, 'import candles as can\n')] |
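The trading loop above boils down to an hourly gate: act only during the first ten minutes of an hour that has not been handled yet, then sleep. A sketch of that scheduling pattern without any exchange calls (function and parameter names are illustrative):

import datetime
import time

def run_once_per_hour(action, poll_seconds=300, iterations=3):
    handled_hour = -1
    for _ in range(iterations):            # bounded loop instead of `while True`
        now = datetime.datetime.now()
        if now.minute < 10 and now.hour != handled_hour:
            action(now)
            handled_hour = now.hour
        time.sleep(poll_seconds)

# Example: run_once_per_hour(lambda ts: print("would trade at", ts), poll_seconds=1)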
import matplotlib.pyplot as plt
import random
import pickle
from skimage.transform import rotate
from scipy import ndimage
from skimage.util import img_as_ubyte
from joblib import Parallel, delayed
from sklearn.ensemble.forest import _generate_unsampled_indices
from sklearn.ensemble.forest import _generate_sample_indices
import numpy as np
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from itertools import product
import keras
from keras import layers
from joblib import Parallel, delayed
from multiprocessing import Pool
import tensorflow as tf
from numba import cuda
import sys
sys.path.append("../../proglearn/")
from progressive_learner import ProgressiveLearner
from deciders import SimpleArgmaxAverage
from transformers import TreeClassificationTransformer, NeuralClassificationTransformer
from voters import TreeClassificationVoter, KNNClassificationVoter
def cross_val_data(data_x, data_y, total_cls=10):
x = data_x.copy()
y = data_y.copy()
idx = [np.where(data_y == u)[0] for u in np.unique(data_y)]
for i in range(total_cls):
indx = idx[i]#np.roll(idx[i],(cv-1)*100)
random.shuffle(indx)
if i==0:
train_x1 = x[indx[0:250],:]
train_x2 = x[indx[250:500],:]
train_y1 = y[indx[0:250]]
train_y2 = y[indx[250:500]]
test_x = x[indx[500:600],:]
test_y = y[indx[500:600]]
else:
train_x1 = np.concatenate((train_x1, x[indx[0:250],:]), axis=0)
train_x2 = np.concatenate((train_x2, x[indx[250:500],:]), axis=0)
train_y1 = np.concatenate((train_y1, y[indx[0:250]]), axis=0)
train_y2 = np.concatenate((train_y2, y[indx[250:500]]), axis=0)
test_x = np.concatenate((test_x, x[indx[500:600],:]), axis=0)
test_y = np.concatenate((test_y, y[indx[500:600]]), axis=0)
return train_x1, train_y1, train_x2, train_y2, test_x, test_y
def LF_experiment(data_x, data_y, angle, model, granularity, reps=1, ntrees=29, acorn=None):
if acorn is not None:
np.random.seed(acorn)
errors = np.zeros(2)
for rep in range(reps):
print("Starting Rep {} of Angle {}".format(rep, angle))
train_x1, train_y1, train_x2, train_y2, test_x, test_y = cross_val_data(data_x, data_y, total_cls=10)
#change data angle for second task
tmp_data = train_x2.copy()
_tmp_ = np.zeros((32,32,3), dtype=int)
total_data = tmp_data.shape[0]
for i in range(total_data):
tmp_ = image_aug(tmp_data[i],angle)
tmp_data[i] = tmp_
if model == "uf":
train_x1 = train_x1.reshape((train_x1.shape[0], train_x1.shape[1] * train_x1.shape[2] * train_x1.shape[3]))
tmp_data = tmp_data.reshape((tmp_data.shape[0], tmp_data.shape[1] * tmp_data.shape[2] * tmp_data.shape[3]))
test_x = test_x.reshape((test_x.shape[0], test_x.shape[1] * test_x.shape[2] * test_x.shape[3]))
with tf.device('/gpu:'+str(int(angle // granularity) % 4)):
default_transformer_class = NeuralClassificationTransformer
network = keras.Sequential()
network.add(layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=np.shape(train_x1)[1:]))
network.add(layers.BatchNormalization())
network.add(layers.Conv2D(filters=32, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
network.add(layers.BatchNormalization())
network.add(layers.Conv2D(filters=64, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
network.add(layers.BatchNormalization())
network.add(layers.Conv2D(filters=128, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
network.add(layers.BatchNormalization())
network.add(layers.Conv2D(filters=254, kernel_size=(3, 3), strides = 2, padding = "same", activation='relu'))
network.add(layers.Flatten())
network.add(layers.BatchNormalization())
network.add(layers.Dense(2000, activation='relu'))
network.add(layers.BatchNormalization())
network.add(layers.Dense(2000, activation='relu'))
network.add(layers.BatchNormalization())
network.add(layers.Dense(units=10, activation = 'softmax'))
default_transformer_kwargs = {"network" : network,
"euclidean_layer_idx" : -2,
"num_classes" : 10,
"optimizer" : keras.optimizers.Adam(3e-4)
}
default_voter_class = KNNClassificationVoter
default_voter_kwargs = {"k" : int(np.log2(len(train_x1)))}
default_decider_class = SimpleArgmaxAverage
progressive_learner = ProgressiveLearner(default_transformer_class = default_transformer_class,
default_transformer_kwargs = default_transformer_kwargs,
default_voter_class = default_voter_class,
default_voter_kwargs = default_voter_kwargs,
default_decider_class = default_decider_class)
progressive_learner.add_task(
X = train_x1,
y = train_y1,
transformer_voter_decider_split = [0.67, 0.33, 0],
decider_kwargs = {"classes" : np.unique(train_y1)}
)
progressive_learner.add_transformer(
X = tmp_data,
y = train_y2,
transformer_data_proportion = 1,
backward_task_ids = [0]
)
llf_task1=progressive_learner.predict(test_x, task_id=0)
llf_single_task=progressive_learner.predict(test_x, task_id=0, transformer_ids=[0])
errors[1] = errors[1]+(1 - np.mean(llf_task1 == test_y))
errors[0] = errors[0]+(1 - np.mean(llf_single_task == test_y))
errors = errors/reps
print("Errors For Angle {}: {}".format(angle, errors))
with open('rotation_results/angle_'+str(angle)+'_'+model+'.pickle', 'wb') as f:
pickle.dump(errors, f, protocol = 2)
def image_aug(pic, angle, centroid_x=23, centroid_y=23, win=16, scale=1.45):
im_sz = int(np.floor(pic.shape[0]*scale))
pic_ = np.uint8(np.zeros((im_sz,im_sz,3),dtype=int))
pic_[:,:,0] = ndimage.zoom(pic[:,:,0],scale)
pic_[:,:,1] = ndimage.zoom(pic[:,:,1],scale)
pic_[:,:,2] = ndimage.zoom(pic[:,:,2],scale)
image_aug = rotate(pic_, angle, resize=False)
#print(image_aug.shape)
image_aug_ = image_aug[centroid_x-win:centroid_x+win,centroid_y-win:centroid_y+win,:]
return img_as_ubyte(image_aug_)
### MAIN HYPERPARAMS ###
model = "dnn"
granularity = 2
reps = 4
########################
(X_train, y_train), (X_test, y_test) = keras.datasets.cifar100.load_data()
data_x = np.concatenate([X_train, X_test])
data_y = np.concatenate([y_train, y_test])
data_y = data_y[:, 0]
def perform_angle(angle):
LF_experiment(data_x, data_y, angle, model, granularity, reps=reps, ntrees=16, acorn=1)
if model == "dnn":
for angle_adder in range(30, 180, granularity * 4):
angles = angle_adder + np.arange(0, granularity * 4, granularity)
with Pool(4) as p:
p.map(perform_angle, angles)
elif model == "uf":
angles = np.arange(30,180,2)
Parallel(n_jobs=-1)(delayed(LF_experiment)(data_x, data_y, angle, model, granularity, reps=20, ntrees=16, acorn=1) for angle in angles)
| [
"keras.layers.Conv2D",
"keras.layers.Dense",
"sys.path.append",
"scipy.ndimage.zoom",
"numpy.arange",
"keras.Sequential",
"numpy.mean",
"skimage.transform.rotate",
"numpy.where",
"keras.datasets.cifar100.load_data",
"skimage.util.img_as_ubyte",
"numpy.random.seed",
"numpy.concatenate",
"keras.optimizers.Adam",
"random.shuffle",
"keras.layers.Flatten",
"numpy.floor",
"progressive_learner.ProgressiveLearner",
"keras.layers.BatchNormalization",
"numpy.shape",
"pickle.dump",
"numpy.unique",
"joblib.Parallel",
"numpy.zeros",
"multiprocessing.Pool",
"joblib.delayed"
] | [((638, 673), 'sys.path.append', 'sys.path.append', (['"""../../proglearn/"""'], {}), "('../../proglearn/')\n", (653, 673), False, 'import sys\n'), ((7056, 7091), 'keras.datasets.cifar100.load_data', 'keras.datasets.cifar100.load_data', ([], {}), '()\n', (7089, 7091), False, 'import keras\n'), ((7101, 7134), 'numpy.concatenate', 'np.concatenate', (['[X_train, X_test]'], {}), '([X_train, X_test])\n', (7115, 7134), True, 'import numpy as np\n'), ((7144, 7177), 'numpy.concatenate', 'np.concatenate', (['[y_train, y_test]'], {}), '([y_train, y_test])\n', (7158, 7177), True, 'import numpy as np\n'), ((2145, 2156), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2153, 2156), True, 'import numpy as np\n'), ((6589, 6622), 'scipy.ndimage.zoom', 'ndimage.zoom', (['pic[:, :, 0]', 'scale'], {}), '(pic[:, :, 0], scale)\n', (6601, 6622), False, 'from scipy import ndimage\n'), ((6639, 6672), 'scipy.ndimage.zoom', 'ndimage.zoom', (['pic[:, :, 1]', 'scale'], {}), '(pic[:, :, 1], scale)\n', (6651, 6672), False, 'from scipy import ndimage\n'), ((6688, 6721), 'scipy.ndimage.zoom', 'ndimage.zoom', (['pic[:, :, 2]', 'scale'], {}), '(pic[:, :, 2], scale)\n', (6700, 6721), False, 'from scipy import ndimage\n'), ((6736, 6769), 'skimage.transform.rotate', 'rotate', (['pic_', 'angle'], {'resize': '(False)'}), '(pic_, angle, resize=False)\n', (6742, 6769), False, 'from skimage.transform import rotate\n'), ((6900, 6924), 'skimage.util.img_as_ubyte', 'img_as_ubyte', (['image_aug_'], {}), '(image_aug_)\n', (6912, 6924), False, 'from skimage.util import img_as_ubyte\n'), ((1170, 1190), 'random.shuffle', 'random.shuffle', (['indx'], {}), '(indx)\n', (1184, 1190), False, 'import random\n'), ((2109, 2130), 'numpy.random.seed', 'np.random.seed', (['acorn'], {}), '(acorn)\n', (2123, 2130), True, 'import numpy as np\n'), ((2456, 2488), 'numpy.zeros', 'np.zeros', (['(32, 32, 3)'], {'dtype': 'int'}), '((32, 32, 3), dtype=int)\n', (2464, 2488), True, 'import numpy as np\n'), ((6352, 6386), 'pickle.dump', 'pickle.dump', (['errors', 'f'], {'protocol': '(2)'}), '(errors, f, protocol=2)\n', (6363, 6386), False, 'import pickle\n'), ((6483, 6513), 'numpy.floor', 'np.floor', (['(pic.shape[0] * scale)'], {}), '(pic.shape[0] * scale)\n', (6491, 6513), True, 'import numpy as np\n'), ((6533, 6571), 'numpy.zeros', 'np.zeros', (['(im_sz, im_sz, 3)'], {'dtype': 'int'}), '((im_sz, im_sz, 3), dtype=int)\n', (6541, 6571), True, 'import numpy as np\n'), ((7570, 7591), 'numpy.arange', 'np.arange', (['(30)', '(180)', '(2)'], {}), '(30, 180, 2)\n', (7579, 7591), True, 'import numpy as np\n'), ((1027, 1048), 'numpy.where', 'np.where', (['(data_y == u)'], {}), '(data_y == u)\n', (1035, 1048), True, 'import numpy as np\n'), ((1061, 1078), 'numpy.unique', 'np.unique', (['data_y'], {}), '(data_y)\n', (1070, 1078), True, 'import numpy as np\n'), ((1485, 1538), 'numpy.concatenate', 'np.concatenate', (['(train_x1, x[indx[0:250], :])'], {'axis': '(0)'}), '((train_x1, x[indx[0:250], :]), axis=0)\n', (1499, 1538), True, 'import numpy as np\n'), ((1561, 1616), 'numpy.concatenate', 'np.concatenate', (['(train_x2, x[indx[250:500], :])'], {'axis': '(0)'}), '((train_x2, x[indx[250:500], :]), axis=0)\n', (1575, 1616), True, 'import numpy as np\n'), ((1639, 1689), 'numpy.concatenate', 'np.concatenate', (['(train_y1, y[indx[0:250]])'], {'axis': '(0)'}), '((train_y1, y[indx[0:250]]), axis=0)\n', (1653, 1689), True, 'import numpy as np\n'), ((1713, 1765), 'numpy.concatenate', 'np.concatenate', (['(train_y2, y[indx[250:500]])'], {'axis': '(0)'}), '((train_y2, 
y[indx[250:500]]), axis=0)\n', (1727, 1765), True, 'import numpy as np\n'), ((1788, 1841), 'numpy.concatenate', 'np.concatenate', (['(test_x, x[indx[500:600], :])'], {'axis': '(0)'}), '((test_x, x[indx[500:600], :]), axis=0)\n', (1802, 1841), True, 'import numpy as np\n'), ((1862, 1912), 'numpy.concatenate', 'np.concatenate', (['(test_y, y[indx[500:600]])'], {'axis': '(0)'}), '((test_y, y[indx[500:600]]), axis=0)\n', (1876, 1912), True, 'import numpy as np\n'), ((3182, 3200), 'keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (3198, 3200), False, 'import keras\n'), ((4969, 5237), 'progressive_learner.ProgressiveLearner', 'ProgressiveLearner', ([], {'default_transformer_class': 'default_transformer_class', 'default_transformer_kwargs': 'default_transformer_kwargs', 'default_voter_class': 'default_voter_class', 'default_voter_kwargs': 'default_voter_kwargs', 'default_decider_class': 'default_decider_class'}), '(default_transformer_class=default_transformer_class,\n default_transformer_kwargs=default_transformer_kwargs,\n default_voter_class=default_voter_class, default_voter_kwargs=\n default_voter_kwargs, default_decider_class=default_decider_class)\n', (4987, 5237), False, 'from progressive_learner import ProgressiveLearner\n'), ((7426, 7468), 'numpy.arange', 'np.arange', (['(0)', '(granularity * 4)', 'granularity'], {}), '(0, granularity * 4, granularity)\n', (7435, 7468), True, 'import numpy as np\n'), ((7482, 7489), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (7486, 7489), False, 'from multiprocessing import Pool\n'), ((7594, 7613), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (7602, 7613), False, 'from joblib import Parallel, delayed\n'), ((3351, 3378), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3376, 3378), False, 'from keras import layers\n'), ((3404, 3499), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (3417, 3499), False, 'from keras import layers\n'), ((3525, 3552), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3550, 3552), False, 'from keras import layers\n'), ((3578, 3673), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (3591, 3673), False, 'from keras import layers\n'), ((3699, 3726), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3724, 3726), False, 'from keras import layers\n'), ((3752, 3848), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=128, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (3765, 3848), False, 'from keras import layers\n'), ((3874, 3901), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (3899, 3901), False, 'from keras import layers\n'), ((3927, 4023), 'keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(254)', 'kernel_size': '(3, 3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters=254, kernel_size=(3, 3), strides=2, padding='same',\n activation='relu')\n", (3940, 4023), False, 
'from keras import layers\n'), ((4050, 4066), 'keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (4064, 4066), False, 'from keras import layers\n'), ((4092, 4119), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4117, 4119), False, 'from keras import layers\n'), ((4145, 4182), 'keras.layers.Dense', 'layers.Dense', (['(2000)'], {'activation': '"""relu"""'}), "(2000, activation='relu')\n", (4157, 4182), False, 'from keras import layers\n'), ((4208, 4235), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4233, 4235), False, 'from keras import layers\n'), ((4261, 4298), 'keras.layers.Dense', 'layers.Dense', (['(2000)'], {'activation': '"""relu"""'}), "(2000, activation='relu')\n", (4273, 4298), False, 'from keras import layers\n'), ((4324, 4351), 'keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), '()\n', (4349, 4351), False, 'from keras import layers\n'), ((4377, 4421), 'keras.layers.Dense', 'layers.Dense', ([], {'units': '(10)', 'activation': '"""softmax"""'}), "(units=10, activation='softmax')\n", (4389, 4421), False, 'from keras import layers\n'), ((4677, 4706), 'keras.optimizers.Adam', 'keras.optimizers.Adam', (['(0.0003)'], {}), '(0.0003)\n', (4698, 4706), False, 'import keras\n'), ((6070, 6098), 'numpy.mean', 'np.mean', (['(llf_task1 == test_y)'], {}), '(llf_task1 == test_y)\n', (6077, 6098), True, 'import numpy as np\n'), ((6139, 6173), 'numpy.mean', 'np.mean', (['(llf_single_task == test_y)'], {}), '(llf_single_task == test_y)\n', (6146, 6173), True, 'import numpy as np\n'), ((7614, 7636), 'joblib.delayed', 'delayed', (['LF_experiment'], {}), '(LF_experiment)\n', (7621, 7636), False, 'from joblib import Parallel, delayed\n'), ((5615, 5634), 'numpy.unique', 'np.unique', (['train_y1'], {}), '(train_y1)\n', (5624, 5634), True, 'import numpy as np\n'), ((3302, 3320), 'numpy.shape', 'np.shape', (['train_x1'], {}), '(train_x1)\n', (3310, 3320), True, 'import numpy as np\n')] |
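The image_aug() helper in the record above (zoom, rotate about a centroid, crop a window) can be exercised on its own; the sketch below uses the same scipy/skimage calls and keeps the script's default parameters:

import numpy as np
from scipy import ndimage
from skimage.transform import rotate
from skimage.util import img_as_ubyte

def rotate_and_crop(pic, angle, centroid=(23, 23), win=16, scale=1.45):
    size = int(np.floor(pic.shape[0] * scale))
    zoomed = np.zeros((size, size, 3), dtype=np.uint8)
    for c in range(3):                        # zoom each colour channel separately
        zoomed[:, :, c] = ndimage.zoom(pic[:, :, c], scale)
    rotated = rotate(zoomed, angle, resize=False)  # float image in [0, 1]
    cx, cy = centroid
    crop = rotated[cx - win:cx + win, cy - win:cy + win, :]
    return img_as_ubyte(crop)                    # back to uint8, shape (2*win, 2*win, 3)

demo = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
print(rotate_and_crop(demo, 45).shape)  # (32, 32, 3)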
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn_pandas.util import validate_dataframe
class MonitorMixin(object):
def print_message(self, message):
if self.logfile:
with open(self.logfile, "a") as fout:
fout.write(message)
else:
print(message)
class ValidateTypes(BaseEstimator, TransformerMixin, MonitorMixin):
def __init__(self, logfile=None, to_screen=True):
self.logfile = logfile
self.to_screen = to_screen
def fit(self, X, y=None, **fitparams):
X = validate_dataframe(X)
self.types = {}
for col in X.columns:
self.types[col] = X[col].dtype.name
return self
def transform(self, X, **transformparams):
X = validate_dataframe(X)
new_col_list = []
for col in X.columns:
var_type = X[col].dtype.name
if var_type != self.types[col]:
self.print_message(
'Data Type Mismatch for column {col}: Expected {expected} Received {received}'.format(
col=col, expected=self.types[col], received=var_type)
)
return X
class ValidateRange(BaseEstimator, TransformerMixin, MonitorMixin):
def __init__(self, logfile=None, to_screen=True, max_nunique=20):
self.logfile = logfile
self.to_screen = to_screen
self.max_nunique = max_nunique
def fit(self, X, y=None, **fitparams):
X = validate_dataframe(X)
self.types = {}
self.unique_vals = {}
self.minmax = {}
for col in X.columns:
self.types[col] = X[col].dtype.name
if self.types[col] in ('object', 'bool', 'category'):
unique_values = X[col].unique()
if len(unique_values) <= self.max_nunique:
self.unique_vals[col] = unique_values
else:
self.unique_vals[col] = None
elif self.types[col] in ('int64', 'float64', 'datetime64', 'timedelta'):
self.minmax[col] = (X[col].min(), X[col].max())
return self
def transform(self, X, **transformparams):
X = validate_dataframe(X)
new_col_list = []
for col in X.columns:
var_type = X[col].dtype.name
if self.types[col] in ('object', 'bool', 'category'):
if self.unique_vals[col] is not None:
not_in_list = ~X[col].isin(self.unique_vals[col])
if sum(not_in_list) > 0:
new_values = str(X[col][not_in_list].unique().tolist())
self.print_message(
'New Categories specified for column {col}: Received {received}'.format(
col=col, received=new_values)
)
elif self.types[col] in ('int64', 'float64', 'datetime64', 'timedelta'):
minX = X[col].min()
maxX = X[col].max()
if minX < self.minmax[col][0]:
self.print_message(
'Low Value warning for column {col}: Lowest Training value {lowtrain}, Lowest Scoring value {lowscore}'.format(
col=col, lowtrain=self.minmax[col][0], lowscore=minX)
)
if maxX > self.minmax[col][1]:
self.print_message(
'High Value warning for column {col}: Largest Training value {hightrain}, Largest Scoring value {highscore}'.format(
col=col, hightrain=self.minmax[col][1], highscore=maxX)
)
return X
| [
"sklearn_pandas.util.validate_dataframe"
] | [((622, 643), 'sklearn_pandas.util.validate_dataframe', 'validate_dataframe', (['X'], {}), '(X)\n', (640, 643), False, 'from sklearn_pandas.util import validate_dataframe\n'), ((826, 847), 'sklearn_pandas.util.validate_dataframe', 'validate_dataframe', (['X'], {}), '(X)\n', (844, 847), False, 'from sklearn_pandas.util import validate_dataframe\n'), ((1548, 1569), 'sklearn_pandas.util.validate_dataframe', 'validate_dataframe', (['X'], {}), '(X)\n', (1566, 1569), False, 'from sklearn_pandas.util import validate_dataframe\n'), ((2259, 2280), 'sklearn_pandas.util.validate_dataframe', 'validate_dataframe', (['X'], {}), '(X)\n', (2277, 2280), False, 'from sklearn_pandas.util import validate_dataframe\n')] |
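The monitoring transformers above remember training-time types, ranges, and categories, then print a message when scoring data drifts. The core idea in a few lines of plain pandas (column names invented for the example):

import pandas as pd

train = pd.DataFrame({"age": [18, 35, 60], "plan": ["a", "b", "a"]})
score = pd.DataFrame({"age": [17, 40, 95], "plan": ["a", "c", "b"]})

bounds = {c: (train[c].min(), train[c].max()) for c in train.select_dtypes("number")}
seen = {c: set(train[c]) for c in train.select_dtypes("object")}

for col, (lo, hi) in bounds.items():
    if score[col].min() < lo or score[col].max() > hi:
        print(f"Range warning for {col}: trained on [{lo}, {hi}], "
              f"scoring saw [{score[col].min()}, {score[col].max()}]")
for col, values in seen.items():
    new = set(score[col]) - values
    if new:
        print(f"New categories for {col}: {sorted(new)}")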
import pytest
from CommonServerPython import *
from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, \
pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, \
get_incidents_batch_by_time_request, get_new_incidents, get_time_delta
MOCK_INCIDENT = {
"id": 1,
"type": "Malware",
"summary": "Unsolicited Bulk Email",
"description": "EvilScheme test message",
"score": 4200,
"state": "Open",
"created_at": "2018-05-26T21:07:17Z",
"event_count": 3,
"event_sources": [
"Proofpoint TAP"
],
"users": [
""
],
"assignee": "Unassigned",
"team": "Unassigned",
"hosts": {
"attacker": [
""
],
"forensics": [
"",
]
},
"incident_field_values": [
{
"name": "Attack Vector",
"value": "Email"
},
{
"name": "Classification",
"value": "Spam"
},
{
"name": "Severity",
"value": "Critical"
},
{
"name": "Abuse Disposition",
"value": "Unknown"
}
],
"events": [
{
"id": 3,
"category": "malware",
"severity": "Info",
"source": "Proofpoint TAP",
"threatname": "",
"state": "Linked",
"description": "",
"attackDirection": "inbound",
"received": "2018-05-26T21:07:17Z",
"malwareName": "",
"emails": [
{
"sender": {
"email": "test"
},
"recipient": {
"email": "test"
},
"subject": "test",
"messageId": "test",
"messageDeliveryTime": {
"chronology": {
"zone": {
"id": "UTC"
}
},
"millis": 1544640072000,
},
"abuseCopy": "false",
"body": "test",
'bodyType': "test",
'headers': "test",
'urls': "test"
}
],
}
],
"quarantine_results": [],
"successful_quarantines": 0,
"failed_quarantines": 0,
"pending_quarantines": 0
}
INCIDENT_FIELD_CONTEXT = {
"Attack_Vector": "Email",
"Classification": "Spam",
"Severity": "Critical",
"Abuse_Disposition": "Unknown"
}
INCIDENT_FIELD_INPUT = [
(MOCK_INCIDENT, INCIDENT_FIELD_CONTEXT)
]
def get_fetch_data():
with open('./test_data/raw_response.json', 'r') as f:
file = json.loads(f.read())
return file.get('result')
FETCH_RESPONSE = get_fetch_data()
@pytest.mark.parametrize('incident, answer', INCIDENT_FIELD_INPUT)
def test_get_incident_field_context(incident, answer):
incident_field_values = create_incident_field_context(incident)
assert incident_field_values == answer
EMAIL_RESULT = [
{
'sender': "test",
'recipient': "test",
'subject': "test",
'message_id': "test",
'message_delivery_time': 1544640072000,
'body': "test",
'body_type': "test",
'headers': "test",
'urls': "test"
}
]
EMAILS_CONTEXT_INPUT = [
(MOCK_INCIDENT['events'][0], EMAIL_RESULT)
]
@pytest.mark.parametrize('event, answer', EMAILS_CONTEXT_INPUT)
def test_get_emails_context(event, answer):
emails_context = get_emails_context(event)
assert emails_context == answer
SOURCE_LIST_INPUT = [
(["Proofpoint TAP"], True),
([], True),
(["No such source"], False),
(["No such source", "Proofpoint TAP"], True)
]
@pytest.mark.parametrize('sources_list, expected_answer', SOURCE_LIST_INPUT)
def test_pass_sources_list_filter(sources_list, expected_answer):
result = pass_sources_list_filter(MOCK_INCIDENT, sources_list)
assert result == expected_answer
ABUSE_DISPOSITION_INPUT = [
(["Unknown"], True),
([], True),
(["No such value"], False),
(["No such value", "Unknown"], True)
]
@pytest.mark.parametrize('abuse_dispotion_values, expected_answer', ABUSE_DISPOSITION_INPUT)
def test_pass_abuse_disposition_filter(abuse_dispotion_values, expected_answer):
result = pass_abuse_disposition_filter(MOCK_INCIDENT, abuse_dispotion_values)
assert result == expected_answer
DEMISTO_PARAMS = [({'event_sources': "No such source, Proofpoint TAP", 'abuse_disposition': "No such value, Unknown"},
[MOCK_INCIDENT]), ({'event_sources': "", 'abuse_disposition': ""}, [MOCK_INCIDENT]),
({'event_sources': "No such source", 'abuse_disposition': "No such value, Unknown"}, []),
({'event_sources': "No such source, Proofpoint TAP", 'abuse_disposition': "No such value"}, []),
({'event_sources': "No such source", 'abuse_disposition': "No such value"}, [])]
@pytest.mark.parametrize('demisto_params, expected_answer', DEMISTO_PARAMS)
def test_filter_incidents(mocker, demisto_params, expected_answer):
mocker.patch.object(demisto, 'params', return_value=demisto_params)
filtered_incidents = filter_incidents([MOCK_INCIDENT])
assert filtered_incidents == expected_answer
INGEST_ALERT_ARGS = {
"attacker": "{\"attacker\":{\"key\":\"value\"}}",
"cnc_host": "{\"cnc_host\":{\"key\":\"value\"}}",
"detector": "{\"detector\":{\"key\":\"value\"}}",
"email": "{\"email\":{\"key\":\"value\"}}",
"forensics_hosts": "{\"forensics_hosts\":{\"key\":\"value\"}}",
"target": "{\"target\":{\"key\":\"value\"}}",
"threat_info": "{\"threat_info\":{\"key\":\"value\"}}",
"custom_fields": "{\"custom_fields\":{\"key\":\"value\"}}",
"post_url_id": "value",
"json_version": "value",
"summary": "value"
}
EXPECTED_RESULT = {
"attacker": {"key": "value"},
"cnc_host": {"key": "value"},
"detector": {"key": "value"},
"email": {"key": "value"},
"forensics_hosts": {"key": "value"},
"target": {"key": "value"},
"threat_info": {"key": "value"},
"custom_fields": {"key": "value"},
"post_url_id": "value",
"json_version": "value",
"summary": "value"
}
def test_prepare_ingest_alert_request_body():
prepared_body = prepare_ingest_alert_request_body(INGEST_ALERT_ARGS)
assert prepared_body == EXPECTED_RESULT
def test_fetch_incidents_limit_exceed(mocker):
"""
Given
- a dict of params given to the function which is gathered originally from demisto.params()
The dict includes the relevant params for the fetch e.g. fetch_delta, fetch_limit, created_after, state.
- response of the api
When
- a single iteration of the fetch is activated with a fetch limit set to 5
Then
    - validate that the number of incidents returned is equal to the limit when the api returned more.
"""
params = {
'fetch_delta': '6 hours',
'fetch_limit': ' 5',
'created_after': '2021-03-30T11:44:24Z',
'state': 'closed'
}
mocker.patch('ProofpointThreatResponse.get_incidents_request', return_value=FETCH_RESPONSE)
incidents_list = get_incidents_batch_by_time_request(params)
assert len(incidents_list) == 5
def test_fetch_incidents_with_same_created_time(mocker):
"""
Given
- a dict of params given to the function which is gathered originally from demisto.params()
The dict includes the relevant params for the fetch e.g. fetch_delta, fetch_limit, created_after, state and
last_fetched_id.
- response of the api
When
    - when a fetch occurs and the last fetched incident has exactly the same time as the next incident.
Then
- validate that only one of the incidents appear as to the fetch limit.
- validate that the next incident whose time is exactly the same is brought in the next fetch loop.
( e.g. 3057 and 3058)
"""
expected_ids_to_fetch_first = [3055, 3056, 3057]
expected_ids_to_fetch_second = [3058, 3059, 3060]
params = {
'fetch_delta': '2 hours',
'fetch_limit': '3',
'created_after': '2021-03-30T10:44:24Z',
'state': 'closed'
}
mocker.patch('ProofpointThreatResponse.get_incidents_request', return_value=FETCH_RESPONSE)
new_fetched_first = get_incidents_batch_by_time_request(params)
for incident in new_fetched_first:
assert incident.get('id') in expected_ids_to_fetch_first
params = {
'fetch_delta': '2 hour',
'fetch_limit': '3',
'created_after': '2021-03-30T11:21:24Z',
'last_fetched_id': '3057',
'state': 'closed'
}
new_fetched_second = get_incidents_batch_by_time_request(params)
for incident in new_fetched_second:
assert incident.get('id') in expected_ids_to_fetch_second
def test_get_new_incidents(mocker):
"""
Given
- a dict of request_params to the api.
- The last fetched incident id.
When
- Get new incidents is called during the fetch process.
Then
    - validate that the expected number of incidents is returned.
    - validate that all of the returned incidents have a bigger id than the last fetched incident.
"""
last_incident_fetched = 3057
request_params = {
'state': 'closed',
'created_after': '2021-03-30T10:21:24Z',
'created_before': '2021-03-31T11:21:24Z',
}
mocker.patch('ProofpointThreatResponse.get_incidents_request', return_value=FETCH_RESPONSE)
new_incidnets = get_new_incidents(request_params, last_incident_fetched)
assert len(new_incidnets) == 14
for incident in new_incidnets:
assert incident.get('id') > 3057
def test_get_time_delta():
"""
Given
- input to the get_time_delta function which is valid and invalid
When
- run the get_time_delta function.
Then
- validate that on invalid input such as days or no units relevant errors are raised.
- validate that on valid inputs the return value is as expected.
"""
time_delta = get_time_delta('1 minute')
assert str(time_delta) == '0:01:00'
time_delta = get_time_delta('2 hours')
assert str(time_delta) == '2:00:00'
try:
get_time_delta('2')
except Exception as ex:
assert 'The fetch_delta is invalid. Please make sure to insert both the number and the unit of the fetch delta.' in str(
ex)
try:
get_time_delta('2 days')
except Exception as ex:
assert 'The unit of fetch_delta is invalid. Possible values are "minutes" or "hours' in str(ex)
| [
"ProofpointThreatResponse.filter_incidents",
"ProofpointThreatResponse.get_time_delta",
"ProofpointThreatResponse.prepare_ingest_alert_request_body",
"pytest.mark.parametrize",
"ProofpointThreatResponse.get_new_incidents",
"ProofpointThreatResponse.get_incidents_batch_by_time_request",
"ProofpointThreatResponse.pass_sources_list_filter",
"ProofpointThreatResponse.pass_abuse_disposition_filter",
"ProofpointThreatResponse.create_incident_field_context",
"ProofpointThreatResponse.get_emails_context"
] | [((2985, 3050), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""incident, answer"""', 'INCIDENT_FIELD_INPUT'], {}), "('incident, answer', INCIDENT_FIELD_INPUT)\n", (3008, 3050), False, 'import pytest\n'), ((3591, 3653), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""event, answer"""', 'EMAILS_CONTEXT_INPUT'], {}), "('event, answer', EMAILS_CONTEXT_INPUT)\n", (3614, 3653), False, 'import pytest\n'), ((3940, 4015), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sources_list, expected_answer"""', 'SOURCE_LIST_INPUT'], {}), "('sources_list, expected_answer', SOURCE_LIST_INPUT)\n", (3963, 4015), False, 'import pytest\n'), ((4335, 4430), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""abuse_dispotion_values, expected_answer"""', 'ABUSE_DISPOSITION_INPUT'], {}), "('abuse_dispotion_values, expected_answer',\n ABUSE_DISPOSITION_INPUT)\n", (4358, 4430), False, 'import pytest\n'), ((5177, 5251), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""demisto_params, expected_answer"""', 'DEMISTO_PARAMS'], {}), "('demisto_params, expected_answer', DEMISTO_PARAMS)\n", (5200, 5251), False, 'import pytest\n'), ((3134, 3173), 'ProofpointThreatResponse.create_incident_field_context', 'create_incident_field_context', (['incident'], {}), '(incident)\n', (3163, 3173), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((3719, 3744), 'ProofpointThreatResponse.get_emails_context', 'get_emails_context', (['event'], {}), '(event)\n', (3737, 3744), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((4095, 4148), 'ProofpointThreatResponse.pass_sources_list_filter', 'pass_sources_list_filter', (['MOCK_INCIDENT', 'sources_list'], {}), '(MOCK_INCIDENT, sources_list)\n', (4119, 4148), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((4521, 4589), 'ProofpointThreatResponse.pass_abuse_disposition_filter', 'pass_abuse_disposition_filter', (['MOCK_INCIDENT', 'abuse_dispotion_values'], {}), '(MOCK_INCIDENT, abuse_dispotion_values)\n', (4550, 4589), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((5417, 5450), 'ProofpointThreatResponse.filter_incidents', 'filter_incidents', (['[MOCK_INCIDENT]'], {}), '([MOCK_INCIDENT])\n', (5433, 5450), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((6511, 6563), 'ProofpointThreatResponse.prepare_ingest_alert_request_body', 'prepare_ingest_alert_request_body', (['INGEST_ALERT_ARGS'], {}), 
'(INGEST_ALERT_ARGS)\n', (6544, 6563), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((7410, 7453), 'ProofpointThreatResponse.get_incidents_batch_by_time_request', 'get_incidents_batch_by_time_request', (['params'], {}), '(params)\n', (7445, 7453), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((8564, 8607), 'ProofpointThreatResponse.get_incidents_batch_by_time_request', 'get_incidents_batch_by_time_request', (['params'], {}), '(params)\n', (8599, 8607), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((8930, 8973), 'ProofpointThreatResponse.get_incidents_batch_by_time_request', 'get_incidents_batch_by_time_request', (['params'], {}), '(params)\n', (8965, 8973), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((9773, 9829), 'ProofpointThreatResponse.get_new_incidents', 'get_new_incidents', (['request_params', 'last_incident_fetched'], {}), '(request_params, last_incident_fetched)\n', (9790, 9829), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((10308, 10334), 'ProofpointThreatResponse.get_time_delta', 'get_time_delta', (['"""1 minute"""'], {}), "('1 minute')\n", (10322, 10334), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((10392, 10417), 'ProofpointThreatResponse.get_time_delta', 'get_time_delta', (['"""2 hours"""'], {}), "('2 hours')\n", (10406, 10417), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((10475, 10494), 'ProofpointThreatResponse.get_time_delta', 'get_time_delta', (['"""2"""'], {}), "('2')\n", (10489, 10494), False, 'from ProofpointThreatResponse import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n'), ((10685, 10709), 'ProofpointThreatResponse.get_time_delta', 'get_time_delta', (['"""2 days"""'], {}), "('2 days')\n", (10699, 10709), False, 'from ProofpointThreatResponse 
import create_incident_field_context, get_emails_context, pass_sources_list_filter, pass_abuse_disposition_filter, filter_incidents, prepare_ingest_alert_request_body, get_incidents_batch_by_time_request, get_new_incidents, get_time_delta\n')] |
import sys
import os
import json
from enum import Enum
from .mach_o import LC_SYMTAB
from macholib import MachO
from macholib import mach_o
from shutil import copy2
from shutil import SameFileError
class ReplaceType(Enum):
objc_methname = 1
symbol_table = 2
def replace_in_bytes(method_bytes, name_dict, type):
is_prefix = False
empty_byte = b'\x00'
if not method_bytes.startswith(empty_byte):
is_prefix = True
method_bytes = empty_byte + method_bytes
for key, value in name_dict.items():
if len(key) != len(value):
raise("replace method name with different length may break the mach-o file, ori: " +
key + ", dst: " + value)
if type == ReplaceType.objc_methname:
method_bytes = method_bytes.replace(
empty_byte + key.encode('utf-8') + empty_byte, empty_byte + value.encode('utf-8') + empty_byte)
elif type == ReplaceType.symbol_table:
method_bytes = method_bytes.replace(
b' ' + key.encode('utf-8') + b']', b' ' + value.encode('utf-8') + b']')
if is_prefix:
method_bytes = method_bytes.replace(empty_byte, b'', 1)
return method_bytes
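# Example behaviour of replace_in_bytes (hypothetical input): with name_dict {"ab": "cd"} and
# ReplaceType.objc_methname, b'\x00ab\x00xy\x00' becomes b'\x00cd\x00xy\x00'; only whole
# NUL-delimited names are rewritten, and every key must keep its original length.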
def ch_methname_sect(header, name_dict):
commands = header.commands
lc = None
sect = None
for _, command_tuple in enumerate(commands):
seg = command_tuple[1]
data = command_tuple[2]
if hasattr(seg, 'segname') and seg.segname.rstrip(b'\x00') == b'__TEXT':
for tmp_sect in data:
if tmp_sect.sectname.rstrip(b'\x00') == b'__objc_methname':
lc = command_tuple[0]
sect = tmp_sect
if sect is None:
raise("Can't find __objc_methname section")
sect.section_data = replace_in_bytes(
sect.section_data, name_dict, ReplaceType.objc_methname)
header.mod_dict[lc] = [sect]
def ch_symtab(header, name_dict):
commands = header.commands
for idx, command_tuple in enumerate(commands):
lc = command_tuple[0]
cmd = command_tuple[1]
data = command_tuple[2]
if lc.cmd == LC_SYMTAB:
data = replace_in_bytes(data, name_dict, ReplaceType.symbol_table)
header.mod_dict[lc] = [data]
commands[idx] = (lc, cmd, data)
return
raise("Can't find LC_SYMTAB")
def replace_methname(macho_file, methname_json, output_dir):
"""
Map method names in Mach-O file with the JSON file
"""
    if not os.path.isfile(macho_file):
        raise ValueError("file does not exist: " + macho_file)
    if not os.path.isfile(methname_json):
        raise ValueError("file does not exist: " + methname_json)
    if output_dir is not None and not os.path.isdir(output_dir):
        raise ValueError("directory does not exist: " + output_dir)
macho = MachO.MachO(macho_file)
name_dict = None
with open(methname_json) as json_file:
name_dict = json.load(json_file)
for header in macho.headers:
ch_methname_sect(header, name_dict)
ch_symtab(header, name_dict)
ori_dir, filename = os.path.split(macho_file)
if output_dir is None:
output_dir = ori_dir
output = os.path.join(output_dir, filename)
try:
copy2(macho_file, output_dir)
except SameFileError:
pass
with open(output, 'r+b') as fp:
macho.write(fp)
os.chmod(output, 0o755)
def main():
    replace_methname(sys.argv[1], sys.argv[2], sys.argv[3])
if __name__ == '__main__':
main()
| [
"shutil.copy2",
"os.path.join",
"os.chmod",
"os.path.isfile",
"os.path.split",
"os.path.isdir",
"json.load",
"macholib.MachO.MachO"
] | [((2829, 2852), 'macholib.MachO.MachO', 'MachO.MachO', (['macho_file'], {}), '(macho_file)\n', (2840, 2852), False, 'from macholib import MachO\n'), ((3099, 3124), 'os.path.split', 'os.path.split', (['macho_file'], {}), '(macho_file)\n', (3112, 3124), False, 'import os\n'), ((3194, 3228), 'os.path.join', 'os.path.join', (['output_dir', 'filename'], {}), '(output_dir, filename)\n', (3206, 3228), False, 'import os\n'), ((3385, 3406), 'os.chmod', 'os.chmod', (['output', '(493)'], {}), '(output, 493)\n', (3393, 3406), False, 'import os\n'), ((2517, 2543), 'os.path.isfile', 'os.path.isfile', (['macho_file'], {}), '(macho_file)\n', (2531, 2543), False, 'import os\n'), ((2610, 2639), 'os.path.isfile', 'os.path.isfile', (['methname_json'], {}), '(methname_json)\n', (2624, 2639), False, 'import os\n'), ((2938, 2958), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2947, 2958), False, 'import json\n'), ((3251, 3280), 'shutil.copy2', 'copy2', (['macho_file', 'output_dir'], {}), '(macho_file, output_dir)\n', (3256, 3280), False, 'from shutil import copy2\n'), ((2736, 2761), 'os.path.isdir', 'os.path.isdir', (['output_dir'], {}), '(output_dir)\n', (2749, 2761), False, 'import os\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START forms_delete_watch]
from __future__ import print_function
from apiclient import discovery
from httplib2 import Http
from oauth2client import client, file, tools
SCOPES = "https://www.googleapis.com/auth/drive"
API_KEY = "<YOUR_API_KEY>"
DISCOVERY_DOC = f"https://forms.googleapis.com/$discovery/rest?version=v1beta&key={API_KEY}&labels=FORMS_BETA_TESTERS"
store = file.Storage('credentials.json')
creds = None
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
creds = tools.run_flow(flow, store)
service = discovery.build('forms', 'v1beta', http=creds.authorize(
Http()), discoveryServiceUrl=DISCOVERY_DOC, static_discovery=False)
form_id = '<YOUR_FORM_ID>'
watch_id = '<YOUR_WATCH_ID>'
# Print JSON response after deleting a form watch
result = service.forms().watches().delete(formId=form_id, watchId=watch_id).execute()
print(result)
# [END forms_delete_watch]
| [
"httplib2.Http",
"oauth2client.tools.run_flow",
"oauth2client.client.flow_from_clientsecrets",
"oauth2client.file.Storage"
] | [((952, 984), 'oauth2client.file.Storage', 'file.Storage', (['"""credentials.json"""'], {}), "('credentials.json')\n", (964, 984), False, 'from oauth2client import client, file, tools\n'), ((1040, 1100), 'oauth2client.client.flow_from_clientsecrets', 'client.flow_from_clientsecrets', (['"""client_secret.json"""', 'SCOPES'], {}), "('client_secret.json', SCOPES)\n", (1070, 1100), False, 'from oauth2client import client, file, tools\n'), ((1113, 1140), 'oauth2client.tools.run_flow', 'tools.run_flow', (['flow', 'store'], {}), '(flow, store)\n', (1127, 1140), False, 'from oauth2client import client, file, tools\n'), ((1212, 1218), 'httplib2.Http', 'Http', ([], {}), '()\n', (1216, 1218), False, 'from httplib2 import Http\n')] |
# <NAME>
# S = 1/2, I = 1/2
# Spin 1/2 electron coupled to spin 1/2 nuclei
import numpy as np
from scipy.linalg import expm
from matplotlib.pylab import *
from matplotlib import cm
sigma_x = 0.5*np.r_[[[0, 1],[1, 0]]]
sigma_y = 0.5*np.r_[[[0,-1j],[1j, 0]]]
sigma_z = 0.5*np.r_[[[1, 0],[0, -1]]]
Identity = np.eye(2)
Sx = np.kron(sigma_x, Identity)
Sy = np.kron(sigma_y, Identity)
Sz = np.kron(sigma_z, Identity)
Ix = np.kron(Identity, sigma_x)
Iy = np.kron(Identity, sigma_y)
Iz = np.kron(Identity, sigma_z)
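# The Kronecker products place the electron spin in the left factor and the nucleus in the right factor of
# the 4-dimensional product space, so np.dot(Sx, Iz) = kron(sigma_x, I) @ kron(I, sigma_z) = kron(sigma_x, sigma_z);
# that mixed-product identity is what the allclose() comparison a few lines below verifies.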
SxIx = np.kron(sigma_x,sigma_z)
SxIx2 = np.dot(Sx,Iz)
print(SxIx)
print(SxIx2)
print(np.allclose(SxIx,SxIx2))
omega_S = 1.76e11 # rad / (s * T)
omega_I = 267.522e6 # rad / (s * T)
Aiso = 2*np.pi * 50.e6 # Isotropic Hyperfine coupling rad / s
B0 = 0.35# T
H = omega_S/(2.*np.pi)*B0*Sz + omega_I/(2.*np.pi)*B0*Iz + Aiso * np.dot(Sz,Iz)
#H = omega_S/(2.*np.pi)*B0*Sz + omega_I/(2.*np.pi)*B0*Iz + Aiso * (np.dot(Sx,Ix) + np.dot(Sy,Iy) + np.dot(Sz,Iz))
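# Rough magnitude check on the secular Hamiltonian above: with B0 = 0.35 T the electron Zeeman frequency
# omega_S/(2*pi)*B0 is about 9.8 GHz and the nuclear one about 14.9 MHz, so the electron (E13, E24) and
# nuclear (E12, E34) differences printed below land in the GHz and MHz ranges respectively. Note that Aiso
# is defined in rad/s and enters H without the 1/(2*pi) factor applied to the Zeeman terms.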
print('Hamiltonian:')
print(H)
out = np.linalg.eig(H)
E = out[0]
print(E)
E12 = E[0] - E[1]
E34 = E[2] - E[3]
E13 = E[0] - E[2]
E24 = E[1] - E[3]
print(E12)
print(E34)
print(E13)
print(E24)
print('Nuclear')
print('%0.05f MHz'%(E12 / 1e6))
print('%0.05f MHz'%(E34 / 1e6))
print('Electron')
print('%0.05f GHz'%(E13 / 1e9))
print('%0.05f GHz'%(E24 / 1e9))
matshow(abs(H), cmap = cm.jet)
title('Hamiltonian')
show()
| [
"numpy.eye",
"numpy.allclose",
"numpy.linalg.eig",
"numpy.kron",
"numpy.dot"
] | [((308, 317), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (314, 317), True, 'import numpy as np\n'), ((325, 351), 'numpy.kron', 'np.kron', (['sigma_x', 'Identity'], {}), '(sigma_x, Identity)\n', (332, 351), True, 'import numpy as np\n'), ((357, 383), 'numpy.kron', 'np.kron', (['sigma_y', 'Identity'], {}), '(sigma_y, Identity)\n', (364, 383), True, 'import numpy as np\n'), ((389, 415), 'numpy.kron', 'np.kron', (['sigma_z', 'Identity'], {}), '(sigma_z, Identity)\n', (396, 415), True, 'import numpy as np\n'), ((422, 448), 'numpy.kron', 'np.kron', (['Identity', 'sigma_x'], {}), '(Identity, sigma_x)\n', (429, 448), True, 'import numpy as np\n'), ((454, 480), 'numpy.kron', 'np.kron', (['Identity', 'sigma_y'], {}), '(Identity, sigma_y)\n', (461, 480), True, 'import numpy as np\n'), ((486, 512), 'numpy.kron', 'np.kron', (['Identity', 'sigma_z'], {}), '(Identity, sigma_z)\n', (493, 512), True, 'import numpy as np\n'), ((521, 546), 'numpy.kron', 'np.kron', (['sigma_x', 'sigma_z'], {}), '(sigma_x, sigma_z)\n', (528, 546), True, 'import numpy as np\n'), ((555, 569), 'numpy.dot', 'np.dot', (['Sx', 'Iz'], {}), '(Sx, Iz)\n', (561, 569), True, 'import numpy as np\n'), ((1004, 1020), 'numpy.linalg.eig', 'np.linalg.eig', (['H'], {}), '(H)\n', (1017, 1020), True, 'import numpy as np\n'), ((601, 625), 'numpy.allclose', 'np.allclose', (['SxIx', 'SxIx2'], {}), '(SxIx, SxIx2)\n', (612, 625), True, 'import numpy as np\n'), ((838, 852), 'numpy.dot', 'np.dot', (['Sz', 'Iz'], {}), '(Sz, Iz)\n', (844, 852), True, 'import numpy as np\n')] |
from spiderNest.preIntro import *
path_ = os.path.dirname(os.path.dirname(__file__)) + '/dataBase/log_information.csv'
def save_login_info(VMess, class_):
"""
    Store a VMess link into the log database.
class_: ssr or v2ray
"""
now = str(datetime.now()).split('.')[0]
with open(path_, 'a', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
        # time stored, VMess link, class, initial state: 0
writer.writerow(['{}'.format(now), '{}'.format(VMess), class_, '0'])
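        # A freshly stored row therefore looks like (illustrative link value):
        # ['2020-08-06 04:27:59', 'vmess://...', 'v2ray', '0']; the trailing '0' becomes '1' once the link is handed out.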
def vmess_IO(class_):
"""
    Fetch an available subscription link and refresh the storage pool.
class_: ssr ; v2ray
"""
def refresh_log(dataFlow):
with open(path_, 'w', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerows(dataFlow)
try:
with open(path_, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
vm_q = [vm for vm in reader]
new_q = vm_q
for i, value in enumerate(reversed(vm_q)):
if value[-1] == '0' and value[-2] == class_:
vm = value[1]
new_q[-(i + 1)][-1] = '1'
break
refresh_log(new_q)
return vm
except UnboundLocalError:
        return 'No available subscription link'
def avi_num():
from datetime import datetime, timedelta
with open(path_, 'r', encoding='utf-8') as f:
reader = csv.reader(f)
vm_list = [i for i in reader]
# ['2020-08-06 04:27:59', 'link','class_', '1']
vm_q = [vm for vm in vm_list if vm[-1] == '0']
tag_items = ''
for vm in vm_list:
if vm[-1] == '0':
bei_ing_time = datetime.fromisoformat(vm[0]) + timedelta(hours=12)
            tag_items += '\n【√available】【{}】#{}'.format(bei_ing_time, vm[-2])
# return vm_q.__len__()
return tag_items
| [
"datetime.datetime.now",
"datetime.timedelta",
"datetime.datetime.fromisoformat"
] | [((225, 239), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (237, 239), False, 'from datetime import datetime, timedelta\n'), ((1602, 1631), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['vm[0]'], {}), '(vm[0])\n', (1624, 1631), False, 'from datetime import datetime, timedelta\n'), ((1634, 1653), 'datetime.timedelta', 'timedelta', ([], {'hours': '(12)'}), '(hours=12)\n', (1643, 1653), False, 'from datetime import datetime, timedelta\n')] |
import os.path
import re
import sys
import traceback
from pprint import pformat
import tornado
from tornado import template
SENSITIVE_SETTINGS_RE = re.compile(
'api|key|pass|salt|secret|signature|token',
flags=re.IGNORECASE
)
class ExceptionReporter:
def __init__(self, exc_info, handler):
self.exc_type = exc_info[0]
self.exc_value = exc_info[1]
self.exc_tb = exc_info[2]
self.handler = handler
def get_response(self):
loader = template.Loader(os.path.dirname(os.path.abspath(__file__)))
t = loader.load('debug.html')
return t.generate(
traceback=traceback,
pprint=pprint,
handler=self.handler,
app_settings=self.get_app_settings(),
exc_type=self.exc_type,
exc_value=self.exc_value,
exc_tb=self.exc_tb,
frames=self.get_traceback_frames(),
tornado_version=tornado.version,
sys_version='%d.%d.%d' % sys.version_info[0:3],
sys_executable=sys.executable,
sys_path=sys.path,
)
def get_app_settings(self):
settings = {}
for arg, value in self.handler.application.settings.items():
if SENSITIVE_SETTINGS_RE.search(arg):
value = '*' * 15
settings[arg] = value
return settings
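        # For example (made-up settings, not from a real app): {'cookie_secret': 'abc', 'debug': True}
        # is reported as {'cookie_secret': '***************', 'debug': True}, because 'secret' matches
        # SENSITIVE_SETTINGS_RE while 'debug' does not.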
def get_source_lines(self, tb):
filename = tb.tb_frame.f_code.co_filename
lineno = tb.tb_lineno
lines = []
try:
with open(filename, 'rb') as f:
_lines = f.read().splitlines()
for _lineno in range(
max(lineno - 5, 0),
min(lineno + 5, len(_lines))
):
lines.append((_lineno + 1, _lines[_lineno]))
except Exception as e:
# could not open file
pass
return lines
def get_traceback_frames(self):
frames = []
tb = self.exc_tb
while tb:
frames.append({
'lineno': tb.tb_lineno,
'filename': tb.tb_frame.f_code.co_filename,
'function': tb.tb_frame.f_code.co_name,
'module_name': tb.tb_frame.f_globals.get('__name__') or '',
'vars': tb.tb_frame.f_locals,
'lines': self.get_source_lines(tb),
})
tb = tb.tb_next
frames.reverse()
return frames
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = self._get_explicit_or_implicit_cause(exc_value)
if exc_value in exceptions:
warnings.warn(
"Cycle in the exception chain detected: exception '%s' "
"encountered again." % exc_value,
ExceptionCycleWarning,
)
# Avoid infinite loop if there's a cyclic reference (#29393).
break
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = self.tb if not exceptions else exc_value.__traceback__
while True:
frames.extend(self.get_exception_traceback_frames(exc_value, tb))
try:
exc_value = exceptions.pop()
except IndexError:
break
tb = exc_value.__traceback__
return frames
def _get_explicit_or_implicit_cause(self, exc_value):
explicit = getattr(exc_value, '__cause__', None)
suppress_context = getattr(exc_value, '__suppress_context__', None)
implicit = getattr(exc_value, '__context__', None)
return explicit or (None if suppress_context else implicit)
def pprint(value):
try:
return pformat(value, width=1)
except Exception as e:
return 'Error in formatting: %s: %s' % (e.__class__.__name__, e)
| [
"pprint.pformat",
"re.compile"
] | [((151, 226), 're.compile', 're.compile', (['"""api|key|pass|salt|secret|signature|token"""'], {'flags': 're.IGNORECASE'}), "('api|key|pass|salt|secret|signature|token', flags=re.IGNORECASE)\n", (161, 226), False, 'import re\n'), ((3995, 4018), 'pprint.pformat', 'pformat', (['value'], {'width': '(1)'}), '(value, width=1)\n', (4002, 4018), False, 'from pprint import pformat\n')] |
'''
Created on Mar 6, 2018
@author: cef
hp functions for workign with dictionaries
'''
import logging, os, sys, math, copy, inspect
from collections import OrderedDict
from weakref import WeakValueDictionary as wdict
import numpy as np
import hp.basic
mod_logger = logging.getLogger(__name__) #creates a child logger of the root
def dict_2_logr(dict, logger= mod_logger): #log each value of the dictionary to file
logger = logger.getChild('dict_2_logr')
msg = '\n'
for key, value in dict.iteritems():
msg = msg + ' key: %s\n value: %s \n'%(key, value)
logger.debug(msg)
def key_list(d, #return the intersection of the dict.keys() and the key_list
key_list, logger = mod_logger):
logger = logger.getChild('key_list')
#===========================================================================
# pre check
#===========================================================================
bool_list = hp.basic.bool_list_in_list(d.keys(), key_list)
if not bool_list.any(): raise IOError #check if any are not found
#===========================================================================
# build the found values
#===========================================================================
values_fnd_list = []
for key, value in d.iteritems():
if key in key_list: values_fnd_list.append(value)
return values_fnd_list
def build_nones_dict(key_list, logger=mod_logger): #add 'None' values to the passed keys
    val_list = np.full(len(key_list), None)
    d = dict(zip(key_list, val_list))
    return d
def merge_two_dicts(x, y):
if x is None: return y
if y is None: return x
z = x.copy() # start with x's keys and values
z.update(y) # modifies z with y's keys and values & returns None
return z
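# Quick illustration (made-up values): merge_two_dicts({'a': 1}, {'a': 2, 'b': 3}) gives {'a': 2, 'b': 3};
# on duplicate keys the right-hand dict wins because z.update(y) runs after the copy of x.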
def value_by_ksearch(ksearch_str, d, #get the entry that matches the search str
logger=mod_logger, *search_args):
#===========================================================================
# take a shot at a perfect match
#===========================================================================
try:
return d[ksearch_str]
except:
#find a match for this key
k_fnd = hp.basic.list_search(d.keys(), ksearch_str, *search_args)
if k_fnd is None:
logger = logger.getChild('value_by_ksearch')
logger.debug('could not find \'%s\' in %i dict keys. returning None'%(ksearch_str, len(d)))
return None
else:
return d[k_fnd]
def merge(dl, dr, #intelligent dictionary merging
set_type = 'intersect',
method = 'exact',
          container = dict,
logger = mod_logger, *search_args):
if set_type == 'union':
if method == 'exact':
            d_merge = merge_two_dicts(dl, dr)
else:
raise IOError #todo
elif set_type == 'intersect':
d_merge = subset(dl, dr.keys(), set_type = set_type,
method=method, container=container, logger=logger, *search_args)
else: raise IOError
logger.debug('got d_merge %i'%len(d_merge))
return container(d_merge)
def subset_pfx(d_big, prefix, logger=mod_logger):
#===========================================================================
# shortcuts
#===========================================================================
if len(d_big) == 0: return dict()
#===========================================================================
# defaults
#===========================================================================
logger = logger.getChild('subset_pfx')
d = copy.copy(d_big)
fnd_d = dict()
for k, v in d.iteritems():
if k.startswith(prefix):
fnd_d[k] = v
logger.debug('found %i entries with prefix \'%s\' \n'%(len(fnd_d), prefix))
return fnd_d
def subset(d_big, l, #get a dictionary subset using standard user inputs
#ordered = False, using containers instead
set_type = 'sub',
method = 'exact',
container = dict,
logger = mod_logger,
*search_args):
"""
#===========================================================================
# INPUTS
#===========================================================================
    l: list of keys (within d_big) on which to return the subset
    set_type: how to treat the set
        intersect: return a dictionary with only the common keys
sub: raise a flag if not every item in 'l' is found in d_big.keys()
method: what type of key search to perform (re.function)
search: look for a key in the dictionary that contains the list entry.
returned d is keyed by the list
"""
logger = logger.getChild('subset')
#===========================================================================
# setup[]
#==========================================================================
d = container()
"""
#dictionary setup
if ordered: d = OrderedDict()
else: d = dict()"""
#input list setup
if isinstance(l, list): pass
elif isinstance(l, basestring): l = [l]
elif l is None: return d
else: raise IOError
nofnd_l = []
#===========================================================================
# determine subset by kwarg
#===========================================================================
for k in l:
try: #attempt teh direct match
d[k] = d_big[k]
except:
#===================================================================
# try again using search functions
#===================================================================
try:
if method == 'search':
#search and return this value
v = value_by_ksearch(k, d_big, logger=logger, *search_args)
if not v is None:
d[k] = v
continue #not sure this is needed
else: raise ValueError
else: raise ValueError
#===================================================================
# nothing found. proceed based on set_type
#===================================================================
except:
logger.debug('unable to find \'%s\' in the dict with method \'%s\''%(k, method))
if set_type == 'sub':
boolar = hp.basic.bool_list_in_list(d_big.keys(), l)
if not np.all(boolar):
logger.error('%i entries in list not found in big_d'%(len(l) - boolar.sum()))
raise IOError
elif set_type == 'intersect': nofnd_l.append(k)
else: raise IOError
#===========================================================================
# wrap up
#===========================================================================
if len(nofnd_l) >0:
logger.debug('%i of %i list entries DO NOT intersect: %s'%(len(nofnd_l), len(l), nofnd_l))
if set_type == 'sub': raise IOError
#===========================================================================
# check
#===========================================================================
if len(d) == 0:
logger.warning('0 common values between d(%i) and l(%i)'%(len(d), len(l)))
logger.debug('returning d with %i entries: %s \n'%(len(d), d.keys()))
return container(d)
#===============================================================================
# def subset(d_big, l, #get a dictionary subset using standard user inputs
# ordered = False, set_type = 'sub', search = 'search',
# logger = mod_logger):
# """
# #===========================================================================
# # INPUTS
# #===========================================================================
# l: list of keys (within d_big) on which to erturn the sutset
#
# set_type: how to treat the set
# intersect: returna dictionary with only the common keys
# sub: raise a flag if not every item in 'l' is found in d_big.keys()
#
# search: what type of key search to perform (re.function)
# """
# logger = logger.getChild('subset')
#
# #===========================================================================
# # setup[]
# #==========================================================================
# #dictionary setup
# if ordered: d = OrderedDict()
# else: d = dict()
#
# #input list setup
# if isinstance(l, list): pass
# elif isinstance(l, basestring): l = [l]
# elif l is None: return None
# else: raise IOError
#
# #===========================================================================
# # determine subset by kwarg
# #===========================================================================
# if set_type == 'sub':
# try:
# for k in l:
# d[k] = d_big[k]
#
# except:
# boolar = hp.basic.bool_list_in_list(d_big.keys(), l)
#
# if not np.all(boolar):
# logger.error('%i entries in list not found in big_d'%(len(l) - boolar.sum()))
#
# raise IOError
#
# if len(d) == 0: raise IOError
#
# elif set_type == 'intersect':
# nofnd_l = []
# for k in l:
# try:
# d[k] = d_big[k]
# except:
# nofnd_l.append(k)
#
# if len(nofnd_l) >0:
# logger.debug('%i of %i list entries DO NOT intersect: %s'%(len(nofnd_l), len(l), nofnd_l))
#
# #===========================================================================
# # check
# #===========================================================================
# if len(d) == 0: logger.warning('0 common values between d(%i) and l(%i)'%
# (len(d), len(l)))
#
# return d
#===============================================================================
class deepcopier():
tries = 0 #keep track of the loop
def __init__(self,obj, logger=mod_logger):
self.logger = logger.getChild('deepcopier')
self.copy_o = obj
def tryit(self, obj=None): #make as deep a copy as possible
if obj is None: obj = self.copy_o
#===========================================================================
# simple try
#===========================================================================
try:
copy_o = copy.deepcopy(obj)
return copy_o
except:
self.logger.debug('failed first attempt')
self.tries += 1
#=======================================================================
# sophisiticated try
#=======================================================================
self.logger.debug('copy attempt %i'%self.tries)
if self.tries > 10: return self.copy_o
#try for each element of the dict
if isinstance(obj, dict):
new_d = dict()
for key, value in obj.iteritems():
try:
new_d[key] = self.tryit(obj = value)
except:
new_d[key] = copy.copy(obj)
self.logger.debug('returning new_d with %i entries: %s'%(len(new_d), new_d.keys()))
else: raise IOError
return new_d
from collections import OrderedDict
class MyOrderedDict(OrderedDict):
"""
as there is no builtin method to add to the head of an ordered dict,
here we add a method
https://stackoverflow.com/questions/16664874/how-can-i-add-an-element-at-the-top-of-an-ordereddict-in-python
"""
def prepend(self, key, value, dict_setitem=dict.__setitem__):
"""add entry to the front of myself"""
root = self._OrderedDict__root
first = root[1]
if key in self:
link = self._OrderedDict__map[key]
link_prev, link_next, _ = link
link_prev[1] = link_next
link_next[0] = link_prev
link[0] = root
link[1] = first
root[1] = first[0] = link
else:
root[1] = first[0] = self._OrderedDict__map[key] = [root, first, key]
dict_setitem(self, key, value)
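# Rough usage sketch (invented values): d = MyOrderedDict([('b', 2)]); d.prepend('a', 1) leaves the keys
# ordered as ['a', 'b'], i.e. the new entry is linked in at the head of the ordering instead of the tail.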
| [
"logging.getLogger",
"numpy.all",
"copy.copy",
"copy.deepcopy"
] | [((291, 318), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (308, 318), False, 'import logging, os, sys, math, copy, inspect\n'), ((4046, 4062), 'copy.copy', 'copy.copy', (['d_big'], {}), '(d_big)\n', (4055, 4062), False, 'import logging, os, sys, math, copy, inspect\n'), ((11902, 11920), 'copy.deepcopy', 'copy.deepcopy', (['obj'], {}), '(obj)\n', (11915, 11920), False, 'import logging, os, sys, math, copy, inspect\n'), ((12697, 12711), 'copy.copy', 'copy.copy', (['obj'], {}), '(obj)\n', (12706, 12711), False, 'import logging, os, sys, math, copy, inspect\n'), ((7354, 7368), 'numpy.all', 'np.all', (['boolar'], {}), '(boolar)\n', (7360, 7368), True, 'import numpy as np\n')] |
#Grapheme
Rime_tone=[ "a","ă","â","e","ê","i","o","ô","ơ","u","ư","y","iê","oa","oă","oe","oo","uâ","uê","uô","uơ","uy","ươ","uyê","yê", #blank
"á","ắ","ấ","é","ế","í","ó","ố","ớ","ú","ứ","ý","iế","óa","oắ","óe","oó","uấ","uế","uố","ướ","úy","ướ","uyế","yế", #grave
"oá", "oé","óo", "uý",
"à","ằ","ầ","è","ề","ì","ò","ồ","ờ","ù","ừ","ỳ","iề","òa","oằ","òe","oò","uầ","uề","uồ","ườ","ùy","ườ","uyề","yề", #acute
"oà", "oè","òo", "uỳ",
"ả","ẳ","ẩ","ẻ","ể","ỉ","ỏ","ổ","ở","ủ","ử","ỷ","iể","ỏa","oẳ","ỏe","oỏ","uẩ","uể","uổ","ưở","ủy","ưở","uyể","yể", #hook
"oả", "oẻ","ỏo", "uỷ",
"ã","ẵ","ẫ","ẽ","ễ","ĩ","õ","ỗ","ỡ","ũ","ữ","ỹ","iễ","õa","oẵ","õe","oõ","uẫ","uễ","uỗ","ưỡ","ũy","ưỡ","uyễ","yễ", #tilde
"oã", "oẽ","õo", "uỹ",
"ạ","ặ","ậ","ẹ","ệ","ị","ọ","ộ","ợ","ụ","ự","ỵ","iệ","ọa","oặ","ọe","oọ","uậ","uệ","uệ","ượ","ụy","ượ","uyệ","yệ", #dot
"oạ", "oẹ","ọo", "uỵ"]
Onset=["b","d","h","l","m","n","p","r","s","t","v","x","đ","p",
"tr", "th", "ch", "ph","nh","kh","gi","qu",
"ngh","ng","gh","g","k","c"]
#coding: utf-8
#Custom phoneme set follows https://vi.wikipedia.org/wiki/%C3%82m_v%E1%BB%8B_h%E1%BB%8Dc_ti%E1%BA%BFng_Vi%E1%BB%87t
#Improve pronunciation consistency between N, C and S (northern, central, southern) variants
Cus_onsets = { u'b' : u'b', u't' : u't', u'th' : u'tʰ', u'đ' : u'd', u'ch' : u'c',
u'kh' : u'x', u'g' : u'ɣ', u'l' : u'l', u'm' : u'm', u'n': u'n',
u'ngh': u'ŋ', u'nh' : u'ɲ', u'ng' : u'ŋ', u'ph' : u'f', u'v' : u'v',
u'x' : u's', u'd' : u'z', u'h' : u'h', u'p' : u'p', u'qu' : u'kw',
u'gi' : u'j', u'tr' : u'ʈ', u'k' : u'k', u'c' : u'k', u'gh' : u'ɣ',
u'r' : u'ʐ', u's' : u'ʂ', u'gi': u'j'}
Cus_nuclei = { u'a' : u'a', u'á' : u'a', u'à' : u'a', u'ả' : u'a', u'ã' : u'a', u'ạ' : u'a',
u'â' : u'ɤ̆', u'ấ' : u'ɤ̆', u'ầ' : u'ɤ̆', u'ẩ' : u'ɤ̆', u'ẫ' : u'ɤ̆', u'ậ' : u'ɤ̆',
u'ă' : u'ă', u'ắ' : u'ă', u'ằ' : u'ă', u'ẳ' : u'ă', u'ẵ' : u'ă', u'ặ' : u'ă',
u'e' : u'ɛ', u'é' : u'ɛ', u'è' : u'ɛ', u'ẻ' : u'ɛ', u'ẽ' : u'ɛ', u'ẹ' : u'ɛ',
u'ê' : u'e', u'ế' : u'e', u'ề' : u'e', u'ể' : u'e', u'ễ' : u'e', u'ệ' : u'e',
u'i' : u'i', u'í' : u'i', u'ì' : u'i', u'ỉ' : u'i', u'ĩ' : u'i', u'ị' : u'i',
u'o' : u'ɔ', u'ó' : u'ɔ', u'ò' : u'ɔ', u'ỏ' : u'ɔ', u'õ' : u'ɔ', u'ọ' : u'ɔ',
u'ô' : u'o', u'ố' : u'o', u'ồ' : u'o', u'ổ' : u'o', u'ỗ' : u'o', u'ộ' : u'o',
u'ơ' : u'ɤ', u'ớ' : u'ɤ', u'ờ' : u'ɤ', u'ở' : u'ɤ', u'ỡ' : u'ɤ', u'ợ' : u'ɤ',
u'u' : u'u', u'ú' : u'u', u'ù' : u'u', u'ủ' : u'u', u'ũ' : u'u', u'ụ' : u'u',
u'ư' : u'ɯ', u'ứ' : u'ɯ', u'ừ' : u'ɯ', u'ử' : u'ɯ', u'ữ' : u'ɯ', u'ự' : u'ɯ',
u'y' : u'i', u'ý' : u'i', u'ỳ' : u'i', u'ỷ' : u'i', u'ỹ' : u'i', u'ỵ' : u'i',
u'eo' : u'eo', u'éo' : u'eo', u'èo' : u'eo', u'ẻo' : u'eo', u'ẽo': u'eo', u'ẹo' : u'eo',
u'êu' : u'ɛu', u'ếu' : u'ɛu', u'ều' : u'ɛu', u'ểu' : u'ɛu', u'ễu': u'ɛu', u'ệu' : u'ɛu',
u'ia' : u'iə', u'ía' : u'iə', u'ìa' : u'iə', u'ỉa' : u'iə', u'ĩa' : u'iə', u'ịa' : u'iə',
u'ia' : u'iə', u'iá' : u'iə', u'ià' : u'iə', u'iả' : u'iə', u'iã' : u'iə', u'iạ' : u'iə',
u'iê' : u'iə', u'iế' : u'iə', u'iề' : u'iə', u'iể' : u'iə', u'iễ' : u'iə', u'iệ' : u'iə',
u'oo' : u'ɔ', u'óo' : u'ɔ', u'òo' : u'ɔ', u'ỏo' : u'ɔ', u'õo' : u'ɔ', u'ọo' : u'ɔ',
u'oo' : u'ɔ', u'oó' : u'ɔ', u'oò' : u'ɔ', u'oỏ' : u'ɔ', u'oõ' : u'ɔ', u'oọ' : u'ɔ',
u'ôô' : u'o', u'ốô' : u'o', u'ồô' : u'o', u'ổô' : u'o', u'ỗô' : u'o', u'ộô' : u'o',
u'ôô' : u'o', u'ôố' : u'o', u'ôồ' : u'o', u'ôổ' : u'o', u'ôỗ' : u'o', u'ôộ' : u'o',
u'ua' : u'uə', u'úa' : u'uə', u'ùa' : u'uə', u'ủa' : u'uə', u'ũa' : u'uə', u'ụa' : u'uə',
u'uô' : u'uə', u'uố' : u'uə', u'uồ' : u'uə', u'uổ' : u'uə', u'uỗ' : u'uə', u'uộ' : u'uə',
u'ưa' : u'ɯə', u'ứa' : u'ɯə', u'ừa' : u'ɯə', u'ửa' : u'ɯə', u'ữa' : u'ɯə', u'ựa' : u'ɯə',
u'ươ' : u'ɯə', u'ướ' : u'ɯə', u'ườ' : u'ɯə', u'ưở' : u'ɯə', u'ưỡ' : u'ɯə', u'ượ' : u'ɯə',
u'yê' : u'iɛ', u'yế' : u'iɛ', u'yề' : u'iɛ', u'yể' : u'iɛ', u'yễ' : u'iɛ', u'yệ' : u'iɛ',
u'uơ' : u'uə', u'uở' : u'uə', u'uờ': u'uə', u'uở' : u'uə', u'uỡ' : u'uə', u'uợ' : u'uə',
}
Cus_offglides = { u'ai' : u'aj', u'ái' : u'aj', u'ài' : u'aj', u'ải' : u'aj', u'ãi' : u'aj', u'ại' : u'aj',
u'ay' : u'ăj', u'áy' : u'ăj', u'ày' : u'ăj', u'ảy' : u'ăj', u'ãy' : u'ăj', u'ạy' : u'ăj',
u'ao' : u'aw', u'áo' : u'aw', u'ào' : u'aw', u'ảo' : u'aw', u'ão' : u'aw', u'ạo' : u'aw',
u'au' : u'ăw', u'áu' : u'ăw', u'àu' : u'ăw', u'ảu' : u'ăw', u'ãu' : u'ăw', u'ạu' : u'ăw',
u'ây' : u'ɤ̆j', u'ấy' : u'ɤ̆j', u'ầy' : u'ɤ̆j', u'ẩy' : u'ɤ̆j', u'ẫy' : u'ɤ̆j', u'ậy' : u'ɤ̆j',
u'âu' : u'ɤ̆w', u'ấu' : u'ɤ̆w', u'ầu': u'ɤ̆w', u'ẩu' : u'ɤ̆w', u'ẫu' : u'ɤ̆w', u'ậu' : u'ɤ̆w',
u'eo' : u'ew', u'éo' : u'ew', u'èo' : u'ew', u'ẻo' : u'ew', u'ẽo' : u'ew', u'ẹo' : u'ew',
u'iu' : u'iw', u'íu' : u'iw', u'ìu' : u'iw', u'ỉu' : u'iw', u'ĩu' : u'iw', u'ịu' : u'iw',
u'oi' : u'ɔj', u'ói' : u'ɔj', u'òi' : u'ɔj', u'ỏi' : u'ɔj', u'õi' : u'ɔj', u'ọi' : u'ɔj',
u'ôi' : u'oj', u'ối' : u'oj', u'ồi' : u'oj', u'ổi' : u'oj', u'ỗi' : u'oj', u'ội' : u'oj',
u'ui' : u'uj', u'úi' : u'uj', u'ùi' : u'uj', u'ủi' : u'uj', u'ũi' : u'uj', u'ụi' : u'uj',
#u'uy' : u'uj', u'úy' : u'uj', u'ùy' : u'uj', u'ủy' : u'uj', u'ũy' : u'uj', u'ụy' : u'uj',
u'uy' : u'ʷi', u'úy' : u'uj', u'ùy' : u'uj', u'ủy' : u'uj', u'ũy' : u'uj', u'ụy' : u'uj',
#changed to limit duplicate phonemes
u'uy' : u'ʷi', u'uý' : u'ʷi', u'uỳ' : u'ʷi', u'uỷ' : u'ʷi', u'uỹ' : u'ʷi', u'uỵ' : u'ʷi',
u'ơi' : u'ɤj', u'ới' : u'ɤj', u'ời' : u'ɤj', u'ởi' : u'ɤj', u'ỡi' : u'ɤj', u'ợi' : u'ɤj',
u'ưi' : u'ɯj', u'ứi' : u'ɯj', u'ừi' : u'ɯj', u'ửi' : u'ɯj', u'ữi' : u'ɯj', u'ựi' : u'ɯj',
u'ưu' : u'ɯw', u'ứu' : u'ɯw', u'ừu' : u'ɯw', u'ửu' : u'ɯw', u'ữu' : u'ɯw', u'ựu' : u'ɯw',
u'iêu' : u'iəw', u'iếu' : u'iəw', u'iều' : u'iəw', u'iểu' : u'iəw', u'iễu' : u'iəw', u'iệu' : u'iəw',
u'yêu' : u'iəw', u'yếu' : u'iəw', u'yều' : u'iəw', u'yểu' : u'iəw', u'yễu' : u'iəw', u'yệu' : u'iəw',
u'uôi' : u'uəj', u'uối' : u'uəj', u'uồi' : u'uəj', u'uổi' : u'uəj', u'uỗi' : u'uəj', u'uội' : u'uəj',
u'ươi' : u'ɯəj', u'ưới' : u'ɯəj', u'ười' : u'ɯəj', u'ưởi' : u'ɯəj', u'ưỡi' : u'ɯəj', u'ượi' : u'ɯəj',
u'ươu' : u'ɯəw', u'ướu' : u'ɯəw', u'ườu' : u'ɯəw', u'ưởu' : u'ɯəw', 'ưỡu' : u'ɯəw', u'ượu' : u'ɯəw'
}
#The rounded onglides here were written exactly like the unrounded ones (no w in front) => Try to add ʷ
Cus_onglides = { u'oa' : u'ʷa', u'oá' : u'ʷa', u'oà' : u'ʷa', u'oả' : u'ʷa', u'oã' : u'ʷa', u'oạ' : u'ʷa',
u'óa' : u'ʷa', u'òa' : u'ʷa', u'ỏa' : u'ʷa', u'õa' : u'ʷa', u'ọa' : u'ʷa',
u'oă' : u'ʷă', u'oắ' : u'ʷă', u'oằ' : u'ʷă', u'oẳ' : u'ʷă', u'oẵ' : u'ʷă', u'oặ' : u'ʷă',
u'oe' : u'ʷɛ', u'oé' : u'ʷɛ', u'oè' : u'ʷɛ', u'oẻ' : u'ʷɛ', u'oẽ' : u'ʷɛ', u'oẹ' : u'ʷɛ',
u'oe' : u'ʷɛ', u'óe' : u'ʷɛ', u'òe' : u'ʷɛ', u'ỏe' : u'ʷɛ', u'õe' : u'ʷɛ', u'ọe' : u'ʷɛ',
u'ua' : u'ʷa', u'uá' : u'ʷa', u'uà' : u'ʷa', u'uả' : u'ʷa', u'uã' : u'ʷa', u'uạ' : u'ʷa',
u'uă' : u'ʷă', u'uắ' : u'ʷă', u'uằ' : u'ʷă', u'uẳ' : u'ʷă', u'uẵ' : u'ʷă', u'uặ' : u'ʷă',
u'uâ' : u'ʷɤ̆', u'uấ' : u'ʷɤ̆', u'uầ' : u'ʷɤ̆', u'uẩ' : u'ʷɤ̆', u'uẫ' : u'ʷɤ̆', u'uậ' : u'ʷɤ̆',
u'ue' : u'ʷɛ', u'ué' : u'ʷɛ', u'uè' : u'ʷɛ', u'uẻ' : u'ʷɛ', u'uẽ' : u'ʷɛ', u'uẹ' : u'ʷɛ',
u'uê' : u'ʷe', u'uế' : u'ʷe', u'uề' : u'ʷe', u'uể' : u'ʷe', u'uễ' : u'ʷe', u'uệ' : u'ʷe',
u'uơ' : u'ʷɤ', u'uớ' : u'ʷɤ', u'uờ' : u'ʷɤ', u'uở' : u'ʷɤ', u'uỡ' : u'ʷɤ', u'uợ' : u'ʷɤ',
u'uy' : u'ʷi', u'uý' : u'ʷi', u'uỳ' : u'ʷi', u'uỷ' : u'ʷi', u'uỹ' : u'ʷi', u'uỵ' : u'ʷi',
u'uya' : u'ʷiə', u'uyá' : u'ʷiə', u'uyà' : u'ʷiə', u'uyả' : u'ʷiə', u'uyã' : u'ʷiə', u'uyạ' : u'ʷiə',
u'uyê' : u'ʷiə', u'uyế' : u'ʷiə', u'uyề' : u'ʷiə', u'uyể' : u'ʷiə', u'uyễ' : u'ʷiə', u'uyệ' : u'ʷiə',
u'uyu' : u'ʷiu', u'uyú' : u'ʷiu', u'uyù' : u'ʷiu', u'uyủ' : u'ʷiu', u'uyũ' : u'ʷiu', u'uyụ' : u'ʷiu',
u'uyu' : u'ʷiu', u'uýu' : u'ʷiu', u'uỳu' : u'ʷiu', u'uỷu' : u'ʷiu', u'uỹu' : u'ʷiu', u'uỵu' : u'ʷiu',
u'oen' : u'ʷen', u'oén' : u'ʷen', u'oèn' : u'ʷen', u'oẻn' : u'ʷen', u'oẽn' : u'ʷen', u'oẹn' : u'ʷen',
u'oet' : u'ʷet', u'oét' : u'ʷet', u'oèt' : u'ʷet', u'oẻt' : u'ʷet', u'oẽt' : u'ʷet', u'oẹt' : u'ʷet'
}
Cus_onoffglides = { u'oe' : u'ɛj', u'oé' : u'ɛj', u'oè' : u'ɛj', u'oẻ' : u'ɛj', u'oẽ' : u'ɛj', u'oẹ' : u'ɛj',
u'oai' : u'aj', u'oái' : u'aj', u'oài' : u'aj', u'oải' : u'aj', u'oãi' : u'aj', u'oại' : u'aj',
u'oay' : u'ăj', u'oáy' : u'ăj', u'oày' : u'ăj', u'oảy' : u'ăj', u'oãy' : u'ăj', u'oạy' : u'ăj',
u'oao' : u'aw', u'oáo' : u'aw', u'oào' : u'aw', u'oảo' : u'aw', u'oão' : u'aw', u'oạo' : u'aw',
u'oeo' : u'ew', u'oéo' : u'ew', u'oèo' : u'ew', u'oẻo' : u'ew', u'oẽo' : u'ew', u'oẹo' : u'ew',
u'oeo' : u'ew', u'óeo' : u'ew', u'òeo' : u'ew', u'ỏeo' : u'ew', u'õeo' : u'ew', u'ọeo' : u'ew',
u'ueo' : u'ew', u'uéo' : u'ew', u'uèo' : u'ew', u'uẻo' : u'ew', u'uẽo' : u'ew', u'uẹo' : u'ew',
u'uai' : u'aj', u'uái' : u'aj', u'uài' : u'aj', u'uải' : u'aj', u'uãi' : u'aj', u'uại' : u'aj',
u'uay' : u'ăj', u'uáy' : u'ăj', u'uày' : u'ăj', u'uảy' : u'ăj', u'uãy' : u'ăj', u'uạy' : u'ăj',
u'uây' : u'ɤ̆j', u'uấy' : u'ɤ̆j', u'uầy' : u'ɤ̆j', u'uẩy' : u'ɤ̆j', u'uẫy' : u'ɤ̆j', u'uậy' : u'ɤ̆j'
}
Cus_codas = { u'p' : u'p', u't' : u't', u'c' : u'k', u'm' : u'm', u'n' : u'n', u'ng' : u'ŋ', u'nh' : u'ɲ', u'ch' : u'tʃ' }
Cus_tones_p = { u'á' : 5, u'à' : 2, u'ả' : 4, u'ã' : 3, u'ạ' : 6,
u'ấ' : 5, u'ầ' : 2, u'ẩ' : 4, u'ẫ' : 3, u'ậ' : 6,
u'ắ' : 5, u'ằ' : 2, u'ẳ' : 4, u'ẵ' : 3, u'ặ' : 6,
u'é' : 5, u'è' : 2, u'ẻ' : 4, u'ẽ' : 3, u'ẹ' : 6,
u'ế' : 5, u'ề' : 2, u'ể' : 4, u'ễ' : 3, u'ệ' : 6,
u'í' : 5, u'ì' : 2, u'ỉ' : 4, u'ĩ' : 3, u'ị' : 6,
u'ó' : 5, u'ò' : 2, u'ỏ' : 4, u'õ' : 3, u'ọ' : 6,
u'ố' : 5, u'ồ' : 2, u'ổ' : 4, u'ỗ' : 3, u'ộ' : 6,
u'ớ' : 5, u'ờ' : 2, u'ở' : 4, u'ỡ' : 3, u'ợ' : 6,
u'ú' : 5, u'ù' : 2, u'ủ' : 4, u'ũ' : 3, u'ụ' : 6,
u'ứ' : 5, u'ừ' : 2, u'ử' : 4, u'ữ' : 3, u'ự' : 6,
u'ý' : 5, u'ỳ' : 2, u'ỷ' : 4, u'ỹ' : 3, u'ỵ' : 6,
}
Cus_gi = { u'gi' : u'zi', u'gí': u'zi', u'gì' : u'zi', u'gì' : u'zi', u'gĩ' : u'zi', u'gị' : u'zi'}
Cus_qu = {u'quy' : u'kwi', u'qúy' : u'kwi', u'qùy' : u'kwi', u'qủy' : u'kwi', u'qũy' : u'kwi', u'qụy' : u'kwi'}
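# Illustration only (derived from the tables above, not from the full conversion code): the syllable "toán"
# would split into onset 't' -> 't' (Cus_onsets), glide+nucleus 'oá' -> 'ʷa' (Cus_onglides), coda 'n' -> 'n'
# (Cus_codas) and tone 5 for the acute mark (Cus_tones_p), i.e. roughly /tʷan/ with tone 5 in this scheme.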
#######################################################
# North
# #coding: utf-8
N_onsets = { u'b' : u'b', u't' : u't', u'th' : u'tʰ', u'đ' : u'd', u'ch' : u'c',
u'kh' : u'x', u'g' : u'ɣ', u'l' : u'l', u'm' : u'm', u'n': u'n',
u'ngh': u'ŋ', u'nh' : u'ɲ', u'ng' : u'ŋ', u'ph' : u'f', u'v' : u'v',
u'x' : u's', u'd' : u'z', u'h' : u'h', u'p' : u'p', u'qu' : u'kw',
u'gi' : u'z', u'tr' : u'c', u'k' : u'k', u'c' : u'k', u'gh' : u'ɣ',
u'r' : u'z', u's' : u's', u'gi': u'z'}
N_nuclei = { u'a' : u'a', u'á' : u'a', u'à' : u'a', u'ả' : u'a', u'ã' : u'a', u'ạ' : u'a',
u'â' : u'ɤ̆', u'ấ' : u'ɤ̆', u'ầ' : u'ɤ̆', u'ẩ' : u'ɤ̆', u'ẫ' : u'ɤ̆', u'ậ' : u'ɤ̆',
u'ă' : u'ă', u'ắ' : u'ă', u'ằ' : u'ă', u'ẳ' : u'ă', u'ẵ' : u'ă', u'ặ' : u'ă',
u'e' : u'ɛ', u'é' : u'ɛ', u'è' : u'ɛ', u'ẻ' : u'ɛ', u'ẽ' : u'ɛ', u'ẹ' : u'ɛ',
u'ê' : u'e', u'ế' : u'e', u'ề' : u'e', u'ể' : u'e', u'ễ' : u'e', u'ệ' : u'e',
u'i' : u'i', u'í' : u'i', u'ì' : u'i', u'ỉ' : u'i', u'ĩ' : u'i', u'ị' : u'i',
u'o' : u'ɔ', u'ó' : u'ɔ', u'ò' : u'ɔ', u'ỏ' : u'ɔ', u'õ' : u'ɔ', u'ọ' : u'ɔ',
u'ô' : u'o', u'ố' : u'o', u'ồ' : u'o', u'ổ' : u'o', u'ỗ' : u'o', u'ộ' : u'o',
u'ơ' : u'ɤ', u'ớ' : u'ɤ', u'ờ' : u'ɤ', u'ở' : u'ɤ', u'ỡ' : u'ɤ', u'ợ' : u'ɤ',
u'u' : u'u', u'ú' : u'u', u'ù' : u'u', u'ủ' : u'u', u'ũ' : u'u', u'ụ' : u'u',
u'ư' : u'ɯ', u'ứ' : u'ɯ', u'ừ' : u'ɯ', u'ử' : u'ɯ', u'ữ' : u'ɯ', u'ự' : u'ɯ',
u'y' : u'i', u'ý' : u'i', u'ỳ' : u'i', u'ỷ' : u'i', u'ỹ' : u'i', u'ỵ' : u'i',
u'eo' : u'eo', u'éo' : u'eo', u'èo' : u'eo', u'ẻo' : u'eo', u'ẽo': u'eo', u'ẹo' : u'eo',
u'êu' : u'ɛu', u'ếu' : u'ɛu', u'ều' : u'ɛu', u'ểu' : u'ɛu', u'ễu': u'ɛu', u'ệu' : u'ɛu',
u'ia' : u'iə', u'ía' : u'iə', u'ìa' : u'iə', u'ỉa' : u'iə', u'ĩa' : u'iə', u'ịa' : u'iə',
u'ia' : u'iə', u'iá' : u'iə', u'ià' : u'iə', u'iả' : u'iə', u'iã' : u'iə', u'iạ' : u'iə',
u'iê' : u'iə', u'iế' : u'iə', u'iề' : u'iə', u'iể' : u'iə', u'iễ' : u'iə', u'iệ' : u'iə',
u'oo' : u'ɔ', u'óo' : u'ɔ', u'òo' : u'ɔ', u'ỏo' : u'ɔ', u'õo' : u'ɔ', u'ọo' : u'ɔ',
u'oo' : u'ɔ', u'oó' : u'ɔ', u'oò' : u'ɔ', u'oỏ' : u'ɔ', u'oõ' : u'ɔ', u'oọ' : u'ɔ',
u'ôô' : u'o', u'ốô' : u'o', u'ồô' : u'o', u'ổô' : u'o', u'ỗô' : u'o', u'ộô' : u'o',
u'ôô' : u'o', u'ôố' : u'o', u'ôồ' : u'o', u'ôổ' : u'o', u'ôỗ' : u'o', u'ôộ' : u'o',
u'ua' : u'uə', u'úa' : u'uə', u'ùa' : u'uə', u'ủa' : u'uə', u'ũa' : u'uə', u'ụa' : u'uə',
u'uô' : u'uə', u'uố' : u'uə', u'uồ' : u'uə', u'uổ' : u'uə', u'uỗ' : u'uə', u'uộ' : u'uə',
u'ưa' : u'ɯə', u'ứa' : u'ɯə', u'ừa' : u'ɯə', u'ửa' : u'ɯə', u'ữa' : u'ɯə', u'ựa' : u'ɯə',
u'ươ' : u'ɯə', u'ướ' : u'ɯə', u'ườ' : u'ɯə', u'ưở' : u'ɯə', u'ưỡ' : u'ɯə', u'ượ' : u'ɯə',
u'yê' : u'iɛ', u'yế' : u'iɛ', u'yề' : u'iɛ', u'yể' : u'iɛ', u'yễ' : u'iɛ', u'yệ' : u'iɛ',
u'uơ' : u'uə', u'uở' : u'uə', u'uờ': u'uə', u'uở' : u'uə', u'uỡ' : u'uə', u'uợ' : u'uə',
}
N_offglides = { u'ai' : u'aj', u'ái' : u'aj', u'ài' : u'aj', u'ải' : u'aj', u'ãi' : u'aj', u'ại' : u'aj',
u'ay' : u'ăj', u'áy' : u'ăj', u'ày' : u'ăj', u'ảy' : u'ăj', u'ãy' : u'ăj', u'ạy' : u'ăj',
u'ao' : u'aw', u'áo' : u'aw', u'ào' : u'aw', u'ảo' : u'aw', u'ão' : u'aw', u'ạo' : u'aw',
u'au' : u'ăw', u'áu' : u'ăw', u'àu' : u'ăw', u'ảu' : u'ăw', u'ãu' : u'ăw', u'ạu' : u'ăw',
u'ây' : u'ɤ̆j', u'ấy' : u'ɤ̆j', u'ầy' : u'ɤ̆j', u'ẩy' : u'ɤ̆j', u'ẫy' : u'ɤ̆j', u'ậy' : u'ɤ̆j',
u'âu' : u'ɤ̆w', u'ấu' : u'ɤ̆w', u'ầu': u'ɤ̆w', u'ẩu' : u'ɤ̆w', u'ẫu' : u'ɤ̆w', u'ậu' : u'ɤ̆w',
u'eo' : u'ew', u'éo' : u'ew', u'èo' : u'ew', u'ẻo' : u'ew', u'ẽo' : u'ew', u'ẹo' : u'ew',
u'iu' : u'iw', u'íu' : u'iw', u'ìu' : u'iw', u'ỉu' : u'iw', u'ĩu' : u'iw', u'ịu' : u'iw',
u'oi' : u'ɔj', u'ói' : u'ɔj', u'òi' : u'ɔj', u'ỏi' : u'ɔj', u'õi' : u'ɔj', u'ọi' : u'ɔj',
u'ôi' : u'oj', u'ối' : u'oj', u'ồi' : u'oj', u'ổi' : u'oj', u'ỗi' : u'oj', u'ội' : u'oj',
u'ui' : u'uj', u'úi' : u'uj', u'ùi' : u'uj', u'ủi' : u'uj', u'ũi' : u'uj', u'ụi' : u'uj',
u'uy' : u'uj', u'úy' : u'uj', u'ùy' : u'uj', u'ủy' : u'uj', u'ũy' : u'uj', u'ụy' : u'uj',
u'ơi' : u'ɤj', u'ới' : u'ɤj', u'ời' : u'ɤj', u'ởi' : u'ɤj', u'ỡi' : u'ɤj', u'ợi' : u'ɤj',
u'ưi' : u'ɯj', u'ứi' : u'ɯj', u'ừi' : u'ɯj', u'ửi' : u'ɯj', u'ữi' : u'ɯj', u'ựi' : u'ɯj',
u'ưu' : u'ɯw', u'ứu' : u'ɯw', u'ừu' : u'ɯw', u'ửu' : u'ɯw', u'ữu' : u'ɯw', u'ựu' : u'ɯw',
u'iêu' : u'iəw', u'iếu' : u'iəw', u'iều' : u'iəw', u'iểu' : u'iəw', u'iễu' : u'iəw', u'iệu' : u'iəw',
u'yêu' : u'iəw', u'yếu' : u'iəw', u'yều' : u'iəw', u'yểu' : u'iəw', u'yễu' : u'iəw', u'yệu' : u'iəw',
u'uôi' : u'uəj', u'uối' : u'uəj', u'uồi' : u'uəj', u'uổi' : u'uəj', u'uỗi' : u'uəj', u'uội' : u'uəj',
u'ươi' : u'ɯəj', u'ưới' : u'ɯəj', u'ười' : u'ɯəj', u'ưởi' : u'ɯəj', u'ưỡi' : u'ɯəj', u'ượi' : u'ɯəj',
u'ươu' : u'ɯəw', u'ướu' : u'ɯəw', u'ườu' : u'ɯəw', u'ưởu' : u'ɯəw', 'ưỡu' : u'ɯəw', u'ượu' : u'ɯəw'
}
N_onglides = { u'oa' : u'a', u'oá' : u'a', u'oà' : u'a', u'oả' : u'a', u'oã' : u'a', u'oạ' : u'a',
u'óa' : u'a', u'òa' : u'a', u'ỏa' : u'a', u'õa' : u'a', u'ọa' : u'a',
u'oă' : u'ă', u'oắ' : u'ă', u'oằ' : u'ă', u'oẳ' : u'ă', u'oẵ' : u'ă', u'oặ' : u'ă',
u'oe' : u'e', u'oé' : u'e', u'oè' : u'e', u'oẻ' : u'e', u'oẽ' : u'e', u'oẹ' : u'e',
u'oe' : u'e', u'óe' : u'e', u'òe' : u'e', u'ỏe' : u'e', u'õe' : u'e', u'ọe' : u'e',
u'ua' : u'a', u'uá' : u'a', u'uà' : u'a', u'uả' : u'a', u'uã' : u'a', u'uạ' : u'a',
u'uă' : u'ă', u'uắ' : u'ă', u'uằ' : u'ă', u'uẳ' : u'ă', u'uẵ' : u'ă', u'uặ' : u'ă',
u'uâ' : u'ɤ̆', u'uấ' : u'ɤ̆', u'uầ' : u'ɤ̆', u'uẩ' : u'ɤ̆', u'uẫ' : u'ɤ̆', u'uậ' : u'ɤ̆',
u'ue' : u'ɛ', u'ué' : u'ɛ', u'uè' : u'ɛ', u'uẻ' : u'ɛ', u'uẽ' : u'ɛ', u'uẹ' : u'ɛ',
u'uê' : u'e', u'uế' : u'e', u'uề' : u'e', u'uể' : u'e', u'uễ' : u'e', u'uệ' : u'e',
u'uơ' : u'ɤ', u'uớ' : u'ɤ', u'uờ' : u'ɤ', u'uở' : u'ɤ', u'uỡ' : u'ɤ', u'uợ' : u'ɤ',
u'uy' : u'i', u'uý' : u'i', u'uỳ' : u'i', u'uỷ' : u'i', u'uỹ' : u'i', u'uỵ' : u'i',
u'uya' : u'iə', u'uyá' : u'iə', u'uyà' : u'iə', u'uyả' : u'iə', u'uyã' : u'iə', u'uyạ' : u'iə',
u'uyê' : u'iə', u'uyế' : u'iə', u'uyề' : u'iə', u'uyể' : u'iə', u'uyễ' : u'iə', u'uyệ' : u'iə',
u'uyu' : u'iu', u'uyú' : u'iu', u'uyù' : u'iu', u'uyủ' : u'iu', u'uyũ' : u'iu', u'uyụ' : u'iu',
u'uyu' : u'iu', u'uýu' : u'iu', u'uỳu' : u'iu', u'uỷu' : u'iu', u'uỹu' : u'iu', u'uỵu' : u'iu',
u'oen' : u'en', u'oén' : u'en', u'oèn' : u'en', u'oẻn' : u'en', u'oẽn' : u'en', u'oẹn' : u'en',
u'oet' : u'et', u'oét' : u'et', u'oèt' : u'et', u'oẻt' : u'et', u'oẽt' : u'et', u'oẹt' : u'et'
}
N_onoffglides = { u'oe' : u'ej', u'oé' : u'ej', u'oè' : u'ej', u'oẻ' : u'ej', u'oẽ' : u'ej', u'oẹ' : u'ej',
u'oai' : u'aj', u'oái' : u'aj', u'oài' : u'aj', u'oải' : u'aj', u'oãi' : u'aj', u'oại' : u'aj',
u'oay' : u'ăj', u'oáy' : u'ăj', u'oày' : u'ăj', u'oảy' : u'ăj', u'oãy' : u'ăj', u'oạy' : u'ăj',
u'oao' : u'aw', u'oáo' : u'aw', u'oào' : u'aw', u'oảo' : u'aw', u'oão' : u'aw', u'oạo' : u'aw',
u'oeo' : u'ew', u'oéo' : u'ew', u'oèo' : u'ew', u'oẻo' : u'ew', u'oẽo' : u'ew', u'oẹo' : u'ew',
u'oeo' : u'ew', u'óeo' : u'ew', u'òeo' : u'ew', u'ỏeo' : u'ew', u'õeo' : u'ew', u'ọeo' : u'ew',
u'ueo' : u'ew', u'uéo' : u'ew', u'uèo' : u'ew', u'uẻo' : u'ew', u'uẽo' : u'ew', u'uẹo' : u'ew',
u'uai' : u'aj', u'uái' : u'aj', u'uài' : u'aj', u'uải' : u'aj', u'uãi' : u'aj', u'uại' : u'aj',
u'uay' : u'ăj', u'uáy' : u'ăj', u'uày' : u'ăj', u'uảy' : u'ăj', u'uãy' : u'ăj', u'uạy' : u'ăj',
u'uây' : u'ɤ̆j', u'uấy' : u'ɤ̆j', u'uầy' : u'ɤ̆j', u'uẩy' : u'ɤ̆j', u'uẫy' : u'ɤ̆j', u'uậy' : u'ɤ̆j'
}
N_codas = { u'p' : u'p', u't' : u't', u'c' : u'k', u'm' : u'm', u'n' : u'n', u'ng' : u'ŋ', u'nh' : u'ɲ', u'ch' : u'k' }
#tones = { u'a' : 33, u'á' : 24, u'à' : 32, u'ả' : 312, u'ã' : u'35g', u'ạ' : u'21g',
# u'â' : 33, u'ấ' : 24, u'ầ' : 32, u'ẩ' : 312, u'ẫ' : u'35g', u'ậ' : u'21g',
# u'ă' : 33, u'ắ' : 24, u'ằ' : 32, u'ẳ' : 312, u'ẵ' : u'35g', u'ặ' : u'21g',
# u'e' : 33, u'é' : 24, u'è' : 32, u'ẻ' : 312, u'ẽ' : u'35g', u'ẹ' : u'21g',
# u'ê' : 33, u'ế' : 24, u'ề' : 32, u'ể' : 312, u'ễ' : u'35g', u'ệ' : u'21g',
# u'i' : 33, u'í' : 24, u'ì' : 32, u'ỉ' : 312, u'ĩ' : u'35g', u'ị' : u'21g',
# u'o' : 33, u'ó' : 24, u'ò' : 32, u'ỏ' : 312, u'õ' : u'35g', u'ọ' : u'21g',
# u'ô' : 33, u'ố' : 24, u'ồ' : 32, u'ổ' : 312, u'ỗ' : u'35g', u'ộ' : u'21g',
# u'ơ' : 33, u'ớ' : 24, u'ờ' : 32, u'ở' : 312, u'ỡ' : u'35g', u'ợ' : u'21g',
# u'u' : 33, u'ú' : 24, u'ù' : 32, u'ủ' : 312, u'ũ' : u'35g', u'ụ' : u'21g',
# u'ư' : 33, u'ứ' : 24, u'ừ' : 32, u'ử' : 312, u'ữ' : u'35g', u'ự' : u'21g',
# u'y' : 33, u'ý' : 24, u'ỳ' : 32, u'ỷ' : 312, u'ỹ' : u'35g', u'ỵ' : u'21g',
# }
N_tones = { u'á' : 24, u'à' : 32, u'ả' : 312, u'ã' : u'35g', u'ạ' : u'21g',
u'ấ' : 24, u'ầ' : 32, u'ẩ' : 312, u'ẫ' : u'35g', u'ậ' : u'21g',
u'ắ' : 24, u'ằ' : 32, u'ẳ' : 312, u'ẵ' : u'35g', u'ặ' : u'21g',
u'é' : 24, u'è' : 32, u'ẻ' : 312, u'ẽ' : u'35g', u'ẹ' : u'21g',
u'ế' : 24, u'ề' : 32, u'ể' : 312, u'ễ' : u'35g', u'ệ' : u'21g',
u'í' : 24, u'ì' : 32, u'ỉ' : 312, u'ĩ' : u'35g', u'ị' : u'21g',
u'ó' : 24, u'ò' : 32, u'ỏ' : 312, u'õ' : u'35g', u'ọ' : u'21g',
u'ố' : 24, u'ồ' : 32, u'ổ' : 312, u'ỗ' : u'35g', u'ộ' : u'21g',
u'ớ' : 24, u'ờ' : 32, u'ở' : 312, u'ỡ' : u'35g', u'ợ' : u'21g',
u'ú' : 24, u'ù' : 32, u'ủ' : 312, u'ũ' : u'35g', u'ụ' : u'21g',
u'ứ' : 24, u'ừ' : 32, u'ử' : 312, u'ữ' : u'35g', u'ự' : u'21g',
u'ý' : 24, u'ỳ' : 32, u'ỷ' : 312, u'ỹ' : u'35g', u'ỵ' : u'21g',
}
# used to use \u02C0 for the unicode raised glottal character
N_tones_p = { u'á' : 5, u'à' : 2, u'ả' : 4, u'ã' : 3, u'ạ' : 6,
u'ấ' : 5, u'ầ' : 2, u'ẩ' : 4, u'ẫ' : 3, u'ậ' : 6,
u'ắ' : 5, u'ằ' : 2, u'ẳ' : 4, u'ẵ' : 3, u'ặ' : 6,
u'é' : 5, u'è' : 2, u'ẻ' : 4, u'ẽ' : 3, u'ẹ' : 6,
u'ế' : 5, u'ề' : 2, u'ể' : 4, u'ễ' : 3, u'ệ' : 6,
u'í' : 5, u'ì' : 2, u'ỉ' : 4, u'ĩ' : 3, u'ị' : 6,
u'ó' : 5, u'ò' : 2, u'ỏ' : 4, u'õ' : 3, u'ọ' : 6,
u'ố' : 5, u'ồ' : 2, u'ổ' : 4, u'ỗ' : 3, u'ộ' : 6,
u'ớ' : 5, u'ờ' : 2, u'ở' : 4, u'ỡ' : 3, u'ợ' : 6,
u'ú' : 5, u'ù' : 2, u'ủ' : 4, u'ũ' : 3, u'ụ' : 6,
u'ứ' : 5, u'ừ' : 2, u'ử' : 4, u'ữ' : 3, u'ự' : 6,
u'ý' : 5, u'ỳ' : 2, u'ỷ' : 4, u'ỹ' : 3, u'ỵ' : 6,
}
N_gi = { u'gi' : u'zi', u'gí': u'zi', u'gì' : u'zi', u'gì' : u'zi', u'gĩ' : u'zi', u'gị' : u'zi'}
N_qu = {u'quy' : u'kwi', u'qúy' : u'kwi', u'qùy' : u'kwi', u'qủy' : u'kwi', u'qũy' : u'kwi', u'qụy' : u'kwi'}
#######################################################
#central.py
#coding: utf-8
C_onsets = { u'b' : u'b', u't' : u't', u'th' : u'tʰ', u'đ' : u'd', u'ch' : u'c',
u'kh' : u'x', u'g' : u'ɣ', u'l' : u'l', u'm' : u'm', u'n': u'n',
u'ngh': u'ŋ', u'nh' : u'ɲ', u'ng' : u'ŋ', u'ph' : u'f', u'v' : u'j',
u'x' : u's', u'd' : u'j', u'h' : u'h', u'p' : u'p', u'qu' : u'w',
u'gi' : u'j', u'tr' : u'ʈ', u'k' : u'k', u'c' : u'k', u'gh' : u'ɣ',
u'r' : u'ʐ', u's' : u'ʂ', u'gi' : u'j'
}
C_nuclei = { u'a' : u'a', u'á' : u'a', u'à' : u'a', u'ả' : u'a', u'ã' : u'a', u'ạ' : u'a',
u'â' : u'ɤ̆', u'ấ' : u'ɤ̆', u'ầ' : u'ɤ̆', u'ẩ' : u'ɤ̆', u'ẫ' : u'ɤ̆', u'ậ' : u'ɤ̆',
u'ă' : u'ă', u'ắ' : u'ă', u'ằ' : u'ă', u'ẳ' : u'ă', u'ẵ' : u'ă', u'ặ' : u'ă',
u'e' : u'ɛ', u'é' : u'ɛ', u'è' : u'ɛ', u'ẻ' : u'ɛ', u'ẽ' : u'ɛ', u'ẹ' : u'ɛ',
u'ê' : u'e', u'ế' : u'e', u'ề' : u'e', u'ể' : u'e', u'ễ' : u'e', u'ệ' : u'e',
u'i' : u'i', u'í' : u'i', u'ì' : u'i', u'ỉ' : u'i', u'ĩ' : u'i', u'ị' : u'i',
u'o' : u'ɔ', u'ó' : u'ɔ', u'ò' : u'ɔ', u'ỏ' : u'ɔ', u'õ' : u'ɔ', u'ọ' : u'ɔ',
u'ô' : u'o', u'ố' : u'o', u'ồ' : u'o', u'ổ' : u'o', u'ỗ' : u'o', u'ộ' : u'o',
u'ơ' : u'ɤ', u'ớ' : u'ɤ', u'ờ' : u'ɤ', u'ở' : u'ɤ', u'ỡ' : u'ɤ', u'ợ' : u'ɤ',
u'u' : u'u', u'ú' : u'u', u'ù' : u'u', u'ủ' : u'u', u'ũ' : u'u', u'ụ' : u'u',
u'ư' : u'ɯ', u'ứ' : u'ɯ', u'ừ' : u'ɯ', u'ử' : u'ɯ', u'ữ' : u'ɯ', u'ự' : u'ɯ',
u'y' : u'i', u'ý' : u'i', u'ỳ' : u'i', u'ỷ' : u'i', u'ỹ' : u'i', u'ỵ' : u'i',
u'eo' : u'eo', u'éo' : u'eo', u'èo' : u'eo', u'ẻo' : u'eo', u'ẽo': u'eo', u'ẹo' : u'eo',
u'êu' : u'ɛu', u'ếu' : u'ɛu', u'ều' : u'ɛu', u'ểu' : u'ɛu', u'ễu': u'ɛu', u'ệu' : u'ɛu',
u'ia' : u'iə', u'ía' : u'iə', u'ìa' : u'iə', u'ỉa' : u'iə', u'ĩa' : u'iə', u'ịa' : u'iə',
u'ia' : u'iə', u'iá' : u'iə', u'ià' : u'iə', u'iả' : u'iə', u'iã' : u'iə', u'iạ' : u'iə',
u'iê' : u'iə', u'iế' : u'iə', u'iề' : u'iə', u'iể' : u'iə', u'iễ' : u'iə', u'iệ' : u'iə',
u'oo' : u'ɔ', u'óo' : u'ɔ', u'òo' : u'ɔ', u'ỏo' : u'ɔ', u'õo' : u'ɔ', u'ọo' : u'ɔ',
u'oo' : u'ɔ', u'oó' : u'ɔ', u'oò' : u'ɔ', u'oỏ' : u'ɔ', u'oõ' : u'ɔ', u'oọ' : u'ɔ',
u'ôô' : u'o', u'ốô' : u'o', u'ồô' : u'o', u'ổô' : u'o', u'ỗô' : u'o', u'ộô' : u'o',
u'ôô' : u'o', u'ôố' : u'o', u'ôồ' : u'o', u'ôổ' : u'o', u'ôỗ' : u'o', u'ôộ' : u'o',
u'ua' : u'uə', u'úa' : u'uə', u'ùa' : u'uə', u'ủa' : u'uə', u'ũa' : u'uə', u'ụa' : u'uə',
u'uô' : u'uə', u'uố' : u'uə', u'uồ' : u'uə', u'uổ' : u'uə', u'uỗ' : u'uə', u'uộ' : u'uə',
u'ưa' : u'ɯə', u'ứa' : u'ɯə', u'ừa' : u'ɯə', u'ửa' : u'ɯə', u'ữa' : u'ɯə', u'ựa' : u'ɯə',
u'ươ' : u'ɯə', u'ướ' : u'ɯə', u'ườ' : u'ɯə', u'ưở' : u'ɯə', u'ưỡ' : u'ɯə', u'ượ' : u'ɯə',
u'yê' : u'iɛ', u'yế' : u'iɛ', u'yề' : u'iɛ', u'yể' : u'iɛ', u'yễ' : u'iɛ', u'yệ' : u'iɛ',
u'uơ' : u'uə', u'uở' : u'uə', u'uờ': u'uə', u'uở' : u'uə', u'uỡ' : u'uə', u'uợ' : u'uə',
}
C_offglides = { u'ai' : u'aj', u'ái' : u'aj', u'ài' : u'aj', u'ải' : u'aj', u'ãi' : u'aj', u'ại' : u'aj',
u'ay' : u'ăj', u'áy' : u'ăj', u'ày' : u'ăj', u'ảy' : u'ăj', u'ãy' : u'ăj', u'ạy' : u'ăj',
u'ao' : u'aw', u'áo' : u'aw', u'ào' : u'aw', u'ảo' : u'aw', u'ão' : u'aw', u'ạo' : u'aw',
u'au' : u'ăw', u'áu' : u'ăw', u'àu' : u'ăw', u'ảu' : u'ăw', u'ãu' : u'ăw', u'ạu' : u'ăw',
u'ây' : u'ɤ̆j', u'ấy' : u'ɤ̆j', u'ầy' : u'ɤ̆j', u'ẩy' : u'ɤ̆j', u'ẫy' : u'ɤ̆j', u'ậy' : u'ɤ̆j',
u'âu' : u'ɤ̆w', u'ấu' : u'ɤ̆w', u'ầu': u'ɤ̆w', u'ẩu' : u'ɤ̆w', u'ẫu' : u'ɤ̆w', u'ậu' : u'ɤ̆w',
u'eo' : u'ew', u'éo' : u'ew', u'èo' : u'ew', u'ẻo' : u'ew', u'ẽo' : u'ew', u'ẹo' : u'ew',
u'iu' : u'iw', u'íu' : u'iw', u'ìu' : u'iw', u'ỉu' : u'iw', u'ĩu' : u'iw', u'ịu' : u'iw',
u'oi' : u'ɔj', u'ói' : u'ɔj', u'òi' : u'ɔj', u'ỏi' : u'ɔj', u'õi' : u'ɔj', u'ọi' : u'ɔj',
u'ôi' : u'oj', u'ối' : u'oj', u'ồi' : u'oj', u'ổi' : u'oj', u'ỗi' : u'oj', u'ội' : u'oj',
u'ui' : u'uj', u'úi' : u'uj', u'ùi' : u'uj', u'ủi' : u'uj', u'ũi' : u'uj', u'ụi' : u'uj',
u'uy' : u'uj', u'úy' : u'uj', u'ùy' : u'uj', u'ủy' : u'uj', u'ũy' : u'uj', u'ụy' : u'uj',
u'ơi' : u'ɤj', u'ới' : u'ɤj', u'ời' : u'ɤj', u'ởi' : u'ɤj', u'ỡi' : u'ɤj', u'ợi' : u'ɤj',
u'ưi' : u'ɯj', u'ứi' : u'ɯj', u'ừi' : u'ɯj', u'ửi' : u'ɯj', u'ữi' : u'ɯj', u'ựi' : u'ɯj',
u'ưu' : u'ɯw', u'ứu' : u'ɯw', u'ừu' : u'ɯw', u'ửu' : u'ɯw', u'ữu' : u'ɯw', u'ựu' : u'ɯw',
u'iêu' : u'iəw', u'iếu' : u'iəw', u'iều' : u'iəw', u'iểu' : u'iəw', u'iễu' : u'iəw', u'iệu' : u'iəw',
u'yêu' : u'iəw', u'yếu' : u'iəw', u'yều' : u'iəw', u'yểu' : u'iəw', u'yễu' : u'iəw', u'yệu' : u'iəw',
u'uôi' : u'uəj', u'uối' : u'uəj', u'uồi' : u'uəj', u'uổi' : u'uəj', u'uỗi' : u'uəj', u'uội' : u'uəj',
u'ươi' : u'ɯəj', u'ưới' : u'ɯəj', u'ười' : u'ɯəj', u'ưởi' : u'ɯəj', u'ưỡi' : u'ɯəj', u'ượi' : u'ɯəj',
u'ươu' : u'ɯəw', u'ướu' : u'ɯəw', u'ườu' : u'ɯəw', u'ưởu' : u'ɯəw', 'ưỡu' : u'ɯəw', u'ượu' : u'ɯəw'
}
C_onglides = { u'oa' : u'a', u'oá' : u'a', u'oà' : u'a', u'oả' : u'a', u'oã' : u'a', u'oạ' : u'a',
u'óa' : u'a', u'òa' : u'a', u'ỏa' : u'a', u'õa' : u'a', u'ọa' : u'a',
u'oă' : u'ă', u'oắ' : u'ă', u'oằ' : u'ă', u'oẳ' : u'ă', u'oẵ' : u'ă', u'oặ' : u'ă',
u'oe' : u'e', u'oé' : u'e', u'oè' : u'e', u'oẻ' : u'e', u'oẽ' : u'e', u'oẹ' : u'e',
u'oe' : u'e', u'óe' : u'e', u'òe' : u'e', u'ỏe' : u'e', u'õe' : u'e', u'ọe' : u'e',
u'ua' : u'a', u'uá' : u'a', u'uà' : u'a', u'uả' : u'a', u'uã' : u'a', u'uạ' : u'a',
u'uă' : u'ă', u'uắ' : u'ă', u'uằ' : u'ă', u'uẳ' : u'ă', u'uẵ' : u'ă', u'uặ' : u'ă',
u'uâ' : u'ɤ̆', u'uấ' : u'ɤ̆', u'uầ' : u'ɤ̆', u'uẩ' : u'ɤ̆', u'uẫ' : u'ɤ̆', u'uậ' : u'ɤ̆',
u'ue' : u'ɛ', u'ué' : u'ɛ', u'uè' : u'ɛ', u'uẻ' : u'ɛ', u'uẽ' : u'ɛ', u'uẹ' : u'ɛ',
u'uê' : u'e', u'uế' : u'e', u'uề' : u'e', u'uể' : u'e', u'uễ' : u'e', u'uệ' : u'e',
u'uơ' : u'ɤ', u'uớ' : u'ɤ', u'uờ' : u'ɤ', u'uở' : u'ɤ', u'uỡ' : u'ɤ', u'uợ' : u'ɤ',
u'uy' : u'i', u'uý' : u'i', u'uỳ' : u'i', u'uỷ' : u'i', u'uỹ' : u'i', u'uỵ' : u'i',
u'uya' : u'iə', u'uyá' : u'iə', u'uyà' : u'iə', u'uyả' : u'iə', u'uyã' : u'iə', u'uyạ' : u'iə',
u'uyê' : u'iə', u'uyế' : u'iə', u'uyề' : u'iə', u'uyể' : u'iə', u'uyễ' : u'iə', u'uyệ' : u'iə',
u'uyu' : u'iu', u'uyú' : u'iu', u'uyù' : u'iu', u'uyủ' : u'iu', u'uyũ' : u'iu', u'uyụ' : u'iu',
u'uyu' : u'iu', u'uýu' : u'iu', u'uỳu' : u'iu', u'uỷu' : u'iu', u'uỹu' : u'iu', u'uỵu' : u'iu',
u'oen' : u'en', u'oén' : u'en', u'oèn' : u'en', u'oẻn' : u'en', u'oẽn' : u'en', u'oẹn' : u'en',
u'oet' : u'et', u'oét' : u'et', u'oèt' : u'et', u'oẻt' : u'et', u'oẽt' : u'et', u'oẹt' : u'et'
}
C_onoffglides = { u'oe' : u'ej', u'oé' : u'ej', u'oè' : u'ej', u'oẻ' : u'ej', u'oẽ' : u'ej', u'oẹ' : u'ej',
u'oai' : u'aj', u'oái' : u'aj', u'oài' : u'aj', u'oải' : u'aj', u'oãi' : u'aj', u'oại' : u'aj',
u'oay' : u'ăj', u'oáy' : u'ăj', u'oày' : u'ăj', u'oảy' : u'ăj', u'oãy' : u'ăj', u'oạy' : u'ăj',
u'oao' : u'aw', u'oáo' : u'aw', u'oào' : u'aw', u'oảo' : u'aw', u'oão' : u'aw', u'oạo' : u'aw',
u'oeo' : u'ew', u'oéo' : u'ew', u'oèo' : u'ew', u'oẻo' : u'ew', u'oẽo' : u'ew', u'oẹo' : u'ew',
u'oeo' : u'ew', u'óeo' : u'ew', u'òeo' : u'ew', u'ỏeo' : u'ew', u'õeo' : u'ew', u'ọeo' : u'ew',
u'ueo' : u'ew', u'uéo' : u'ew', u'uèo' : u'ew', u'uẻo' : u'ew', u'uẽo' : u'ew', u'uẹo' : u'ew',
u'uai' : u'aj', u'uái' : u'aj', u'uài' : u'aj', u'uải' : u'aj', u'uãi' : u'aj', u'uại' : u'aj',
u'uay' : u'ăj', u'uáy' : u'ăj', u'uày' : u'ăj', u'uảy' : u'ăj', u'uãy' : u'ăj', u'uạy' : u'ăj',
u'uây' : u'ɤ̆j', u'uấy' : u'ɤ̆j', u'uầy' : u'ɤ̆j', u'uẩy' : u'ɤ̆j', u'uẫy' : u'ɤ̆j', u'uậy' : u'ɤ̆j'
}
C_codas = { u'p' : u'p', u't' : u'k', u'c' : u'k', u'm' : u'm', u'n' : u'ŋ', u'ng' : u'ŋ', u'nh' : u'n', u'ch' : u'k' }
# See Alves 2007 (SEALS XII), Vũ 1982
C_tones = { u'á' : 13, u'à' : 42, u'ả' : 312, u'ã' : 312, u'ạ' : u'21g',
u'ấ' : 13, u'ầ' : 42, u'ẩ' : 312, u'ẫ' : 312, u'ậ' : u'21g',
u'ắ' : 13, u'ằ' : 42, u'ẳ' : 312, u'ẵ' : 312, u'ặ' : u'21g',
u'é' : 13, u'è' : 42, u'ẻ' : 312, u'ẽ' : 312, u'ẹ' : u'21g',
u'ế' : 13, u'ề' : 42, u'ể' : 312, u'ễ' : 312, u'ệ' : u'21g',
u'í' : 13, u'ì' : 42, u'ỉ' : 312, u'ĩ' : 312, u'ị' : u'21g',
u'ó' : 13, u'ò' : 42, u'ỏ' : 312, u'õ' : 312, u'ọ' : u'21g',
u'ố' : 13, u'ồ' : 42, u'ổ' : 312, u'ỗ' : 312, u'ộ' : u'21g',
u'ớ' : 13, u'ờ' : 42, u'ở' : 312, u'ỡ' : 312, u'ợ' : u'21g',
u'ú' : 13, u'ù' : 42, u'ủ' : 312, u'ũ' : 312, u'ụ' : u'21g',
u'ứ' : 13, u'ừ' : 42, u'ử' : 312, u'ữ' : 312, u'ự' : u'21g',
u'ý' : 13, u'ỳ' : 42, u'ỷ' : 312, u'ỹ' : 312, u'ỵ' : u'21g',
}
# used to use \u02C0 for raised glottal instead of g
C_tones_p = { u'á' : 5, u'à' : 2, u'ả' : 4, u'ã' : 4, u'ạ' : 6,
u'ấ' : 5, u'ầ' : 2, u'ẩ' : 4, u'ẫ' : 4, u'ậ' : 6,
u'ắ' : 5, u'ằ' : 2, u'ẳ' : 4, u'ẵ' : 4, u'ặ' : 6,
u'é' : 5, u'è' : 2, u'ẻ' : 4, u'ẽ' : 4, u'ẹ' : 6,
u'ế' : 5, u'ề' : 2, u'ể' : 4, u'ễ' : 4, u'ệ' : 6,
u'í' : 5, u'ì' : 2, u'ỉ' : 4, u'ĩ' : 4, u'ị' : 6,
u'ó' : 5, u'ò' : 2, u'ỏ' : 4, u'õ' : 4, u'ọ' : 6,
u'ố' : 5, u'ồ' : 2, u'ổ' : 4, u'ỗ' : 4, u'ộ' : 6,
u'ớ' : 5, u'ờ' : 2, u'ở' : 4, u'ỡ' : 4, u'ợ' : 6,
u'ú' : 5, u'ù' : 2, u'ủ' : 4, u'ũ' : 4, u'ụ' : 6,
u'ứ' : 5, u'ừ' : 2, u'ử' : 4, u'ữ' : 4, u'ự' : 6,
u'ý' : 5, u'ỳ' : 2, u'ỷ' : 4, u'ỹ' : 4, u'ỵ' : 6,
}
C_gi = { u'gi' : u'ji', u'gí': u'ji', u'gì' : u'ji', u'gì' : u'ji', u'gĩ' : u'ji', u'gị' : u'ji' }
C_qu = {u'quy' : u'wi', u'qúy' : u'wi', u'qùy' : u'wi', u'qủy' : u'wi', u'qũy' : u'wi', u'qụy' : u'wi'}
############################################
#south.py
#coding: utf-8
S_onsets = { u'b' : u'b', u't' : u't', u'th' : u'tʰ', u'đ' : u'd', u'ch' : u'c',
u'kh' : u'x', u'g' : u'ɣ', u'l' : u'l', u'm' : u'm', u'n': u'n',
u'ngh': u'ŋ', u'nh' : u'ɲ', u'ng' : u'ŋ', u'ph' : u'f', u'v' : u'j',
u'x' : u's', u'd' : u'j', u'h' : u'h', u'p' : u'p', u'qu' : u'w',
u'gi' : u'j', u'tr' : u'ʈ', u'k' : u'k', u'c' : u'k', u'gh' : u'ɣ',
u'r' : u'ʐ', u's' : u'ʂ', u'gi' : u'j'
}
S_nuclei = { u'a' : u'a', u'á' : u'a', u'à' : u'a', u'ả' : u'a', u'ã' : u'a', u'ạ' : u'a',
u'â' : u'ɤ̆', u'ấ' : u'ɤ̆', u'ầ' : u'ɤ̆', u'ẩ' : u'ɤ̆', u'ẫ' : u'ɤ̆', u'ậ' : u'ɤ̆',
u'ă' : u'ă', u'ắ' : u'ă', u'ằ' : u'ă', u'ẳ' : u'ă', u'ẵ' : u'ă', u'ặ' : u'ă',
u'e' : u'ɛ', u'é' : u'ɛ', u'è' : u'ɛ', u'ẻ' : u'ɛ', u'ẽ' : u'ɛ', u'ẹ' : u'ɛ',
u'ê' : u'e', u'ế' : u'e', u'ề' : u'e', u'ể' : u'e', u'ễ' : u'e', u'ệ' : u'e',
u'i' : u'i', u'í' : u'i', u'ì' : u'i', u'ỉ' : u'i', u'ĩ' : u'i', u'ị' : u'i',
u'o' : u'ɔ', u'ó' : u'ɔ', u'ò' : u'ɔ', u'ỏ' : u'ɔ', u'õ' : u'ɔ', u'ọ' : u'ɔ',
u'ô' : u'o', u'ố' : u'o', u'ồ' : u'o', u'ổ' : u'o', u'ỗ' : u'o', u'ộ' : u'o',
u'ơ' : u'ɤ', u'ớ' : u'ɤ', u'ờ' : u'ɤ', u'ở' : u'ɤ', u'ỡ' : u'ɤ', u'ợ' : u'ɤ',
u'u' : u'u', u'ú' : u'u', u'ù' : u'u', u'ủ' : u'u', u'ũ' : u'u', u'ụ' : u'u',
u'ư' : u'ɯ', u'ứ' : u'ɯ', u'ừ' : u'ɯ', u'ử' : u'ɯ', u'ữ' : u'ɯ', u'ự' : u'ɯ',
u'y' : u'i', u'ý' : u'i', u'ỳ' : u'i', u'ỷ' : u'i', u'ỹ' : u'i', u'ỵ' : u'i',
u'eo' : u'eo', u'éo' : u'eo', u'èo' : u'eo', u'ẻo' : u'eo', u'ẽo': u'eo', u'ẹo' : u'eo',
u'êu' : u'ɛu', u'ếu' : u'ɛu', u'ều' : u'ɛu', u'ểu' : u'ɛu', u'ễu': u'ɛu', u'ệu' : u'ɛu',
u'ia' : u'iə', u'ía' : u'iə', u'ìa' : u'iə', u'ỉa' : u'iə', u'ĩa' : u'iə', u'ịa' : u'iə',
u'ia' : u'iə', u'iá' : u'iə', u'ià' : u'iə', u'iả' : u'iə', u'iã' : u'iə', u'iạ' : u'iə',
u'iê' : u'iə', u'iế' : u'iə', u'iề' : u'iə', u'iể' : u'iə', u'iễ' : u'iə', u'iệ' : u'iə',
u'oo' : u'ɔ', u'óo' : u'ɔ', u'òo' : u'ɔ', u'ỏo' : u'ɔ', u'õo' : u'ɔ', u'ọo' : u'ɔ',
u'oo' : u'ɔ', u'oó' : u'ɔ', u'oò' : u'ɔ', u'oỏ' : u'ɔ', u'oõ' : u'ɔ', u'oọ' : u'ɔ',
                u'ôô' : u'o', u'ốô' : u'o', u'ồô' : u'o', u'ổô' : u'o', u'ỗô' : u'o', u'ộô' : u'o',
                u'ôô' : u'o', u'ôố' : u'o', u'ôồ' : u'o', u'ôổ' : u'o', u'ôỗ' : u'o', u'ôộ' : u'o',
                u'ua' : u'uə', u'úa' : u'uə', u'ùa' : u'uə', u'ủa' : u'uə', u'ũa' : u'uə', u'ụa' : u'uə',
u'uô' : u'uə', u'uố' : u'uə', u'uồ' : u'uə', u'uổ' : u'uə', u'uỗ' : u'uə', u'uộ' : u'uə',
u'ưa' : u'ɯə', u'ứa' : u'ɯə', u'ừa' : u'ɯə', u'ửa' : u'ɯə', u'ữa' : u'ɯə', u'ựa' : u'ɯə',
u'ươ' : u'ɯə', u'ướ' : u'ɯə', u'ườ' : u'ɯə', u'ưở' : u'ɯə', u'ưỡ' : u'ɯə', u'ượ' : u'ɯə',
u'yê' : u'iɛ', u'yế' : u'iɛ', u'yề' : u'iɛ', u'yể' : u'iɛ', u'yễ' : u'iɛ', u'yệ' : u'iɛ',
u'uơ' : u'uə', u'uở' : u'uə', u'uờ': u'uə', u'uở' : u'uə', u'uỡ' : u'uə', u'uợ' : u'uə',
}
S_offglides = { u'ai' : u'aj', u'ái' : u'aj', u'ài' : u'aj', u'ải' : u'aj', u'ãi' : u'aj', u'ại' : u'aj',
u'ay' : u'ăj', u'áy' : u'ăj', u'ày' : u'ăj', u'ảy' : u'ăj', u'ãy' : u'ăj', u'ạy' : u'ăj',
u'ao' : u'aw', u'áo' : u'aw', u'ào' : u'aw', u'ảo' : u'aw', u'ão' : u'aw', u'ạo' : u'aw',
u'au' : u'ăw', u'áu' : u'ăw', u'àu' : u'ăw', u'ảu' : u'ăw', u'ãu' : u'ăw', u'ạu' : u'ăw',
u'ây' : u'ɤ̆j', u'ấy' : u'ɤ̆j', u'ầy' : u'ɤ̆j', u'ẩy' : u'ɤ̆j', u'ẫy' : u'ɤ̆j', u'ậy' : u'ɤ̆j',
u'âu' : u'ɤ̆w', u'ấu' : u'ɤ̆w', u'ầu': u'ɤ̆w', u'ẩu' : u'ɤ̆w', u'ẫu' : u'ɤ̆w', u'ậu' : u'ɤ̆w',
u'eo' : u'ew', u'éo' : u'ew', u'èo' : u'ew', u'ẻo' : u'ew', u'ẽo' : u'ew', u'ẹo' : u'ew',
u'iu' : u'iw', u'íu' : u'iw', u'ìu' : u'iw', u'ỉu' : u'iw', u'ĩu' : u'iw', u'ịu' : u'iw',
u'oi' : u'ɔj', u'ói' : u'ɔj', u'òi' : u'ɔj', u'ỏi' : u'ɔj', u'õi' : u'ɔj', u'ọi' : u'ɔj',
u'ôi' : u'oj', u'ối' : u'oj', u'ồi' : u'oj', u'ổi' : u'oj', u'ỗi' : u'oj', u'ội' : u'oj',
u'ui' : u'uj', u'úi' : u'uj', u'ùi' : u'uj', u'ủi' : u'uj', u'ũi' : u'uj', u'ụi' : u'uj',
u'uy' : u'uj', u'úy' : u'uj', u'ùy' : u'uj', u'ủy' : u'uj', u'ũy' : u'uj', u'ụy' : u'uj',
u'ơi' : u'ɤj', u'ới' : u'ɤj', u'ời' : u'ɤj', u'ởi' : u'ɤj', u'ỡi' : u'ɤj', u'ợi' : u'ɤj',
u'ưi' : u'ɯj', u'ứi' : u'ɯj', u'ừi' : u'ɯj', u'ửi' : u'ɯj', u'ữi' : u'ɯj', u'ựi' : u'ɯj',
u'ưu' : u'ɯw', u'ứu' : u'ɯw', u'ừu' : u'ɯw', u'ửu' : u'ɯw', u'ữu' : u'ɯw', u'ựu' : u'ɯw',
u'iêu' : u'iəw', u'iếu' : u'iəw', u'iều' : u'iəw', u'iểu' : u'iəw', u'iễu' : u'iəw', u'iệu' : u'iəw',
u'yêu' : u'iəw', u'yếu' : u'iəw', u'yều' : u'iəw', u'yểu' : u'iəw', u'yễu' : u'iəw', u'yệu' : u'iəw',
u'uôi' : u'uəj', u'uối' : u'uəj', u'uồi' : u'uəj', u'uổi' : u'uəj', u'uỗi' : u'uəj', u'uội' : u'uəj',
u'ươi' : u'ɯəj', u'ưới' : u'ɯəj', u'ười' : u'ɯəj', u'ưởi' : u'ɯəj', u'ưỡi' : u'ɯəj', u'ượi' : u'ɯəj',
u'ươu' : u'ɯəw', u'ướu' : u'ɯəw', u'ườu' : u'ɯəw', u'ưởu' : u'ɯəw', 'ưỡu' : u'ɯəw', u'ượu' : u'ɯəw'
}
S_onglides = { u'oa' : u'a', u'oá' : u'a', u'oà' : u'a', u'oả' : u'a', u'oã' : u'a', u'oạ' : u'a',
u'óa' : u'a', u'òa' : u'a', u'ỏa' : u'a', u'õa' : u'a', u'ọa' : u'a',
u'oă' : u'ă', u'oắ' : u'ă', u'oằ' : u'ă', u'oẳ' : u'ă', u'oẵ' : u'ă', u'oặ' : u'ă',
u'oe' : u'e', u'oé' : u'e', u'oè' : u'e', u'oẻ' : u'e', u'oẽ' : u'e', u'oẹ' : u'e',
u'oe' : u'e', u'óe' : u'e', u'òe' : u'e', u'ỏe' : u'e', u'õe' : u'e', u'ọe' : u'e',
u'ua' : u'a', u'uá' : u'a', u'uà' : u'a', u'uả' : u'a', u'uã' : u'a', u'uạ' : u'a',
u'uă' : u'ă', u'uắ' : u'ă', u'uằ' : u'ă', u'uẳ' : u'ă', u'uẵ' : u'ă', u'uặ' : u'ă',
u'uâ' : u'ɤ̆', u'uấ' : u'ɤ̆', u'uầ' : u'ɤ̆', u'uẩ' : u'ɤ̆', u'uẫ' : u'ɤ̆', u'uậ' : u'ɤ̆',
u'ue' : u'ɛ', u'ué' : u'ɛ', u'uè' : u'ɛ', u'uẻ' : u'ɛ', u'uẽ' : u'ɛ', u'uẹ' : u'ɛ',
u'uê' : u'e', u'uế' : u'e', u'uề' : u'e', u'uể' : u'e', u'uễ' : u'e', u'uệ' : u'e',
u'uơ' : u'ɤ', u'uớ' : u'ɤ', u'uờ' : u'ɤ', u'uở' : u'ɤ', u'uỡ' : u'ɤ', u'uợ' : u'ɤ',
u'uy' : u'i', u'uý' : u'i', u'uỳ' : u'i', u'uỷ' : u'i', u'uỹ' : u'i', u'uỵ' : u'i',
u'uya' : u'iə', u'uyá' : u'iə', u'uyà' : u'iə', u'uyả' : u'iə', u'uyã' : u'iə', u'uyạ' : u'iə',
u'uyê' : u'iə', u'uyế' : u'iə', u'uyề' : u'iə', u'uyể' : u'iə', u'uyễ' : u'iə', u'uyệ' : u'iə',
u'uyu' : u'iu', u'uyú' : u'iu', u'uyù' : u'iu', u'uyủ' : u'iu', u'uyũ' : u'iu', u'uyụ' : u'iu',
u'uyu' : u'iu', u'uýu' : u'iu', u'uỳu' : u'iu', u'uỷu' : u'iu', u'uỹu' : u'iu', u'uỵu' : u'iu',
u'oen' : u'en', u'oén' : u'en', u'oèn' : u'en', u'oẻn' : u'en', u'oẽn' : u'en', u'oẹn' : u'en',
u'oet' : u'et', u'oét' : u'et', u'oèt' : u'et', u'oẻt' : u'et', u'oẽt' : u'et', u'oẹt' : u'et'
}
S_onoffglides = { u'oe' : u'ej', u'oé' : u'ej', u'oè' : u'ej', u'oẻ' : u'ej', u'oẽ' : u'ej', u'oẹ' : u'ej',
u'oai' : u'aj', u'oái' : u'aj', u'oài' : u'aj', u'oải' : u'aj', u'oãi' : u'aj', u'oại' : u'aj',
u'oay' : u'ăj', u'oáy' : u'ăj', u'oày' : u'ăj', u'oảy' : u'ăj', u'oãy' : u'ăj', u'oạy' : u'ăj',
u'oao' : u'aw', u'oáo' : u'aw', u'oào' : u'aw', u'oảo' : u'aw', u'oão' : u'aw', u'oạo' : u'aw',
u'oeo' : u'ew', u'oéo' : u'ew', u'oèo' : u'ew', u'oẻo' : u'ew', u'oẽo' : u'ew', u'oẹo' : u'ew',
u'oeo' : u'ew', u'óeo' : u'ew', u'òeo' : u'ew', u'ỏeo' : u'ew', u'õeo' : u'ew', u'ọeo' : u'ew',
u'ueo' : u'ew', u'uéo' : u'ew', u'uèo' : u'ew', u'uẻo' : u'ew', u'uẽo' : u'ew', u'uẹo' : u'ew',
u'uai' : u'aj', u'uái' : u'aj', u'uài' : u'aj', u'uải' : u'aj', u'uãi' : u'aj', u'uại' : u'aj',
u'uay' : u'ăj', u'uáy' : u'ăj', u'uày' : u'ăj', u'uảy' : u'ăj', u'uãy' : u'ăj', u'uạy' : u'ăj',
u'uây' : u'ɤ̆j', u'uấy' : u'ɤ̆j', u'uầy' : u'ɤ̆j', u'uẩy' : u'ɤ̆j', u'uẫy' : u'ɤ̆j', u'uậy' : u'ɤ̆j'
}
S_codas = { u'p' : u'p', u't' : u't', u'c' : u'k', u'm' : u'm', u'n' : u'ŋ', u'ng' : u'ŋ', u'nh' : u'n', u'ch' : u't' }
S_tones = { u'á' : 45, u'à' : 32, u'ả' : 214, u'ã' : 214, u'ạ' : 212,
u'ấ' : 45, u'ầ' : 32, u'ẩ' : 214, u'ẫ' : 214, u'ậ' : 212,
u'ắ' : 45, u'ằ' : 32, u'ẳ' : 214, u'ẵ' : 214, u'ặ' : 212,
u'é' : 45, u'è' : 32, u'ẻ' : 214, u'ẽ' : 214, u'ẹ' : 212,
u'ế' : 45, u'ề' : 32, u'ể' : 214, u'ễ' : 214, u'ệ' : 212,
u'í' : 45, u'ì' : 32, u'ỉ' : 214, u'ĩ' : 214, u'ị' : 212,
u'ó' : 45, u'ò' : 32, u'ỏ' : 214, u'õ' : 214, u'ọ' : 212,
u'ố' : 45, u'ồ' : 32, u'ổ' : 214, u'ỗ' : 214, u'ộ' : 212,
u'ớ' : 45, u'ờ' : 32, u'ở' : 214, u'ỡ' : 214, u'ợ' : 212,
u'ú' : 45, u'ù' : 32, u'ủ' : 214, u'ũ' : 214, u'ụ' : 212,
u'ứ' : 45, u'ừ' : 32, u'ử' : 214, u'ữ' : 214, u'ự' : 212,
u'ý' : 45, u'ỳ' : 32, u'ỷ' : 214, u'ỹ' : 214, u'ỵ' : 212,
}
S_tones_p = { u'á' : 5, u'à' : 2, u'ả' : 4, u'ã' : 4, u'ạ' : 6,
u'ấ' : 5, u'ầ' : 2, u'ẩ' : 4, u'ẫ' : 4, u'ậ' : 6,
u'ắ' : 5, u'ằ' : 2, u'ẳ' : 4, u'ẵ' : 4, u'ặ' : 6,
u'é' : 5, u'è' : 2, u'ẻ' : 4, u'ẽ' : 4, u'ẹ' : 6,
u'ế' : 5, u'ề' : 2, u'ể' : 4, u'ễ' : 4, u'ệ' : 6,
u'í' : 5, u'ì' : 2, u'ỉ' : 4, u'ĩ' : 4, u'ị' : 6,
u'ó' : 5, u'ò' : 2, u'ỏ' : 4, u'õ' : 4, u'ọ' : 6,
u'ố' : 5, u'ồ' : 2, u'ổ' : 4, u'ỗ' : 4, u'ộ' : 6,
u'ớ' : 5, u'ờ' : 2, u'ở' : 4, u'ỡ' : 4, u'ợ' : 6,
u'ú' : 5, u'ù' : 2, u'ủ' : 4, u'ũ' : 4, u'ụ' : 6,
u'ứ' : 5, u'ừ' : 2, u'ử' : 4, u'ữ' : 4, u'ự' : 6,
u'ý' : 5, u'ỳ' : 2, u'ỷ' : 4, u'ỹ' : 4, u'ỵ' : 6,
}
S_gi = { u'gi' : u'ji', u'gí': u'ji', u'gì' : u'ji', u'gì' : u'ji', u'gĩ' : u'ji', u'gị' : u'ji' }
S_qu = {u'quy' : u'wi', u'qúy' : u'wi', u'qùy' : u'wi', u'qủy' : u'wi', u'qũy' : u'wi', u'qụy' : u'wi'}
################################################3
import sys, codecs, re
from io import StringIO
from optparse import OptionParser
from string import punctuation
def trans(word, dialect, glottal, pham, cao, palatals):
# This looks ugly, but newer versions of python complain about "from x import *" syntax
if dialect == 'n':
onsets, nuclei, codas, tones, onglides, offglides, onoffglides, qu, gi = N_onsets, N_nuclei, N_codas, N_tones, N_onglides, N_offglides, N_onoffglides, N_qu, N_gi
elif dialect == 'c':
onsets, nuclei, codas, tones, onglides, offglides, onoffglides, qu, gi = C_onsets, C_nuclei, C_codas, C_tones, C_onglides, C_offglides, C_onoffglides, C_qu, C_gi
elif dialect == 's':
onsets, nuclei, codas, tones, onglides, offglides, onoffglides, qu, gi = S_onsets, S_nuclei, S_codas, S_tones, S_onglides, S_offglides, S_onoffglides, S_qu, S_gi
#Custom
onsets, nuclei, codas, onglides, offglides, onoffglides, qu, gi = Cus_onsets, Cus_nuclei, Cus_codas, Cus_onglides, Cus_offglides, Cus_onoffglides, Cus_qu, Cus_gi
if pham or cao:
if dialect == 'n': tones_p = N_tones_p
if dialect == 'c': tones_p = C_tones_p
if dialect == 's': tones_p = S_tones_p
#Custom
tones_p = Cus_tones_p
tones = tones_p
ons = ''
nuc = ''
cod = ''
ton = 0
oOffset = 0
cOffset = 0
l = len(word)
if l > 0:
if word[0:3] in onsets: # if onset is 'ngh'
ons = onsets[word[0:3]]
oOffset = 3
elif word[0:2] in onsets: # if onset is 'nh', 'gh', 'kʷ' etc
ons = onsets[word[0:2]]
oOffset = 2
elif word[0] in onsets: # if single onset
ons = onsets[word[0]]
oOffset = 1
if word[l-2:l] in codas: # if two-character coda
cod = codas[word[l-2:l]]
cOffset = 2
elif word[l-1] in codas: # if one-character coda
cod = codas[word[l-1]]
cOffset = 1
#if word[0:2] == u'gi' and cod and len(word) == 3: # if you just have 'gi' and a coda...
if word[0:2] in gi and cod and len(word) == 3: # if you just have 'gi' and a coda...
nucl = u'i'
ons = u'z'
else:
nucl = word[oOffset:l-cOffset]
if nucl in nuclei:
if oOffset == 0:
if glottal == 1:
if word[0] not in onsets: # if there isn't an onset....
ons = u'ʔ'+nuclei[nucl] # add a glottal stop
else: # otherwise...
nuc = nuclei[nucl] # there's your nucleus
else:
nuc = nuclei[nucl] # there's your nucleus
else: # otherwise...
nuc = nuclei[nucl] # there's your nucleus
elif nucl in onglides and ons != u'kw': # if there is an onglide...
nuc = onglides[nucl] # modify the nuc accordingly
if ons: # if there is an onset...
ons = ons+u'w' # labialize it, but...
else: # if there is no onset...
ons = u'w' # add a labiovelar onset
elif nucl in onglides and ons == u'kw':
nuc = onglides[nucl]
elif nucl in onoffglides:
cod = onoffglides[nucl][-1]
nuc = onoffglides[nucl][0:-1]
if ons != u'kw':
if ons:
ons = ons+u'w'
else:
ons = u'w'
elif nucl in offglides:
cod = offglides[nucl][-1]
nuc = offglides[nucl][:-1]
elif word in gi: # if word == 'gi', 'gì',...
ons = gi[word][0]
nuc = gi[word][1]
elif word in qu: # if word == 'quy', 'qúy',...
ons = qu[word][:-1]
nuc = qu[word][-1]
else:
# Something is non-Viet
return (None, None, None, None)
# Velar Fronting (Northern dialect)
if dialect == 'n':
if nuc == u'a':
if cod == u'k' and cOffset == 2: nuc = u'ɛ'
if cod == u'ɲ' and nuc == u'a': nuc = u'ɛ'
# Final palatals (Northern dialect)
if nuc not in [u'i', u'e', u'ɛ']:
if cod == u'ɲ':
cod = u'ɲ' # u'ŋ'
elif palatals != 1 and nuc in [u'i', u'e', u'ɛ']:
if cod == u'ɲ':
cod = u'ɲ'#u'ŋ'
if palatals == 1:
if cod == u'k' and nuc in [u'i', u'e', u'ɛ']:
cod = u'c'
# Velar Fronting (Southern and Central dialects)
else:
if nuc in [u'i', u'e']:
if cod == u'k': cod = u't'
if cod == u'ŋ': cod = u'n'
# There is also this reverse fronting, see Thompson 1965:94 ff.
elif nuc in [u'iə', u'ɯə', u'uə', u'u', u'ɯ', u'ɤ', u'o', u'ɔ', u'ă', u'ɤ̆']:
if cod == u't':
cod = u'k'
if cod == u'n': cod = u'ŋ'
# Monophthongization (Southern dialects: Thompson 1965: 86; Hoàng 1985: 181)
if dialect == 's':
if cod in [u'm', u'p']:
if nuc == u'iə': nuc = u'i'
if nuc == u'uə': nuc = u'u'
if nuc == u'ɯə': nuc = u'ɯ'
# Tones
# Modified 20 Sep 2008 to fix aberrant 33 error
tonelist = [tones[word[i]] for i in range(0,l) if word[i] in tones]
if tonelist:
ton = str(tonelist[len(tonelist)-1])
else:
if not (pham or cao):
if dialect == 'c':
ton = str('35')
else:
ton = str('33')
else:
ton = str('1')
# Modifications for closed syllables
if cOffset !=0:
# Obstruent-final nang tones are modal voice
if (dialect == 'n' or dialect == 's') and ton == u'21g' and cod in ['p', 't', 'k']:
#if ton == u'21\u02C0' and cod in ['p', 't', 'k']: # fixed 8 Nov 2016
ton = u'21'
# Modification for sắc in closed syllables (Northern and Central only)
if ((dialect == 'n' and ton == u'24') or (dialect == 'c' and ton == u'13')) and cod in ['p', 't', 'k']:
ton = u'45'
# Modification for 8-tone system
if cao == 1:
if ton == u'5' and cod in ['p', 't', 'k']:
ton = u'5b'
if ton == u'6' and cod in ['p', 't', 'k']:
ton = u'6b'
# labialized allophony (added 17.09.08)
if nuc in [u'u', u'o', u'ɔ']:
if cod == u'ŋ':
cod = u'ŋ͡m'
if cod == u'k':
cod = u'k͡p'
return (ons, nuc, cod, ton)
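# A minimal illustrative sketch (not part of the original pipeline): trans() splits one
# orthographic syllable into (onset, nucleus, coda, tone) using the Cus_* tables above.
# The two words below are arbitrary examples; the exact symbols depend on those tables,
# so the result is only printed, not asserted.
for _demo_word in [u'tiếng', u'việt']:
    print("trans demo:", _demo_word, "->", trans(_demo_word, 'n', 0, 1, 0, 0))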
def convert(word, dialect, glottal, pham, cao, palatals, delimit):
"""Convert a single orthographic string to IPA."""
ons = ''
nuc = ''
cod = ''
ton = 0
seq = ''
try:
(ons, nuc, cod, ton) = trans(word, dialect, glottal, pham, cao, palatals)
if None in (ons, nuc, cod, ton):
seq = u'['+word+u']'
else:
seq = delimit+delimit.join(filter(None, (ons, nuc, cod, ton)))+delimit
except (TypeError):
pass
return seq
########################333
from vinorm import *
from underthesea import word_tokenize
import eng_to_ipa
SET=[S_onsets, S_nuclei, S_codas#, S_tones
, S_onglides, S_offglides, S_onoffglides, S_qu, S_gi, C_onsets, C_nuclei, C_codas#, C_tones
, C_onglides, C_offglides, C_onoffglides, C_qu, C_gi, N_onsets, N_nuclei, N_codas#, N_tones
, N_onglides, N_offglides, N_onoffglides, N_qu, N_gi, Cus_onsets, Cus_nuclei, Cus_codas#, N_tones
, Cus_onglides, Cus_offglides, Cus_onoffglides, Cus_qu, Cus_gi]
DICT={}
#144 in total
syms=['ɯəj', 'ɤ̆j', 'ʷiə', 'ɤ̆w', 'ɯəw', 'ʷet', 'iəw', 'uəj', 'ʷen', 'tʰw', 'ʷɤ̆', 'ʷiu', 'kwi', 'ŋ͡m', 'k͡p', 'cw', 'jw', 'uə', 'eə', 'bw', 'oj', 'ʷi', 'vw', 'ăw', 'ʈw', 'ʂw', 'aʊ', 'fw', 'ɛu', 'tʰ', 'tʃ', 'ɔɪ', 'xw', 'ʷɤ', 'ɤ̆', 'ŋw', 'ʊə', 'zi', 'ʷă', 'dw', 'eɪ', 'aɪ', 'ew', 'iə', 'ɣw', 'zw', 'ɯj', 'ʷɛ', 'ɯw', 'ɤj', 'ɔ:', 'əʊ', 'ʷa', 'mw', 'ɑ:', 'hw', 'ɔj', 'uj', 'lw', 'ɪə', 'ăj', 'u:', 'aw', 'ɛj', 'iw', 'aj', 'ɜ:', 'kw', 'nw', 't∫', 'ɲw', 'eo', 'sw', 'tw', 'ʐw', 'iɛ', 'ʷe', 'i:', 'ɯə', 'dʒ', 'ɲ', 'θ', 'ʌ', 'l', 'w', '1', 'ɪ', 'ɯ', 'd', '∫', 'p', 'ə', 'u', 'o', '3', 'ɣ', '!', 'ð', 'ʧ', '6', 'ʒ', 'ʐ', 'z', 'v', 'g', 'ă', '_', 'æ', 'ɤ', '2', 'ʤ', 'i', '.', 'ɒ', 'b', 'h', 'n', 'ʂ', 'ɔ', 'ɛ', 'k', 'm', '5', ' ', 'c', 'j', 'x', 'ʈ', ',', '4', 'ʊ', 's', 'ŋ', 'a', 'ʃ', '?', 'r', ':', 'η', 'f', ';', 'e', 't', "'"]
def Parsing(listParse, text, delimit):
undefine_symbol = "'"
if listParse == "default":
listParse=['ʷiə', 'uəj', 'iəw', 'k͡p', 'ʷɤ̆', 'ɤ̆j', 'ŋ͡m', 'kwi', 'ɤ̆w', 'ɯəj', 'ʷen', 'ʷiu', 'ʷet', 'ɯəw', 'ʷɛ', 'ʷɤ', 'ɯj', 'oj', 'ăw', 'zi', 'kw', 'aɪ', 'iɛ', 'ɤ̆', 'ɔ:', 'ăj', 'ʷa', 'eə', 'u:', 'uj', 'aʊ', 'uə', 'aj', 'iə', 'iw', 'əʊ', 'ɑ:', 'tʃ', 'ʷe', 'ɛu', 'ɔɪ', 'ʷi', 'eɪ', 'ɤj', 'ɯw', 'ɛj', 'ɔj', 'i:', 't∫', 'ɪə', 'ʷă', 'ɜ:', 'tʰ', 'dʒ', 'ew', 'ʊə', 'ɯə', 'aw', '3', 'θ', 'v', 'ʊ', 'ʤ', 'ɔ', '1', 'ʧ', 'ʈ', ' ', 'd', 'i', 'ɣ', 'ɲ', 'ɤ', '?', 'ɪ', 'l', '.', 'j', ':', 't', 'ʒ', 'ə', 'ʌ', 'm', '!', '∫', 'ð', 'u', 'e', 'w', 'p', 'ʃ', 'æ', "'", 'h', 'o', 'k', '5', 'g', '4', 'n', ';', 'r', 'b', 'ɯ', 'a', 's', 'ʐ', 'η', 'ŋ', 'ɒ', 'ʂ', '_', 'f', ',', 'ɛ', 'z', '6', '2', 'x', 'ă']
listParse.sort(reverse = True,key=len)
output=""
skip=0
for ic,char in enumerate(text):
#print(char,skip)
check = 0
if skip>0:
skip=skip-1
continue
for l in listParse:
if len(l) <= len(text[ic:]) and l == text[ic:ic+len(l)]:
output+=delimit+l
check =1
skip=len(l)-1
break
if check == 0:
#Case symbol not in list
if str(char) in ["ˈ","ˌ","*"]:
continue
print("this is not in symbol :"+ char+":")
output+=delimit+undefine_symbol
return output.rstrip()+delimit
#print("Parsing",Parsing("default","iu iu","|"))
def getSymbol():
for s in SET:
DICT.update(s)
list_phoneme=DICT.values()
list_phoneme=list(list_phoneme)
English_phoneme=["p","b","t","d","t∫","dʒ","k","g","f","v","ð","θ","s","z","∫","ʒ","m","n","η","l","r","w","j","ɪ","i:","ʊ","u:","e","ə","ɜ:","ɒ","ɔ:","æ","ʌ","ɑ:","ɪə","ʊə","eə","eɪ","ɔɪ","aɪ","əʊ","aʊ",'ʃ',"ʤ","ʧ"]
Special=['jw', 'ŋw', 'bw', 'vw', 'dw', 'eo', 'ʈw', 'mw', 'zw', 'fw', 'tw', 'tʰw', 'ɲw', 'cw', 'ʂw', 'ɣw', 'ʐw', 'xw', 'lw', 'hw', 'nw', 'sw', 'c']
word_pad = ["_"]
space = [" "]
tone=["1","2","3","4","5","6"]
    punctuation = [".",",","!",":","?",";","'"] #" ' ( ) have been removed because they produce no sound
modifi = ["k͡p","ŋ͡m"]
symbols = list_phoneme + space+word_pad + English_phoneme + punctuation + tone + modifi + Special
symbols = list(set(symbols))
symbols.sort(reverse = True,key=len)
return symbols
def vi2IPA_pitrain(text):
    # `epitran` is only needed for this optional path, so import it locally here
    import epitran
    epi = epitran.Epitran('vie-Latn')
    r = epi.transliterate(text)
    return r
def T2IPA_split(text,delimit):
sys.path.append('./Rules') # make sure we can find the Rules files
#Setup option
glottal = 0
pham = 0
cao = 0
palatals = 0
tokenize = 0
dialect='n' #"c""s"
tone_type=0
if tone_type==0:
pham=1
else:
cao=1
#Input text
line = text
if line =='\n':
return ""
else:
compound = u''
ortho = u''
words = line.split()
## toss len==0 junk
words = [word for word in words if len(word)>0]
## hack to get rid of single hyphens or underscores
words = [word for word in words if word!=u'-']
words = [word for word in words if word!=u'_']
for i in range(0,len(words)):
word = words[i].strip()
ortho += word
word = word.strip(punctuation).lower()
## 29.03.16: check if tokenize is true
## if true, call this routine for each substring
## and re-concatenate
if (tokenize and '-' in word) or (tokenize and '_' in word):
substrings = re.split(r'(_|-)', word)
values = substrings[::2]
delimiters = substrings[1::2] + ['']
ipa = [convert(x, dialect, glottal, pham, cao, palatals, delimit).strip() for x in values]
seq = ''.join(v+d for v,d in zip(ipa, delimiters))
else:
seq = convert(word, dialect, glottal, pham, cao, palatals, delimit).strip()
# concatenate
if len(words) >= 2:
ortho += ' '
if i < len(words)-1:
seq = seq+u' '
compound = compound + seq
return compound
def T2IPA(text):
sys.path.append('./Rules') # make sure we can find the Rules files
#Setup option
glottal = 0
pham = 0
cao = 0
palatals = 0
tokenize = 0
delimit = ''
dialect='n' #"c""s"
tone_type=0
if tone_type==0:
pham=1
else:
cao=1
#Input text
line = text
if line =='\n':
return ""
else:
compound = u''
ortho = u''
words = line.split()
## toss len==0 junk
words = [word for word in words if len(word)>0]
## hack to get rid of single hyphens or underscores
words = [word for word in words if word!=u'-']
words = [word for word in words if word!=u'_']
for i in range(0,len(words)):
word = words[i].strip()
ortho += word
word = word.strip(punctuation).lower()
## 29.03.16: check if tokenize is true
## if true, call this routine for each substring
## and re-concatenate
if (tokenize and '-' in word) or (tokenize and '_' in word):
substrings = re.split(r'(_|-)', word)
values = substrings[::2]
delimiters = substrings[1::2] + ['']
ipa = [convert(x, dialect, glottal, pham, cao, palatals, delimit).strip() for x in values]
seq = ''.join(v+d for v,d in zip(ipa, delimiters))
else:
seq = convert(word, dialect, glottal, pham, cao, palatals, delimit).strip()
# concatenate
if len(words) >= 2:
ortho += ' '
if i < len(words)-1:
seq = seq+u' '
compound = compound + seq
return compound
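# Vietnamese transcription of the English letter names; used below to spell out
# all-caps / out-of-vocabulary English tokens letter by letter.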
EN={"a":"ây","ă":"á","â":"ớ","b":"bi","c":"si","d":"đi","đ":"đê","e":"i","ê":"ê","f":"ép","g":"giy","h":"ếch","i":"ai","j":"giây","k":"cây","l":"eo","m":"em","n":"en","o":"âu","ô":"ô","ơ":"ơ","p":"pi","q":"kiu","r":"a","s":"ét","t":"ti","u":"diu","ư":"ư","v":"vi","w":"đắp liu","x":"ít","y":"quai","z":"giét"}
import re
def vi2IPA_split(texts,delimit):
content=[]
with open("Popular.txt",encoding="utf-8") as f:
content=f.read().splitlines()
tess = texts.split(".")
Results =""
for text in tess:
print("------------------------------------------------------")
TN= TTSnorm(text)
print("------------------------------------------------------")
print("Text normalize: ",TN)
TK= word_tokenize(TN)
print("Vietnamese Tokenize: ",TK)
for iuv,under_valid in enumerate(TK):
token_under=under_valid.split(" ")
checkinvalid=0
print(token_under)
if len(token_under) >1:
for tok in token_under:
if tok not in content or "[" in T2IPA(tok):
checkinvalid=1
if checkinvalid==1:
TK = TK[:iuv] + TK[iuv+1 :]
for tok in reversed(token_under):
TK.insert(iuv, tok)
IPA=""
for tk in TK:
ipa = T2IPA_split(tk,delimit).replace(" ","_")
if ipa =="":
IPA+=delimit+tk+delimit+" "
elif ipa[0]=="[" and ipa[-1]=="]":
eng = eng_to_ipa.convert(tk)
if eng[-1] == "*":
if tk.lower().upper() == tk:
#print("ENGLISH",tk)
                        # Read the English token letter by letter (spell it out)
letter2sound=""
for char in tk:
CHAR = str(char).lower()
if CHAR in list(EN.keys()):
letter2sound+=EN[CHAR]+" "
else:
letter2sound+=char+" "
IPA+=T2IPA_split(letter2sound,delimit)+" "
else:
                        # Keep it unchanged
IPA+=Parsing("default",tk.lower(),delimit)+" "
else:
IPA+=Parsing("default",eng,delimit)+" "
            #Check the English dictionary ("Etrain") step
            #If there is a mapping, use it
            #If not, check whether the token has vowels
            #If it does, leave it as-is
            #If not, spell it out
print(" ..................Out of domain word: " ,ipa)
else:
IPA+=ipa+" "
IPA=re.sub(delimit+'+', delimit, IPA)
IPA=re.sub(' +', ' ', IPA)
print("IPA Vietnamese: ",IPA)
print("------------------------------------------------------")
Results+= IPA.rstrip()+" "+delimit+"."+delimit+" "
#For checking: need much memory
'''
check_sym="ɯəjɤ̆jʷiəɤ̆wɯəwʷetiəwuəjʷentʰwʷɤ̆ʷiukwiŋ͡mk͡pcwjwuəeəbwojʷivwăwʈwʂwaʊfwɛutʰtʃɔɪxwʷɤɤ̆ŋwʊəziʷădweɪaɪewiəɣwzwɯjʷɛɯwɤjɔ:əʊʷamwɑ:hwɔjujlwɪəăju:awɛjiwajɜ:kwnwt∫ɲweoswtwʐwiɛʷei:ɯədʒɲθʌlw1ɪɯd∫pəuo3ɣ!ðʧ6ʒʐzvgă_æɤ2ʤi.ɒbhnʂɔɛkm5cjxʈ,4ʊsŋaʃ?r:ηf;et'"
for ine,res in enumerate(Results):
if res not in check_sym:
Results[ine]="'"
'''
return Results.rstrip()
def vi2IPA(text):
print("------------------------------------------------------")
TN= TTSnorm(text)
print("------------------------------------------------------")
print("Text normalize: ",TN)
TK= word_tokenize(TN)
print("Vietnamese Tokenize: ",TK)
IPA=""
for tk in TK:
ipa = T2IPA(tk).replace(" ","_")
if ipa =="":
IPA+=tk+" "
elif ipa[0]=="[" and ipa[-1]=="]":
eng = eng_to_ipa.convert(tk)
if eng[-1] == "*":
if tk.lower().upper() == tk:
                    # Read the English token letter by letter (spell it out)
letter2sound=""
for char in tk:
CHAR = str(char).lower()
if CHAR in list(EN.keys()):
letter2sound+=EN[CHAR]+" "
else:
letter2sound+=char+" "
IPA+=T2IPA_split(letter2sound,"")+" "
else:
                    # Keep it unchanged
IPA+=Parsing("default",tk,"")+" "
else:
IPA+=eng+" "
            #Check the English dictionary ("Etrain") step
            #If there is a mapping, use it
            #If not, check whether the token has vowels
            #If it does, leave it as-is
            #If not, spell it out
print(" ..................Out of domain word: " ,ipa)
else:
IPA+=ipa+" "
IPA=re.sub(' +', ' ', IPA)
print("IPA Vietnamese: ",IPA)
print("------------------------------------------------------")
return IPA
def checkDict():
cout=0
trung=0
List_token=[]
List_pair = []
with open("Popular.txt", encoding="utf-8") as f:
content=f.read().splitlines()
for line in content:
#nor_tr = vi2IPA_pitrain(line)
#nor = vi2IPA(line)
nor = T2IPA(line)
if nor in List_token:
print(line + " -> "+nor)
trung +=1
List_pair.append(line)
List_token.append(nor)
if nor=="":
cout+=1
print(line)
print("Number of token can not convert: ",cout)
print("Number of token in the same mapping:",trung)
List_token = list(set(List_token))
#print(List_token)
print(len(List_token))
################################
#Looking for pair
Pair = {}
for lt in List_pair:
Pair[T2IPA(lt)] = lt
cout_same=0
with open("Popular.txt", encoding="utf-8") as f:
content=f.read().splitlines()
for line in content:
if T2IPA(line) in Pair:
lin2 =Pair[T2IPA(line)]
if line != lin2:
if (lin2[0]=="k" and line[0]=="c") or (lin2[-1] in ['i','í','ì','ĩ','ỉ','ị'] and line[-1] in ['y','ý','ỳ','ỷ','ỹ','ỵ']) or (lin2[-1] in ['y','ý','ỳ','ỷ','ỹ','ỵ'] and line[-1] in ['i','í','ì','ĩ','ỉ','ị']):
continue
cout_same+=1
print(line+ " <-> " + lin2 +"\t\t:\t\t"+T2IPA(line))
print("Same pair:" , cout_same)
#The cases that lead to duplicate phoneme strings (homophones) are:
# Different dialects have already been unified in the custom (Cus_*) lists
# Spellings that only differ in where the diacritic is placed are merged into one
#The conversion from 'ɲ' to 'ŋ' in the northern dialect is disabled
#The labialized rhymes here come out exactly like the plain ones: there is no w in front, so "oa,ua,a" all map to one form > must consider (but adding it would also affect "qu", which would then yield w)
#Try to add ʷ to all start o and u as in wiki
# *** Problem with ủy onglide and off-glide is a big problem
#Same positive
#k <-> c
#g <-> gh
#i <-> y
#Same negative / need to fix
#oe <-> uê -> fix oe from e to ɛ
#final "ch": k in the north, t in the south -> custom uses k because it reduces duplicates more, 241 -> 153 cases
#However the finals "ch" "c" "t" still cannot be told apart => idea: borrow "tʃ" (as in teach and watch) as a replacement => k for c, t for t, tʃ for ch
#Changed offglide: úy -> wi to distinguish it from úi
#Remain
'''
di <-> gi : zi1
dìm <-> gìm : zim2
din <-> gin : zin1
díp <-> gíp : zip5
gen <-> ghen : ɣɛn1
ghì <-> gì : ɣi2
ghích <-> gích : ɣitʃ5
ia <-> iê : iə1
iêu <-> yêu : iəw1
khoắng <-> khuắng : xwʷăŋ5
khỏe <-> khoẻ : xwʷɛ4
khua <-> khuơ : xuə1
lóe <-> loé : lwʷɛ5
ngét <-> nghét : ŋɛt5
ngễu <-> nghễu : ŋɛu3
nghía <-> ngía : ŋiə5
nghịu <-> ngịu : ŋiw6
nghoèo <-> ngoèo : ŋwew2
quít <-> quýt : kwit5
thủa <-> thuở : tʰuə4
tòe <-> toè : twʷɛ2
ua <-> uơ : uə1
ưa <-> ươ : ɯə1
xõa <-> xoã : swʷa3
'''
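# Illustrative check for the duplicate-mapping notes above (a sketch, not part of the
# original script): pairs taken from the "Remain" list, e.g. quít/quýt, are expected to
# map to the same IPA string, so both conversions are simply printed side by side.
for _w1, _w2 in [(u'quít', u'quýt'), (u'thủa', u'thuở')]:
    print("homophone demo:", _w1, "vs", _w2, "->", T2IPA(_w1), "|", T2IPA(_w2))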
#To save compute, the phonemes are not normalized here for tone contour/intensity; tones just stay as the indices 1 -> 6
#"học" and "ác" give different results for the final "c"
###################################################
checkDict()
#print(vi2IPA_split("!Singapo english? đại học là IUYE gì khôngtontaij NIYE BoOK","'"))
#check the IPA output for English words
#print(vi2IPA_split("Another table was prepared to show available onsets. Onsets are splitted into 3 types. Type 1 are onsets which has one letter ","/"))
#Filter out the English stress marks "'"
#print(vi2IPA_split("speech? Secondly, we paper, we investigate work! One is that e language to another by","/").replace("/",""))
#Cases that still need to be dealt with:
# NIYE BoOK
#print(len(getSymbol()))
#print(getSymbol())
'''
test="t"
if test in syms:
print(test)
else:
print("none")
'''
###################################################
#Step
#Vinorm
#Underthesea
#For each token, convert it to phonemes
#If that fails, check the English phonemes
#If it is not an English word either -> spell out each character
#Now
#+Added the IPA symbols of ENGLISH
#+Added handling for missing cases as well as English cases: => use etrain for English
#+Dealt with unifying the phoneme tone register -> ok
#+Get the symbol set again | [
"re.split",
"eng_to_ipa.convert",
"re.sub",
"underthesea.word_tokenize",
"sys.path.append"
] | [((53180, 53206), 'sys.path.append', 'sys.path.append', (['"""./Rules"""'], {}), "('./Rules')\n", (53195, 53206), False, 'import sys, codecs, re\n'), ((54939, 54965), 'sys.path.append', 'sys.path.append', (['"""./Rules"""'], {}), "('./Rules')\n", (54954, 54965), False, 'import sys, codecs, re\n'), ((60447, 60464), 'underthesea.word_tokenize', 'word_tokenize', (['TN'], {}), '(TN)\n', (60460, 60464), False, 'from underthesea import word_tokenize\n'), ((61714, 61736), 're.sub', 're.sub', (['""" +"""', '""" """', 'IPA'], {}), "(' +', ' ', IPA)\n", (61720, 61736), False, 'import re\n'), ((57463, 57480), 'underthesea.word_tokenize', 'word_tokenize', (['TN'], {}), '(TN)\n', (57476, 57480), False, 'from underthesea import word_tokenize\n'), ((59507, 59542), 're.sub', 're.sub', (["(delimit + '+')", 'delimit', 'IPA'], {}), "(delimit + '+', delimit, IPA)\n", (59513, 59542), False, 'import re\n'), ((59554, 59576), 're.sub', 're.sub', (['""" +"""', '""" """', 'IPA'], {}), "(' +', ' ', IPA)\n", (59560, 59576), False, 'import re\n'), ((54287, 54310), 're.split', 're.split', (['"""(_|-)"""', 'word'], {}), "('(_|-)', word)\n", (54295, 54310), False, 'import re\n'), ((56064, 56087), 're.split', 're.split', (['"""(_|-)"""', 'word'], {}), "('(_|-)', word)\n", (56072, 56087), False, 'import re\n'), ((60695, 60717), 'eng_to_ipa.convert', 'eng_to_ipa.convert', (['tk'], {}), '(tk)\n', (60713, 60717), False, 'import eng_to_ipa\n'), ((58297, 58319), 'eng_to_ipa.convert', 'eng_to_ipa.convert', (['tk'], {}), '(tk)\n', (58315, 58319), False, 'import eng_to_ipa\n')] |
import unittest
#write the import for function for assignment7 sum_list_values
from src.assignments.assignment7 import sum_list_values
class Test_Assign7(unittest.TestCase):
def sample_test(self):
self.assertEqual(1,1)
#create a test for the sum_list_values function with list elements:
# bill 23 16 19 22
def test_sum_w_23_16_19_22(self):
test_list = ['bill', 23, 16, 19, 22]
self.assertEqual(80, sum_list_values(test_list))
#unittest.main(verbosity=2)
| [
"src.assignments.assignment7.sum_list_values"
] | [((441, 467), 'src.assignments.assignment7.sum_list_values', 'sum_list_values', (['test_list'], {}), '(test_list)\n', (456, 467), False, 'from src.assignments.assignment7 import sum_list_values\n')] |
import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras import backend as K
from keras import activations
def get_standard_values():
'''
These are just a set of floats used for testing the activation
functions, and are useful in multiple tests.
'''
return np.array([[0, 0.1, 0.5, 0.9, 1.0]], dtype=K.floatx())
def test_softmax():
'''
Test using a reference implementation of softmax
'''
def softmax(values):
m = np.max(values)
e = np.exp(values - m)
return e / np.sum(e)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softmax(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softmax(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_time_distributed_softmax():
x = K.placeholder(shape=(1, 1, 5))
f = K.function([x], [activations.softmax(x)])
test_values = get_standard_values()
test_values = np.reshape(test_values, (1, 1, np.size(test_values)))
f([test_values])[0]
def test_softplus():
'''
Test using a reference softplus implementation
'''
def softplus(x):
return np.log(np.ones_like(x) + np.exp(x))
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softplus(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softplus(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_softsign():
'''
Test using a reference softsign implementation
'''
def softsign(x):
return np.divide(x, np.ones_like(x) + np.absolute(x))
x = K.placeholder(ndim=2)
f = K.function([x], [activations.softsign(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = softsign(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_sigmoid():
'''
Test using a numerically stable reference sigmoid implementation
'''
def ref_sigmoid(x):
if x >= 0:
return 1 / (1 + np.exp(-x))
else:
z = np.exp(x)
return z / (1 + z)
sigmoid = np.vectorize(ref_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_hard_sigmoid():
'''
Test using a reference hard sigmoid implementation
'''
def ref_hard_sigmoid(x):
'''
Reference hard sigmoid with slope and shift values from theano, see
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py
'''
x = (x * 0.2) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
hard_sigmoid = np.vectorize(ref_hard_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.hard_sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = hard_sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_relu():
'''
Relu implementation doesn't depend on the value being
a theano variable. Testing ints, floats and theano tensors.
'''
x = K.placeholder(ndim=2)
f = K.function([x], [activations.relu(x)])
test_values = get_standard_values()
result = f([test_values])[0]
# because no negatives in test values
assert_allclose(result, test_values, rtol=1e-05)
def test_elu():
x = K.placeholder(ndim=2)
f = K.function([x], [activations.elu(x, 0.5)])
test_values = get_standard_values()
result = f([test_values])[0]
# because no negatives in test values
assert_allclose(result, test_values, rtol=1e-05)
negative_values = np.array([[-1, -2]], dtype=K.floatx())
result = f([negative_values])[0]
true_result = (np.exp(negative_values) - 1) / 2
assert_allclose(result, true_result)
def test_tanh():
test_values = get_standard_values()
x = K.placeholder(ndim=2)
exp = activations.tanh(x)
f = K.function([x], [exp])
result = f([test_values])[0]
expected = np.tanh(test_values)
assert_allclose(result, expected, rtol=1e-05)
def test_linear():
'''
This function does no input validation, it just returns the thing
that was passed in.
'''
xs = [1, 5, True, None, 'foo']
for x in xs:
assert(x == activations.linear(x))
if __name__ == '__main__':
pytest.main([__file__])
| [
"keras.activations.linear",
"keras.backend.floatx",
"keras.activations.hard_sigmoid",
"numpy.testing.assert_allclose",
"keras.backend.placeholder",
"numpy.tanh",
"pytest.main",
"numpy.max",
"numpy.exp",
"keras.activations.softmax",
"keras.activations.sigmoid",
"numpy.size",
"keras.activations.softsign",
"numpy.vectorize",
"numpy.ones_like",
"keras.activations.relu",
"numpy.absolute",
"keras.activations.softplus",
"keras.activations.tanh",
"numpy.sum",
"keras.activations.elu",
"keras.backend.function"
] | [((575, 596), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (588, 596), True, 'from keras import backend as K\n'), ((761, 806), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (776, 806), False, 'from numpy.testing import assert_allclose\n'), ((854, 884), 'keras.backend.placeholder', 'K.placeholder', ([], {'shape': '(1, 1, 5)'}), '(shape=(1, 1, 5))\n', (867, 884), True, 'from keras import backend as K\n'), ((1242, 1263), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (1255, 1263), True, 'from keras import backend as K\n'), ((1430, 1475), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (1445, 1475), False, 'from numpy.testing import assert_allclose\n'), ((1658, 1679), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (1671, 1679), True, 'from keras import backend as K\n'), ((1846, 1891), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (1861, 1891), False, 'from numpy.testing import assert_allclose\n'), ((2167, 2192), 'numpy.vectorize', 'np.vectorize', (['ref_sigmoid'], {}), '(ref_sigmoid)\n', (2179, 2192), True, 'import numpy as np\n'), ((2202, 2223), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (2215, 2223), True, 'from keras import backend as K\n'), ((2388, 2433), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (2403, 2433), False, 'from numpy.testing import assert_allclose\n'), ((2859, 2889), 'numpy.vectorize', 'np.vectorize', (['ref_hard_sigmoid'], {}), '(ref_hard_sigmoid)\n', (2871, 2889), True, 'import numpy as np\n'), ((2899, 2920), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (2912, 2920), True, 'from keras import backend as K\n'), ((3095, 3140), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (3110, 3140), False, 'from numpy.testing import assert_allclose\n'), ((3306, 3327), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (3319, 3327), True, 'from keras import backend as K\n'), ((3496, 3544), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'test_values'], {'rtol': '(1e-05)'}), '(result, test_values, rtol=1e-05)\n', (3511, 3544), False, 'from numpy.testing import assert_allclose\n'), ((3571, 3592), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (3584, 3592), True, 'from keras import backend as K\n'), ((3765, 3813), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'test_values'], {'rtol': '(1e-05)'}), '(result, test_values, rtol=1e-05)\n', (3780, 3813), False, 'from numpy.testing import assert_allclose\n'), ((3970, 4006), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'true_result'], {}), '(result, true_result)\n', (3985, 4006), False, 'from numpy.testing import assert_allclose\n'), ((4075, 4096), 'keras.backend.placeholder', 'K.placeholder', ([], {'ndim': '(2)'}), '(ndim=2)\n', (4088, 4096), True, 'from keras import backend as K\n'), ((4107, 4126), 'keras.activations.tanh', 'activations.tanh', (['x'], {}), '(x)\n', (4123, 4126), False, 'from keras import 
activations\n'), ((4135, 4157), 'keras.backend.function', 'K.function', (['[x]', '[exp]'], {}), '([x], [exp])\n', (4145, 4157), True, 'from keras import backend as K\n'), ((4207, 4227), 'numpy.tanh', 'np.tanh', (['test_values'], {}), '(test_values)\n', (4214, 4227), True, 'import numpy as np\n'), ((4232, 4277), 'numpy.testing.assert_allclose', 'assert_allclose', (['result', 'expected'], {'rtol': '(1e-05)'}), '(result, expected, rtol=1e-05)\n', (4247, 4277), False, 'from numpy.testing import assert_allclose\n'), ((4537, 4560), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (4548, 4560), False, 'import pytest\n'), ((491, 505), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (497, 505), True, 'import numpy as np\n'), ((518, 536), 'numpy.exp', 'np.exp', (['(values - m)'], {}), '(values - m)\n', (524, 536), True, 'import numpy as np\n'), ((351, 361), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (359, 361), True, 'from keras import backend as K\n'), ((556, 565), 'numpy.sum', 'np.sum', (['e'], {}), '(e)\n', (562, 565), True, 'import numpy as np\n'), ((622, 644), 'keras.activations.softmax', 'activations.softmax', (['x'], {}), '(x)\n', (641, 644), False, 'from keras import activations\n'), ((910, 932), 'keras.activations.softmax', 'activations.softmax', (['x'], {}), '(x)\n', (929, 932), False, 'from keras import activations\n'), ((1024, 1044), 'numpy.size', 'np.size', (['test_values'], {}), '(test_values)\n', (1031, 1044), True, 'import numpy as np\n'), ((1289, 1312), 'keras.activations.softplus', 'activations.softplus', (['x'], {}), '(x)\n', (1309, 1312), False, 'from keras import activations\n'), ((1705, 1728), 'keras.activations.softsign', 'activations.softsign', (['x'], {}), '(x)\n', (1725, 1728), False, 'from keras import activations\n'), ((2112, 2121), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2118, 2121), True, 'import numpy as np\n'), ((2249, 2271), 'keras.activations.sigmoid', 'activations.sigmoid', (['x'], {}), '(x)\n', (2268, 2271), False, 'from keras import activations\n'), ((2946, 2973), 'keras.activations.hard_sigmoid', 'activations.hard_sigmoid', (['x'], {}), '(x)\n', (2970, 2973), False, 'from keras import activations\n'), ((3353, 3372), 'keras.activations.relu', 'activations.relu', (['x'], {}), '(x)\n', (3369, 3372), False, 'from keras import activations\n'), ((3618, 3641), 'keras.activations.elu', 'activations.elu', (['x', '(0.5)'], {}), '(x, 0.5)\n', (3633, 3641), False, 'from keras import activations\n'), ((3864, 3874), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (3872, 3874), True, 'from keras import backend as K\n'), ((3932, 3955), 'numpy.exp', 'np.exp', (['negative_values'], {}), '(negative_values)\n', (3938, 3955), True, 'import numpy as np\n'), ((4481, 4502), 'keras.activations.linear', 'activations.linear', (['x'], {}), '(x)\n', (4499, 4502), False, 'from keras import activations\n'), ((1204, 1219), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (1216, 1219), True, 'import numpy as np\n'), ((1222, 1231), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1228, 1231), True, 'import numpy as np\n'), ((1615, 1630), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (1627, 1630), True, 'import numpy as np\n'), ((1633, 1647), 'numpy.absolute', 'np.absolute', (['x'], {}), '(x)\n', (1644, 1647), True, 'import numpy as np\n'), ((2070, 2080), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (2076, 2080), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for the aea.cli.generate sub-module."""
from unittest import TestCase, mock
from aea.cli.generate import _generate_item
from tests.test_cli.tools_for_testing import ContextMock
def _raise_file_exists(self, *args, **kwargs):
raise FileExistsError()
@mock.patch("builtins.open", mock.mock_open())
@mock.patch("aea.cli.generate.ConfigLoader")
@mock.patch("aea.cli.generate.os.path.join", return_value="joined-path")
@mock.patch("aea.cli.generate.ProtocolGenerator.generate", _raise_file_exists)
class GenerateItemTestCase(TestCase):
"""Test case for fetch_agent_locally method."""
def test__generate_item_file_exists(self, *mocks):
"""Test for fetch_agent_locally method positive result."""
ctx_mock = ContextMock()
with self.assertRaises(SystemExit):
_generate_item(ctx_mock, "protocol", "path")
| [
"unittest.mock.mock_open",
"tests.test_cli.tools_for_testing.ContextMock",
"unittest.mock.patch",
"aea.cli.generate._generate_item"
] | [((1142, 1185), 'unittest.mock.patch', 'mock.patch', (['"""aea.cli.generate.ConfigLoader"""'], {}), "('aea.cli.generate.ConfigLoader')\n", (1152, 1185), False, 'from unittest import TestCase, mock\n'), ((1187, 1258), 'unittest.mock.patch', 'mock.patch', (['"""aea.cli.generate.os.path.join"""'], {'return_value': '"""joined-path"""'}), "('aea.cli.generate.os.path.join', return_value='joined-path')\n", (1197, 1258), False, 'from unittest import TestCase, mock\n'), ((1260, 1337), 'unittest.mock.patch', 'mock.patch', (['"""aea.cli.generate.ProtocolGenerator.generate"""', '_raise_file_exists'], {}), "('aea.cli.generate.ProtocolGenerator.generate', _raise_file_exists)\n", (1270, 1337), False, 'from unittest import TestCase, mock\n'), ((1123, 1139), 'unittest.mock.mock_open', 'mock.mock_open', ([], {}), '()\n', (1137, 1139), False, 'from unittest import TestCase, mock\n'), ((1570, 1583), 'tests.test_cli.tools_for_testing.ContextMock', 'ContextMock', ([], {}), '()\n', (1581, 1583), False, 'from tests.test_cli.tools_for_testing import ContextMock\n'), ((1640, 1684), 'aea.cli.generate._generate_item', '_generate_item', (['ctx_mock', '"""protocol"""', '"""path"""'], {}), "(ctx_mock, 'protocol', 'path')\n", (1654, 1684), False, 'from aea.cli.generate import _generate_item\n')] |
"""
sphinx.ext.napoleon
~~~~~~~~~~~~~~~~~~~
Support for NumPy and Google style docstrings.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from sphinx import __display_version__ as __version__
from sphinx.application import Sphinx
from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
if False:
# For type annotation
from typing import Any, Dict, List # NOQA
class Config:
"""Sphinx napoleon extension settings in `conf.py`.
Listed below are all the settings used by napoleon and their default
values. These settings can be changed in the Sphinx `conf.py` file. Make
sure that "sphinx.ext.napoleon" is enabled in `conf.py`::
# conf.py
# Add any Sphinx extension module names here, as strings
extensions = ['sphinx.ext.napoleon']
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
napoleon_custom_sections = None
.. _Google style:
https://google.github.io/styleguide/pyguide.html
.. _NumPy style:
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
Attributes
----------
napoleon_google_docstring : :obj:`bool` (Defaults to True)
True to parse `Google style`_ docstrings. False to disable support
for Google style docstrings.
napoleon_numpy_docstring : :obj:`bool` (Defaults to True)
True to parse `NumPy style`_ docstrings. False to disable support
for NumPy style docstrings.
napoleon_include_init_with_doc : :obj:`bool` (Defaults to False)
        True to list ``__init__`` docstrings separately from the class
        docstring. False to fall back to Sphinx's default behavior, which
        considers the ``__init__`` docstring as part of the class
documentation.
**If True**::
def __init__(self):
\"\"\"
This will be included in the docs because it has a docstring
\"\"\"
def __init__(self):
# This will NOT be included in the docs
napoleon_include_private_with_doc : :obj:`bool` (Defaults to False)
True to include private members (like ``_membername``) with docstrings
in the documentation. False to fall back to Sphinx's default behavior.
**If True**::
def _included(self):
\"\"\"
This will be included in the docs because it has a docstring
\"\"\"
pass
def _skipped(self):
# This will NOT be included in the docs
pass
napoleon_include_special_with_doc : :obj:`bool` (Defaults to False)
True to include special members (like ``__membername__``) with
docstrings in the documentation. False to fall back to Sphinx's
default behavior.
**If True**::
def __str__(self):
\"\"\"
This will be included in the docs because it has a docstring
\"\"\"
return unicode(self).encode('utf-8')
def __unicode__(self):
# This will NOT be included in the docs
return unicode(self.__class__.__name__)
napoleon_use_admonition_for_examples : :obj:`bool` (Defaults to False)
True to use the ``.. admonition::`` directive for the **Example** and
**Examples** sections. False to use the ``.. rubric::`` directive
instead. One may look better than the other depending on what HTML
theme is used.
This `NumPy style`_ snippet will be converted as follows::
Example
-------
This is just a quick example
**If True**::
.. admonition:: Example
This is just a quick example
**If False**::
.. rubric:: Example
This is just a quick example
napoleon_use_admonition_for_notes : :obj:`bool` (Defaults to False)
True to use the ``.. admonition::`` directive for **Notes** sections.
False to use the ``.. rubric::`` directive instead.
Note
----
The singular **Note** section will always be converted to a
``.. note::`` directive.
See Also
--------
:attr:`napoleon_use_admonition_for_examples`
napoleon_use_admonition_for_references : :obj:`bool` (Defaults to False)
True to use the ``.. admonition::`` directive for **References**
sections. False to use the ``.. rubric::`` directive instead.
See Also
--------
:attr:`napoleon_use_admonition_for_examples`
napoleon_use_ivar : :obj:`bool` (Defaults to False)
True to use the ``:ivar:`` role for instance variables. False to use
the ``.. attribute::`` directive instead.
This `NumPy style`_ snippet will be converted as follows::
Attributes
----------
attr1 : int
Description of `attr1`
**If True**::
:ivar attr1: Description of `attr1`
:vartype attr1: int
**If False**::
.. attribute:: attr1
Description of `attr1`
:type: int
napoleon_use_param : :obj:`bool` (Defaults to True)
True to use a ``:param:`` role for each function parameter. False to
use a single ``:parameters:`` role for all the parameters.
This `NumPy style`_ snippet will be converted as follows::
Parameters
----------
arg1 : str
Description of `arg1`
arg2 : int, optional
Description of `arg2`, defaults to 0
**If True**::
:param arg1: Description of `arg1`
:type arg1: str
:param arg2: Description of `arg2`, defaults to 0
:type arg2: int, optional
**If False**::
:parameters: * **arg1** (*str*) --
Description of `arg1`
* **arg2** (*int, optional*) --
Description of `arg2`, defaults to 0
napoleon_use_keyword : :obj:`bool` (Defaults to True)
True to use a ``:keyword:`` role for each function keyword argument.
False to use a single ``:keyword arguments:`` role for all the
keywords.
This behaves similarly to :attr:`napoleon_use_param`. Note unlike
docutils, ``:keyword:`` and ``:param:`` will not be treated the same
way - there will be a separate "Keyword Arguments" section, rendered
in the same fashion as "Parameters" section (type links created if
possible)
See Also
--------
:attr:`napoleon_use_param`
napoleon_use_rtype : :obj:`bool` (Defaults to True)
True to use the ``:rtype:`` role for the return type. False to output
the return type inline with the description.
This `NumPy style`_ snippet will be converted as follows::
Returns
-------
bool
True if successful, False otherwise
**If True**::
:returns: True if successful, False otherwise
:rtype: bool
**If False**::
:returns: *bool* -- True if successful, False otherwise
napoleon_custom_sections : :obj:`list` (Defaults to None)
Add a list of custom sections to include, expanding the list of parsed sections.
The entries can either be strings or tuples, depending on the intention:
* To create a custom "generic" section, just pass a string.
* To create an alias for an existing section, pass a tuple containing the
alias name and the original, in that order.
If an entry is just a string, it is interpreted as a header for a generic
section. If the entry is a tuple/list/indexed container, the first entry
is the name of the section, the second is the section key to emulate.
"""
_config_values = {
'napoleon_google_docstring': (True, 'env'),
'napoleon_numpy_docstring': (True, 'env'),
'napoleon_include_init_with_doc': (False, 'env'),
'napoleon_include_private_with_doc': (False, 'env'),
'napoleon_include_special_with_doc': (False, 'env'),
'napoleon_use_admonition_for_examples': (False, 'env'),
'napoleon_use_admonition_for_notes': (False, 'env'),
'napoleon_use_admonition_for_references': (False, 'env'),
'napoleon_use_ivar': (False, 'env'),
'napoleon_use_param': (True, 'env'),
'napoleon_use_rtype': (True, 'env'),
'napoleon_use_keyword': (True, 'env'),
'napoleon_custom_sections': (None, 'env')
}
def __init__(self, **settings):
# type: (Any) -> None
for name, (default, rebuild) in self._config_values.items():
setattr(self, name, default)
for name, value in settings.items():
setattr(self, name, value)
def setup(app):
# type: (Sphinx) -> Dict[str, Any]
"""Sphinx extension setup function.
When the extension is loaded, Sphinx imports this module and executes
the ``setup()`` function, which in turn notifies Sphinx of everything
the extension offers.
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process
See Also
--------
`The Sphinx documentation on Extensions
<http://sphinx-doc.org/extensions.html>`_
`The Extension Tutorial <http://sphinx-doc.org/extdev/tutorial.html>`_
`The Extension API <http://sphinx-doc.org/extdev/appapi.html>`_
"""
if not isinstance(app, Sphinx):
# probably called by tests
return {'version': __version__, 'parallel_read_safe': True}
_patch_python_domain()
app.setup_extension('sphinx.ext.autodoc')
app.connect('autodoc-process-docstring', _process_docstring)
app.connect('autodoc-skip-member', _skip_member)
for name, (default, rebuild) in Config._config_values.items():
app.add_config_value(name, default, rebuild)
return {'version': __version__, 'parallel_read_safe': True}
def _patch_python_domain():
# type: () -> None
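    # Extend Sphinx's Python domain so that the alternative field names used by
    # Google/NumPy docstrings ('arg', 'argument', 'kwarg', 'kwparam') are
    # recognised and keyword arguments get their own typed "Keyword Arguments" field.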
try:
from sphinx.domains.python import PyTypedField
except ImportError:
pass
else:
import sphinx.domains.python
from sphinx.locale import _
for doc_field in sphinx.domains.python.PyObject.doc_field_types:
if doc_field.name == 'parameter':
doc_field.names = ('param', 'parameter', 'arg', 'argument')
break
sphinx.domains.python.PyObject.doc_field_types.append(
PyTypedField('keyword', label=_('Keyword Arguments'),
names=('keyword', 'kwarg', 'kwparam'),
typerolename='obj', typenames=('paramtype', 'kwtype'),
can_collapse=True))
def _process_docstring(app, what, name, obj, options, lines):
# type: (Sphinx, str, str, Any, Any, List[str]) -> None
"""Process the docstring for a given python object.
Called when autodoc has read and processed a docstring. `lines` is a list
of docstring lines that `_process_docstring` modifies in place to change
what Sphinx outputs.
The following settings in conf.py control what styles of docstrings will
be parsed:
* ``napoleon_google_docstring`` -- parse Google style docstrings
* ``napoleon_numpy_docstring`` -- parse NumPy style docstrings
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process.
what : str
A string specifying the type of the object to which the docstring
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : str
The fully qualified name of the object.
obj : module, class, exception, function, method, or attribute
The object to which the docstring belongs.
options : sphinx.ext.autodoc.Options
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
lines : list of str
The lines of the docstring, see above.
.. note:: `lines` is modified *in place*
"""
result_lines = lines
docstring = None # type: GoogleDocstring
if app.config.napoleon_numpy_docstring:
docstring = NumpyDocstring(result_lines, app.config, app, what, name,
obj, options)
result_lines = docstring.lines()
if app.config.napoleon_google_docstring:
docstring = GoogleDocstring(result_lines, app.config, app, what, name,
obj, options)
result_lines = docstring.lines()
lines[:] = result_lines[:]
def _skip_member(app, what, name, obj, skip, options):
# type: (Sphinx, str, str, Any, bool, Any) -> bool
"""Determine if private and special class members are included in docs.
The following settings in conf.py determine if private and special class
members or init methods are included in the generated documentation:
* ``napoleon_include_init_with_doc`` --
include init methods if they have docstrings
* ``napoleon_include_private_with_doc`` --
include private members if they have docstrings
* ``napoleon_include_special_with_doc`` --
include special members if they have docstrings
Parameters
----------
app : sphinx.application.Sphinx
Application object representing the Sphinx process
what : str
A string specifying the type of the object to which the member
belongs. Valid values: "module", "class", "exception", "function",
"method", "attribute".
name : str
The name of the member.
obj : module, class, exception, function, method, or attribute.
For example, if the member is the __init__ method of class A, then
`obj` will be `A.__init__`.
skip : bool
A boolean indicating if autodoc will skip this member if `_skip_member`
does not override the decision
options : sphinx.ext.autodoc.Options
The options given to the directive: an object with attributes
inherited_members, undoc_members, show_inheritance and noindex that
are True if the flag option of same name was given to the auto
directive.
Returns
-------
bool
True if the member should be skipped during creation of the docs,
False if it should be included in the docs.
"""
has_doc = getattr(obj, '__doc__', False)
is_member = (what == 'class' or what == 'exception' or what == 'module')
if name != '__weakref__' and has_doc and is_member:
cls_is_owner = False
if what == 'class' or what == 'exception':
qualname = getattr(obj, '__qualname__', '')
cls_path, _, _ = qualname.rpartition('.')
if cls_path:
try:
if '.' in cls_path:
import importlib
import functools
mod = importlib.import_module(obj.__module__)
mod_path = cls_path.split('.')
cls = functools.reduce(getattr, mod_path, mod)
else:
cls = obj.__globals__[cls_path]
except Exception:
cls_is_owner = False
else:
cls_is_owner = (cls and hasattr(cls, name) and # type: ignore
name in cls.__dict__)
else:
cls_is_owner = False
if what == 'module' or cls_is_owner:
is_init = (name == '__init__')
is_special = (not is_init and name.startswith('__') and
name.endswith('__'))
is_private = (not is_init and not is_special and
name.startswith('_'))
inc_init = app.config.napoleon_include_init_with_doc
inc_special = app.config.napoleon_include_special_with_doc
inc_private = app.config.napoleon_include_private_with_doc
if ((is_special and inc_special) or
(is_private and inc_private) or
(is_init and inc_init)):
return False
return None
| [
"importlib.import_module",
"functools.reduce",
"sphinx.locale._",
"sphinx.ext.napoleon.docstring.GoogleDocstring",
"sphinx.ext.napoleon.docstring.NumpyDocstring"
] | [((13152, 13223), 'sphinx.ext.napoleon.docstring.NumpyDocstring', 'NumpyDocstring', (['result_lines', 'app.config', 'app', 'what', 'name', 'obj', 'options'], {}), '(result_lines, app.config, app, what, name, obj, options)\n', (13166, 13223), False, 'from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring\n'), ((13365, 13437), 'sphinx.ext.napoleon.docstring.GoogleDocstring', 'GoogleDocstring', (['result_lines', 'app.config', 'app', 'what', 'name', 'obj', 'options'], {}), '(result_lines, app.config, app, what, name, obj, options)\n', (13380, 13437), False, 'from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring\n'), ((11303, 11325), 'sphinx.locale._', '_', (['"""Keyword Arguments"""'], {}), "('Keyword Arguments')\n", (11304, 11325), False, 'from sphinx.locale import _\n'), ((15875, 15914), 'importlib.import_module', 'importlib.import_module', (['obj.__module__'], {}), '(obj.__module__)\n', (15898, 15914), False, 'import importlib\n'), ((16000, 16040), 'functools.reduce', 'functools.reduce', (['getattr', 'mod_path', 'mod'], {}), '(getattr, mod_path, mod)\n', (16016, 16040), False, 'import functools\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from rdkit import DataStructs
import plugin_api
__license__ = "X11"
class LbvsEntry(plugin_api.PluginInterface):
"""
Compute Tanimoto similarity.
"""
def __init__(self):
self.stream = None
self.counter = 0
self.first_entry = False
def execute(self, files):
query = LbvsEntry._load_file(files["query_file"])
database = LbvsEntry._load_file(files["database_file"])
with open(files["output_file"], "w") as stream:
self.stream = stream
self.write_output_header()
self.compute_and_write_similarities_for_items(query, database)
self.write_output_footer()
def write_output_header(self):
self.stream.write('{"data":[')
def write_output_footer(self):
self.stream.write(']}')
def compute_and_write_similarities_for_items(self, query, database):
self.first_entry = True
for query_item in query:
for database_item in database:
self._write_separator_if_needed()
self.first_entry = False
self._compute_and_write_similarity(query_item, database_item)
def _write_separator_if_needed(self):
if not self.first_entry:
self.stream.write(",")
def _compute_and_write_similarity(self, query, item):
similarity = LbvsEntry._compute_similarity(
query["value"], item["value"])
json.dump({
"query": query["id"],
"id": item["id"],
"value": similarity
}, self.stream)
@staticmethod
def _load_file(path):
with open(path) as stream:
return [{
"id": item["id"],
"value": LbvsEntry._as_sparse_vector(item["value"])
} for item in json.load(stream)["data"]]
@staticmethod
def _as_sparse_vector(data):
        # Fixed size (2**23 = 8388608) for the sparse count fingerprint vector.
vector = DataStructs.cDataStructs.IntSparseIntVect(8388608)
for key in data:
vector[(int)(key)] = (int)(data[key])
return vector
@staticmethod
def _compute_similarity(left, right):
return DataStructs.TanimotoSimilarity(left, right)
def get_metadata(self) -> object:
return {
"id": "rdkit/tanimoto"
}
| [
"json.load",
"rdkit.DataStructs.TanimotoSimilarity",
"rdkit.DataStructs.cDataStructs.IntSparseIntVect",
"json.dump"
] | [((1496, 1585), 'json.dump', 'json.dump', (["{'query': query['id'], 'id': item['id'], 'value': similarity}", 'self.stream'], {}), "({'query': query['id'], 'id': item['id'], 'value': similarity},\n self.stream)\n", (1505, 1585), False, 'import json\n'), ((2021, 2071), 'rdkit.DataStructs.cDataStructs.IntSparseIntVect', 'DataStructs.cDataStructs.IntSparseIntVect', (['(8388608)'], {}), '(8388608)\n', (2062, 2071), False, 'from rdkit import DataStructs\n'), ((2245, 2288), 'rdkit.DataStructs.TanimotoSimilarity', 'DataStructs.TanimotoSimilarity', (['left', 'right'], {}), '(left, right)\n', (2275, 2288), False, 'from rdkit import DataStructs\n'), ((1882, 1899), 'json.load', 'json.load', (['stream'], {}), '(stream)\n', (1891, 1899), False, 'import json\n')] |
import os
import weather
import datetime
import unittest
import tempfile
class WeatherTestCase(unittest.TestCase):
def setUp(self):
self.db_fd, weather.app.config['DATABASE'] = tempfile.mkstemp()
weather.app.config['TESTING'] = True
self.app = weather.app.test_client()
weather.init_db()
def tearDown(self):
os.close(self.db_fd)
os.unlink(weather.app.config['DATABASE'])
def test_empty_db(self):
"""Test empty database with no entries."""
rv = self.app.get('/')
assert 'Nothing logged yet.' in rv.data
def test_report(self):
"""Test reporting weather"""
rv = self.app.get('/report/11210/63/23', follow_redirects=True)
assert b'11210' in rv.data
def test_full_db(self):
"""Test reporting weather"""
rv = self.app.get('/', follow_redirects=True)
assert b'11210' in rv.data
if __name__ == '__main__':
unittest.main()
| [
"weather.init_db",
"os.close",
"weather.app.test_client",
"os.unlink",
"unittest.main",
"tempfile.mkstemp"
] | [((949, 964), 'unittest.main', 'unittest.main', ([], {}), '()\n', (962, 964), False, 'import unittest\n'), ((191, 209), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (207, 209), False, 'import tempfile\n'), ((274, 299), 'weather.app.test_client', 'weather.app.test_client', ([], {}), '()\n', (297, 299), False, 'import weather\n'), ((308, 325), 'weather.init_db', 'weather.init_db', ([], {}), '()\n', (323, 325), False, 'import weather\n'), ((359, 379), 'os.close', 'os.close', (['self.db_fd'], {}), '(self.db_fd)\n', (367, 379), False, 'import os\n'), ((388, 429), 'os.unlink', 'os.unlink', (["weather.app.config['DATABASE']"], {}), "(weather.app.config['DATABASE'])\n", (397, 429), False, 'import os\n')] |
import yagmail
receiver = "<EMAIL>" #Receiver's gmail address
body = "Hello there from Yagmail"
filename = "document.pdf"
yag = yagmail.SMTP("<EMAIL>")#Your gmail address
yag.send(
to=receiver,
subject="Yagmail test (attachment included",
contents=body,
attachments=filename,
)
| [
"yagmail.SMTP"
] | [((136, 159), 'yagmail.SMTP', 'yagmail.SMTP', (['"""<EMAIL>"""'], {}), "('<EMAIL>')\n", (148, 159), False, 'import yagmail\n')] |
##############################################################################
#
# Copyright (c) 2003-2020 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
# Development from 2019 by School of Earth and Environmental Sciences
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2020 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
transformations
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
:var DEG: unit of degree
:var RAD: unit of radiant
"""
__author__="<NAME>, <EMAIL>"
import numpy
import math
_TYPE=numpy.float64
DEG=math.pi/180.
RAD=1.
class Transformation(object):
"""
General class to define an affine transformation *x->Ax+b*.
"""
def __init__(self):
"""
Creates a linear transformation.
"""
pass
def __call__(self,x=numpy.zeros((3,))):
"""
Applies transformation to ``x``.
"""
raise NotImplementeError()
class Translation(Transformation):
"""
Defines a translation *x->x+b*.
"""
def __init__(self,b=numpy.zeros((3,),dtype=_TYPE)):
"""
Creates the linear transformation *x->x+b*.
"""
super(Translation, self).__init__()
self.__b=numpy.array(b,_TYPE)
def __call__(self,x=numpy.zeros((3,))):
"""
Applies translation to ``x``.
"""
return numpy.array(x,_TYPE)+self.__b
class Rotatation(Transformation):
"""
Defines a rotation.
"""
def __init__(self,axis=numpy.ones((3,),dtype=_TYPE),point=numpy.zeros((3,),dtype=_TYPE),angle=0.*RAD):
"""
Creates a rotation using an axis and a point on the axis.
"""
self.__axis=numpy.array(axis,dtype=_TYPE)
self.__point=numpy.array(point,dtype=_TYPE)
lax=numpy.dot(self.__axis,self.__axis)
if not lax>0:
raise ValueError("points must be distinct.")
self.__axis/=math.sqrt(lax)
self.__angle=float(angle)
def __call__(self,x=numpy.zeros((3,))):
"""
Applies the rotation to ``x``.
"""
x=numpy.array(x,_TYPE)
z=x-self.__point
z0=numpy.dot(z,self.__axis)
z_per=z-z0*self.__axis
lz_per=numpy.dot(z_per,z_per)
if lz_per>0:
axis1=z_per/math.sqrt(lz_per)
axis2=_cross(axis1,self.__axis)
lax2=numpy.dot(axis2,axis2)
if lax2>0:
axis2/=math.sqrt(lax2)
return z0*self.__axis+math.sqrt(lz_per)*(math.cos(self.__angle)*axis1-math.sin(self.__angle)*axis2)+self.__point
else:
return x
else:
return x
def _cross(x, y):
"""
Returns the cross product of ``x`` and ``y``.
"""
return numpy.array([x[1] * y[2] - x[2] * y[1], x[2] * y[0] - x[0] * y[2], x[0] * y[1] - x[1] * y[0]], _TYPE)
class Dilation(Transformation):
"""
Defines a dilation.
"""
def __init__(self,factor=1.,center=numpy.zeros((3,),dtype=_TYPE)):
"""
Creates a dilation with a center and a given expansion/contraction
factor.
"""
if not abs(factor)>0:
raise ValueError("factor must be non-zero.")
self.__factor=factor
self.__center=numpy.array(center,dtype=_TYPE)
def __call__(self,x=numpy.zeros((3,))):
"""
Applies dilation to ``x``.
"""
x=numpy.array(x,_TYPE)
return self.__factor*(x-self.__center)+self.__center
class Reflection(Transformation):
"""
Defines a reflection on a plane.
"""
def __init__(self,normal=numpy.ones((3,),dtype=_TYPE),offset=0.):
"""
Defines a reflection on a plane defined in normal form.
"""
self.__normal=numpy.array(normal,dtype=_TYPE)
ln=math.sqrt(numpy.dot(self.__normal,self.__normal))
if not ln>0.:
raise ValueError("normal must have positive length.")
self.__normal/=ln
if isinstance(offset,float) or isinstance(offset,int):
self.__offset=offset/ln
else:
self.__offset=numpy.dot(numpy.array(offset,dtype=_TYPE),self.__normal)
def __call__(self,x=numpy.zeros((3,))):
"""
Applies reflection to ``x``.
"""
x=numpy.array(x,_TYPE)
return x - 2*(numpy.dot(x,self.__normal)-self.__offset)*self.__normal
| [
"numpy.ones",
"math.sqrt",
"math.cos",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"math.sin"
] | [((3470, 3576), 'numpy.array', 'numpy.array', (['[x[1] * y[2] - x[2] * y[1], x[2] * y[0] - x[0] * y[2], x[0] * y[1] - x[1] *\n y[0]]', '_TYPE'], {}), '([x[1] * y[2] - x[2] * y[1], x[2] * y[0] - x[0] * y[2], x[0] * y\n [1] - x[1] * y[0]], _TYPE)\n', (3481, 3576), False, 'import numpy\n'), ((1609, 1626), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (1620, 1626), False, 'import numpy\n'), ((1837, 1867), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {'dtype': '_TYPE'}), '((3,), dtype=_TYPE)\n', (1848, 1867), False, 'import numpy\n'), ((2001, 2022), 'numpy.array', 'numpy.array', (['b', '_TYPE'], {}), '(b, _TYPE)\n', (2012, 2022), False, 'import numpy\n'), ((2047, 2064), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (2058, 2064), False, 'import numpy\n'), ((2272, 2301), 'numpy.ones', 'numpy.ones', (['(3,)'], {'dtype': '_TYPE'}), '((3,), dtype=_TYPE)\n', (2282, 2301), False, 'import numpy\n'), ((2307, 2337), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {'dtype': '_TYPE'}), '((3,), dtype=_TYPE)\n', (2318, 2337), False, 'import numpy\n'), ((2458, 2488), 'numpy.array', 'numpy.array', (['axis'], {'dtype': '_TYPE'}), '(axis, dtype=_TYPE)\n', (2469, 2488), False, 'import numpy\n'), ((2508, 2539), 'numpy.array', 'numpy.array', (['point'], {'dtype': '_TYPE'}), '(point, dtype=_TYPE)\n', (2519, 2539), False, 'import numpy\n'), ((2550, 2585), 'numpy.dot', 'numpy.dot', (['self.__axis', 'self.__axis'], {}), '(self.__axis, self.__axis)\n', (2559, 2585), False, 'import numpy\n'), ((2681, 2695), 'math.sqrt', 'math.sqrt', (['lax'], {}), '(lax)\n', (2690, 2695), False, 'import math\n'), ((2754, 2771), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (2765, 2771), False, 'import numpy\n'), ((2843, 2864), 'numpy.array', 'numpy.array', (['x', '_TYPE'], {}), '(x, _TYPE)\n', (2854, 2864), False, 'import numpy\n'), ((2898, 2923), 'numpy.dot', 'numpy.dot', (['z', 'self.__axis'], {}), '(z, self.__axis)\n', (2907, 2923), False, 'import numpy\n'), ((2967, 2990), 'numpy.dot', 'numpy.dot', (['z_per', 'z_per'], {}), '(z_per, z_per)\n', (2976, 2990), False, 'import numpy\n'), ((3684, 3714), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {'dtype': '_TYPE'}), '((3,), dtype=_TYPE)\n', (3695, 3714), False, 'import numpy\n'), ((3960, 3992), 'numpy.array', 'numpy.array', (['center'], {'dtype': '_TYPE'}), '(center, dtype=_TYPE)\n', (3971, 3992), False, 'import numpy\n'), ((4017, 4034), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (4028, 4034), False, 'import numpy\n'), ((4102, 4123), 'numpy.array', 'numpy.array', (['x', '_TYPE'], {}), '(x, _TYPE)\n', (4113, 4123), False, 'import numpy\n'), ((4300, 4329), 'numpy.ones', 'numpy.ones', (['(3,)'], {'dtype': '_TYPE'}), '((3,), dtype=_TYPE)\n', (4310, 4329), False, 'import numpy\n'), ((4447, 4479), 'numpy.array', 'numpy.array', (['normal'], {'dtype': '_TYPE'}), '(normal, dtype=_TYPE)\n', (4458, 4479), False, 'import numpy\n'), ((4864, 4881), 'numpy.zeros', 'numpy.zeros', (['(3,)'], {}), '((3,))\n', (4875, 4881), False, 'import numpy\n'), ((4951, 4972), 'numpy.array', 'numpy.array', (['x', '_TYPE'], {}), '(x, _TYPE)\n', (4962, 4972), False, 'import numpy\n'), ((2140, 2161), 'numpy.array', 'numpy.array', (['x', '_TYPE'], {}), '(x, _TYPE)\n', (2151, 2161), False, 'import numpy\n'), ((3104, 3127), 'numpy.dot', 'numpy.dot', (['axis2', 'axis2'], {}), '(axis2, axis2)\n', (3113, 3127), False, 'import numpy\n'), ((4499, 4538), 'numpy.dot', 'numpy.dot', (['self.__normal', 'self.__normal'], {}), '(self.__normal, self.__normal)\n', (4508, 4538), False, 'import 
numpy\n'), ((3031, 3048), 'math.sqrt', 'math.sqrt', (['lz_per'], {}), '(lz_per)\n', (3040, 3048), False, 'import math\n'), ((3166, 3181), 'math.sqrt', 'math.sqrt', (['lax2'], {}), '(lax2)\n', (3175, 3181), False, 'import math\n'), ((4792, 4824), 'numpy.array', 'numpy.array', (['offset'], {'dtype': '_TYPE'}), '(offset, dtype=_TYPE)\n', (4803, 4824), False, 'import numpy\n'), ((4993, 5020), 'numpy.dot', 'numpy.dot', (['x', 'self.__normal'], {}), '(x, self.__normal)\n', (5002, 5020), False, 'import numpy\n'), ((3216, 3233), 'math.sqrt', 'math.sqrt', (['lz_per'], {}), '(lz_per)\n', (3225, 3233), False, 'import math\n'), ((3235, 3257), 'math.cos', 'math.cos', (['self.__angle'], {}), '(self.__angle)\n', (3243, 3257), False, 'import math\n'), ((3264, 3286), 'math.sin', 'math.sin', (['self.__angle'], {}), '(self.__angle)\n', (3272, 3286), False, 'import math\n')] |
#!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('LICENSE', encoding='utf-8') as f:
license = f.read()
with open('requirements.txt', encoding='utf-8') as f:
reqs = f.read()
pkgs = [p for p in find_packages() if p.startswith('fastNLP')]
print(pkgs)
setup(
name='FastNLP',
version='0.7.0',
url='https://gitee.com/fastnlp/fastNLP',
description='fastNLP: Deep Learning Toolkit for NLP, developed by Fudan FastNLP Team',
long_description=readme,
long_description_content_type='text/markdown',
license='Apache License',
author='<NAME>',
python_requires='>=3.6',
packages=pkgs,
install_requires=reqs.strip().split('\n'),
)
| [
"setuptools.find_packages"
] | [((315, 330), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (328, 330), False, 'from setuptools import setup, find_packages\n')] |
#enhanced keyboard driver
import copy
import os
import configparser
from pp_displaymanager import DisplayManager
class pp_kbddriver_plus(object):
# control list items
NAME=0 # symbolic name for input and output
DIRECTION = 1 # in/out
MATCH = 2 # for input the character/string to match (no EOL)
MODE= 3 # for input the match mode any-char,char,any-line,line
TEMPLATE=['','','','']
# CLASS VARIABLES (pp_kbddriver_plus.)
driver_active=False
    title='' # used for error reporting and logging
tick_interval='' # mS between polls of the serial input
match_mode='' # char or line, whether input characters are matched for each character or a complete line
inputs={}
# executed by main program and by each object using the driver
def __init__(self):
self.dm=DisplayManager()
# executed once from main program
def init(self,filename,filepath,widget,pp_dir,pp_home,pp_profile,event_callback=None):
# instantiate arguments
self.widget=widget
self.filename=filename
self.filepath=filepath
self.event_callback=event_callback
pp_kbddriver_plus.driver_active = False
# read pp_kbddriver_plus.cfg file.
reason,message=self._read(self.filename,self.filepath)
if reason =='error':
return 'error',message
if self.config.has_section('DRIVER') is False:
return 'error','No DRIVER section in '+self.filepath
# all the below are used by another instance of pp_kbddriver_plus so must reference class variables
# read information from DRIVER section
pp_kbddriver_plus.title=self.config.get('DRIVER','title')
pp_kbddriver_plus.bind_printing = self.config.get('DRIVER','bind-printing')
# construct the control list from the config file
pp_kbddriver_plus.in_names=[]
pp_kbddriver_plus.out_names=[]
for section in self.config.sections():
if section == 'DRIVER':
continue
entry=copy.deepcopy(pp_kbddriver_plus.TEMPLATE)
entry[pp_kbddriver_plus.NAME]=self.config.get(section,'name')
entry[pp_kbddriver_plus.DIRECTION]=self.config.get(section,'direction')
if entry[pp_kbddriver_plus.DIRECTION] == 'none':
continue
elif entry[pp_kbddriver_plus.DIRECTION] == 'in':
entry[pp_kbddriver_plus.MODE]=self.config.get(section,'mode')
if entry[pp_kbddriver_plus.MODE] in ('specific-character','specific-line'):
entry[pp_kbddriver_plus.MATCH]=self.config.get(section,'match')
pp_kbddriver_plus.in_names.append(copy.deepcopy(entry))
else:
return 'error',pp_kbddriver_plus.title + ' direction not in or out'
# print pp_kbddriver_plus.in_names
# bind the keys
self._bind_keys(widget,self._key_received)
# all ok so indicate the driver is active
pp_kbddriver_plus.driver_active=True
# init must return two arguments
return 'normal',pp_kbddriver_plus.title + ' active'
# sets up tkinter keyboard events such that any key press
# does a callback to _key_received() with the event object
def _bind_keys(self,widget,callback):
for display_name in DisplayManager.display_map:
status,message,display_id,canvas=self.dm.id_of_canvas(display_name)
if status !='normal':
continue
            # bind all the normal keys that return a printing character such that x produces pp-key-x (but filtered in _key_received)
canvas.bind("<Key>", lambda event,match='<Key>',name='': self._key_received(event,match,name))
# print 'bind printing'
# Bind <Return> so that eol detection works, <Return> cannot be used to trigger an input event
            # if you want that, use keys.cfg
canvas.bind("<Return>", lambda event,match='<Return>',name='': self._key_received(event,match,name))
# print 'bind Return to make eol work'
# go through entries and bind all specific-character matches to _key_received
for entry in pp_kbddriver_plus.in_names:
if entry[pp_kbddriver_plus.MODE] == 'specific-character':
match = entry[pp_kbddriver_plus.MATCH]
name = entry[pp_kbddriver_plus.NAME]
canvas.bind(match, lambda event, match=match,name=name: self._key_received(event,match,name))
# print 'bind specific-char', match,name
# start method must be defined. If not using inputs just pass
def start(self):
pp_kbddriver_plus.inputs['current-character']=''
pp_kbddriver_plus.inputs['current-line']=''
pp_kbddriver_plus.inputs['previous-line']=''
def _key_received(self,event,match,name):
# generate the events with symbolic names if driver is active
if pp_kbddriver_plus.driver_active is True:
char=event.char
# print 'received ',char,match,name
# if char is eol then match the line and start a new line
if match =='<Return>':
# do match of line
# print 'do match line',pp_kbddriver_plus.inputs['current-line']
self.match_line(pp_kbddriver_plus.inputs['current-line'])
# shuffle and empty the buffer
pp_kbddriver_plus.inputs['previous-line'] = pp_kbddriver_plus.inputs['current-line']
pp_kbddriver_plus.inputs['current-line']=''
pp_kbddriver_plus.inputs['current-character']=''
if name !='':
# print 'bound <Return> key'
if self.event_callback is not None:
self.event_callback(name,pp_kbddriver_plus.title)
else:
# process a character
if char == '' and match == '<Key>':
# unbound special key
# print 'unbound special key ', match
pass
else:
# a character has been received
pp_kbddriver_plus.inputs['current-character']=char
pp_kbddriver_plus.inputs['current-line']+=char
# print pp_kbddriver_plus.inputs['current-character'],pp_kbddriver_plus.inputs['current-line']
if match == '<Key>' and char != '' and self.bind_printing =='yes':
# print 'printable key, bind-printing is yes',char,match
# printable character without overiding section
if self.event_callback is not None:
self.event_callback('pp-key-'+ char,pp_kbddriver_plus.title)
else:
if name != '':
# print 'bound non-printable character',char,name
if self.event_callback is not None:
self.event_callback(name,pp_kbddriver_plus.title)
# look through entries for any-character
for entry in pp_kbddriver_plus.in_names:
if entry[pp_kbddriver_plus.MODE] == 'any-character':
# print 'match any character', char, 'current line is ',pp_kbddriver_plus.inputs['current-line']
if self.event_callback is not None:
self.event_callback(entry[pp_kbddriver_plus.NAME],pp_kbddriver_plus.title)
def match_line(self,line):
for entry in pp_kbddriver_plus.in_names:
if entry[pp_kbddriver_plus.MODE] == 'any-line':
# print 'match any line',line
if self.event_callback is not None:
self.event_callback(entry[pp_kbddriver_plus.NAME],pp_kbddriver_plus.title)
if entry[pp_kbddriver_plus.MODE] == 'specific-line' and line == entry[pp_kbddriver_plus.MATCH]:
# print 'match specific line', line
if self.event_callback is not None:
self.event_callback(entry[pp_kbddriver_plus.NAME],pp_kbddriver_plus.title)
# allow track plugins (or anything else) to access analog input values
def get_input(self,key):
if key in pp_kbddriver_plus.inputs:
return True, pp_kbddriver_plus.inputs[key]
else:
return False, None
# allow querying of driver state
def is_active(self):
return pp_kbddriver_plus.driver_active
# called by main program only. Called when PP is closed down
def terminate(self):
pp_kbddriver_plus.driver_active = False
# ************************************************
# output interface method
# this can be called from many objects so needs to operate on class variables
# ************************************************
# execute an output event
def handle_output_event(self,name,param_type,param_values,req_time):
return 'normal','no output methods'
# ***********************************
# reading .cfg file
# ************************************
def _read(self,filename,filepath):
if os.path.exists(filepath):
self.config = configparser.ConfigParser(inline_comment_prefixes = (';',))
self.config.read(filepath)
return 'normal',filename+' read'
else:
return 'error',filename+' not found at: '+filepath
if __name__ == '__main__':
from tkinter import *
def key_callback(symbol,source):
print('callback',symbol,source,'\n')
if symbol=='pp-stop':
idd.terminate()
exit()
pass
root = Tk()
w = Label(root, text="pp_kbddriver_plus.py test harness")
w.pack()
idd=pp_kbddriver_plus()
reason,message=idd.init('pp_kbddriver_plus.cfg','/home/pi/pipresents/pp_io_config/keys_plus.cfg',root,key_callback)
print(reason,message)
if reason != 'error':
idd.start()
root.mainloop()
| [
"pp_displaymanager.DisplayManager",
"os.path.exists",
"configparser.ConfigParser",
"copy.deepcopy"
] | [((884, 900), 'pp_displaymanager.DisplayManager', 'DisplayManager', ([], {}), '()\n', (898, 900), False, 'from pp_displaymanager import DisplayManager\n'), ((9565, 9589), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (9579, 9589), False, 'import os\n'), ((2132, 2173), 'copy.deepcopy', 'copy.deepcopy', (['pp_kbddriver_plus.TEMPLATE'], {}), '(pp_kbddriver_plus.TEMPLATE)\n', (2145, 2173), False, 'import copy\n'), ((9617, 9674), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {'inline_comment_prefixes': "(';',)"}), "(inline_comment_prefixes=(';',))\n", (9642, 9674), False, 'import configparser\n'), ((2783, 2803), 'copy.deepcopy', 'copy.deepcopy', (['entry'], {}), '(entry)\n', (2796, 2803), False, 'import copy\n')] |
#!/usr/bin/env python
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate textures prepared for OSM, based on image templates."""
import glob
import os
from PIL import Image
# change directory to this script directory in order to allow this script to be called from another directory.
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# get all the template files in put them in a list of tuples
templates = []
for f in glob.glob("*_diffuse_template.jpg"):
templates.append((f, f.replace('_diffuse_', '_color_mask_')))
# target colors
# ref: http://wiki.openstreetmap.org/wiki/Key:colour
# TODO: is it sufficient?
colors = {
'000000': (0.0, 0.0, 0.0),
'FFFFFF': (0.84, 0.84, 0.84),
'808080': (0.4, 0.4, 0.4),
'C0C0C0': (0.65, 0.65, 0.65),
'800000': (0.4, 0.15, 0.15),
'FF0000': (0.45, 0.0, 0.0),
'808000': (0.4, 0.4, 0.2),
'FFFF00': (0.7, 0.6, 0.15),
'008000': (0.15, 0.3, 0.15),
'00FF00': (0.55, 0.69, 0.52),
'008080': (0.15, 0.3, 0.3),
'00FFFF': (0.6, 0.7, 0.7),
'000080': (0.2, 0.2, 0.3),
'0000FF': (0.4, 0.4, 0.75),
'800080': (0.5, 0.4, 0.5),
'FF00FF': (0.9, 0.75, 0.85),
'F5DEB3': (0.83, 0.78, 0.65),
'8B4513': (0.3, 0.1, 0.05)
}
effectFactor = 0.5 # power of the effect, found empirically
# foreach template
for template in templates:
# load the templates
diffuse = Image.open(template[0])
mask = Image.open(template[1])
assert diffuse.size == mask.size
width, height = diffuse.size
# create an image per color
    for colorString, color in colors.items():
image = Image.new('RGB', diffuse.size)
pixels = image.load()
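        # Per-pixel blend: each diffuse channel is shifted by
        # mask * (2*color - 1) * effectFactor, so the mask selects where the
        # tint applies and the target colour brightens or darkens the texture.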
        for x in range(width):
            for y in range(height):
dR, dG, dB = diffuse.getpixel((x, y))
mR, mG, mB = mask.getpixel((x, y))
r = dR + int(255.0 * (mR / 255.0) * (color[0] * 2.0 - 1.0) * effectFactor)
g = dG + int(255.0 * (mG / 255.0) * (color[1] * 2.0 - 1.0) * effectFactor)
b = dB + int(255.0 * (mB / 255.0) * (color[2] * 2.0 - 1.0) * effectFactor)
pixels[x, y] = (r, g, b)
image.save(template[0].replace('_diffuse_template', '_' + colorString))
| [
"os.path.realpath",
"PIL.Image.new",
"PIL.Image.open",
"glob.glob"
] | [((975, 1010), 'glob.glob', 'glob.glob', (['"""*_diffuse_template.jpg"""'], {}), "('*_diffuse_template.jpg')\n", (984, 1010), False, 'import glob\n'), ((1915, 1938), 'PIL.Image.open', 'Image.open', (['template[0]'], {}), '(template[0])\n', (1925, 1938), False, 'from PIL import Image\n'), ((1950, 1973), 'PIL.Image.open', 'Image.open', (['template[1]'], {}), '(template[1])\n', (1960, 1973), False, 'from PIL import Image\n'), ((860, 886), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (876, 886), False, 'import os\n'), ((2143, 2173), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'diffuse.size'], {}), "('RGB', diffuse.size)\n", (2152, 2173), False, 'from PIL import Image\n')] |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional
import pangu.core.backend as B
from pangu.core.backend import Tensor, tensor
from torchmetrics.functional.retrieval.fall_out import retrieval_fall_out
from torchmetrics.retrieval.retrieval_metric import RetrievalMetric
from torchmetrics.utilities.data import get_group_indexes
class RetrievalFallOut(RetrievalMetric):
"""Computes `Fall-out`_.
Works with binary target data. Accepts float predictions from a model output.
Forward accepts:
- ``preds`` (float tensor): ``(N, ...)``
- ``target`` (long or bool tensor): ``(N, ...)``
- ``indexes`` (long tensor): ``(N, ...)``
``indexes``, ``preds`` and ``target`` must have the same dimension.
``indexes`` indicate to which query a prediction belongs.
Predictions will be first grouped by ``indexes`` and then `Fall-out` will be computed as the mean
of the `Fall-out` over each query.
Args:
empty_target_action:
Specify what to do with queries that do not have at least a negative ``target``. Choose from:
            - ``'neg'``: those queries count as ``0.0``
            - ``'pos'``: those queries count as ``1.0`` (default)
- ``'skip'``: skip those queries; if all queries are skipped, ``0.0`` is returned
- ``'error'``: raise a ``ValueError``
k: consider only the top k elements for each query (default: None, which considers them all)
compute_on_step:
Forward only calls ``update()`` and return None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects
the entire world)
dist_sync_fn:
Callback that performs the allgather operation on the metric state. When `None`, DDP
will be used to perform the allgather. default: None
Raises:
ValueError:
If ``k`` parameter is not `None` or an integer larger than 0
Example:
>>> from torchmetrics import RetrievalFallOut
>>> indexes = tensor([0, 0, 0, 1, 1, 1, 1])
>>> preds = tensor([0.2, 0.3, 0.5, 0.1, 0.3, 0.5, 0.2])
>>> target = tensor([False, False, True, False, True, False, True])
>>> fo = RetrievalFallOut(k=2)
>>> fo(preds, target, indexes=indexes)
tensor(0.5000)
"""
higher_is_better = False
def __init__(
self,
empty_target_action: str = "pos",
k: int = None,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Callable = None,
) -> None:
super().__init__(
empty_target_action=empty_target_action,
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
if (k is not None) and not (isinstance(k, int) and k > 0):
raise ValueError("`k` has to be a positive integer or None")
self.k = k
def compute(self) -> Tensor:
"""First concat state `indexes`, `preds` and `target` since they were stored as lists.
After that, compute list of groups that will help in keeping together predictions about the same query. Finally,
for each group compute the `_metric` if the number of negative targets is at least 1, otherwise behave as
specified by `self.empty_target_action`.
"""
indexes = B.cat(self.indexes, dim=0)
preds = B.cat(self.preds, dim=0)
target = B.cat(self.target, dim=0)
res = []
groups = get_group_indexes(indexes)
for group in groups:
mini_preds = preds[group]
mini_target = target[group]
if not (1 - mini_target).sum():
if self.empty_target_action == "error":
raise ValueError("`compute` method was provided with a query with no negative target.")
if self.empty_target_action == "pos":
res.append(tensor(1.0))
elif self.empty_target_action == "neg":
res.append(tensor(0.0))
else:
                # ensure list contains only float tensors
res.append(self._metric(mini_preds, mini_target))
return B.stack([x.to(preds) for x in res]).mean() if res else tensor(0.0).to(preds)
def _metric(self, preds: Tensor, target: Tensor) -> Tensor:
return retrieval_fall_out(preds, target, k=self.k)
| [
"torchmetrics.utilities.data.get_group_indexes",
"pangu.core.backend.cat",
"torchmetrics.functional.retrieval.fall_out.retrieval_fall_out",
"pangu.core.backend.tensor"
] | [((4312, 4338), 'pangu.core.backend.cat', 'B.cat', (['self.indexes'], {'dim': '(0)'}), '(self.indexes, dim=0)\n', (4317, 4338), True, 'import pangu.core.backend as B\n'), ((4355, 4379), 'pangu.core.backend.cat', 'B.cat', (['self.preds'], {'dim': '(0)'}), '(self.preds, dim=0)\n', (4360, 4379), True, 'import pangu.core.backend as B\n'), ((4397, 4422), 'pangu.core.backend.cat', 'B.cat', (['self.target'], {'dim': '(0)'}), '(self.target, dim=0)\n', (4402, 4422), True, 'import pangu.core.backend as B\n'), ((4458, 4484), 'torchmetrics.utilities.data.get_group_indexes', 'get_group_indexes', (['indexes'], {}), '(indexes)\n', (4475, 4484), False, 'from torchmetrics.utilities.data import get_group_indexes\n'), ((5315, 5358), 'torchmetrics.functional.retrieval.fall_out.retrieval_fall_out', 'retrieval_fall_out', (['preds', 'target'], {'k': 'self.k'}), '(preds, target, k=self.k)\n', (5333, 5358), False, 'from torchmetrics.functional.retrieval.fall_out import retrieval_fall_out\n'), ((5213, 5224), 'pangu.core.backend.tensor', 'tensor', (['(0.0)'], {}), '(0.0)\n', (5219, 5224), False, 'from pangu.core.backend import Tensor, tensor\n'), ((4887, 4898), 'pangu.core.backend.tensor', 'tensor', (['(1.0)'], {}), '(1.0)\n', (4893, 4898), False, 'from pangu.core.backend import Tensor, tensor\n'), ((4987, 4998), 'pangu.core.backend.tensor', 'tensor', (['(0.0)'], {}), '(0.0)\n', (4993, 4998), False, 'from pangu.core.backend import Tensor, tensor\n')] |
from arcapix.fs.gpfs.policy import PlacementPolicy
from arcapix.fs.gpfs.rule import MigrateRule
# load placement policy for mmfs1
policy = PlacementPolicy('mmfs1')
# create a new migrate rule for 'sata1'
r = MigrateRule(source='sata1', threshold=(90, 50))
# add rule to start of the policy
policy.rules.insert(r, 0)
# save changes
policy.save()
| [
"arcapix.fs.gpfs.policy.PlacementPolicy",
"arcapix.fs.gpfs.rule.MigrateRule"
] | [((140, 164), 'arcapix.fs.gpfs.policy.PlacementPolicy', 'PlacementPolicy', (['"""mmfs1"""'], {}), "('mmfs1')\n", (155, 164), False, 'from arcapix.fs.gpfs.policy import PlacementPolicy\n'), ((210, 257), 'arcapix.fs.gpfs.rule.MigrateRule', 'MigrateRule', ([], {'source': '"""sata1"""', 'threshold': '(90, 50)'}), "(source='sata1', threshold=(90, 50))\n", (221, 257), False, 'from arcapix.fs.gpfs.rule import MigrateRule\n')] |
from flask import Blueprint, request, render_template, \
flash, g, session, redirect, url_for, jsonify
from app import db, requires_auth
from flask_cors import CORS
from .models import Paste
import uuid
from datetime import datetime
from app.user.models import User
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer
from pygments.formatters import HtmlFormatter
from functools import wraps
from dateutil import parser
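# Decorator: only allow the request when a user is logged in and has
# user_type == 2 (admin); otherwise respond with 401.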
def requires_admin(f):
@wraps(f)
def decorated(*args, **kwargs):
if 'user_id' not in session:
return jsonify(message="Unauthorized", success=False), 401
user_id = session['user_id']
user = User.query.filter(User.id == user_id).first()
if(user.user_type != 2):
return jsonify(message="Unauthorized", success=False), 401
return f(*args, **kwargs)
return decorated
mod_paste = Blueprint('paste', __name__)
CORS(mod_paste)
def is_active(paste):
return parser.parse(paste.expire_time) > datetime.now()
@mod_paste.route('/create_paste', methods=['GET'])
@requires_auth
def create_form():
curr_id = session['user_id']
user = User.query.filter(User.id == curr_id).first()
return render_template('user.html', username=user.username)
@mod_paste.route('/create_paste', methods=['POST'])
def create_paste():
title = request.form['title']
text = request.form['text']
paste_type = request.form['type']
if 'user_id' in session:
user_id = session['user_id']
else:
user = User.query.filter(User.username == 'Guest').first()
user_id = user.id
lang = request.form['lang']
time_form = request.form['time']
expire_time = str(time_form)
add_time = str(datetime.now())
url = str(uuid.uuid4())
report_count = 0
try:
paste = Paste(title, text, lang, add_time,
expire_time, user_id, url, report_count, paste_type)
user = User.query.filter(User.id == user_id).first()
x = user.paste_count
user.paste_count = x + 1
db.session.add(paste)
db.session.commit()
# jsonify(success=True, paste=paste.to_dict())
return jsonify({'url': url}), 200
except:
return jsonify({'error': 'Error while creating Paste, Please check if all fields are filled'}), 400
@mod_paste.route('/paste', methods=['GET'])
@requires_auth
def get_all_pastes():
# user_id = session['user_id']
# pastes = paste.query.filter(paste.user_id == user_id).all()
if 'user_id' in session:
curr_id = session['user_id']
user = User.query.filter(curr_id == User.id).first()
if user.user_type == 2:
return render_template('admin_mypaste.html')
return render_template("mypaste.html")
else:
return jsonify({'error': 'Please Login to Continue'}), 400
# return jsonify(success=True, pastes=[paste.to_dict() for paste in
# pastes])
@mod_paste.route('/api/paste', methods=['POST'])
@requires_auth
def get_all_pastes_object():
user_id = session['user_id']
user = User.query.filter(user_id == User.id).first()
pastes = Paste.query.filter(Paste.user_id == user_id).all()
active = []
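    # Expired pastes are cleaned up lazily while listing: anything past its
    # expire_time is deleted and the owner's paste_count is decremented.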
for paste in pastes:
if is_active(paste):
active.append(paste.to_dict())
else:
userid_to_red = paste.user_id
            user_to_red = User.query.filter(userid_to_red == User.id).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return jsonify({'paste_list': active, 'username': user.username}), 200
@mod_paste.route('/<url>/embed', methods=['GET'])
def embed_code_form(url):
paste = Paste.query.filter(Paste.url == url).first()
if is_active(paste):
return render_template('embed.html', paste_text=paste.text, paste_link="http://127.0.0.1:8080/" + url)
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
# @mod_paste.route('/<url>/embed', methods=['POST'])
# def embed_code(url):
# paste = Paste.query.filter(Paste.url == url).first()
# return jsonify(paste_text = paste.text,paste_link = url)
@mod_paste.route('/<url>/embed/output', methods=['GET'])
def embed_code_disp(url):
paste = Paste.query.filter(Paste.url == url).first()
if is_active(paste):
return render_template('embed_output.html')
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
# @mod_paste.route('/paste', methods=['GET'])
# @requires_auth
# def get_all_pastes():
# # user_id = session['user_id']
# # pastes = paste.query.filter(paste.user_id == user_id).all()
# curr_id = session['user_id']
# user = User.query.filter(User.id == curr_id).first()
# paste_list = Paste.query.filter(curr_id == Paste.user_id).all()
# url_pre = "/"
# for paste in paste_list:
# paste.url = url_pre + paste.url
# if user.user_type == 1:
# return render_template('mypaste.html', paste_list=paste_list)
# return render_template('admin_mypaste.html',paste_list = paste_list)
# # return jsonify(success=True, pastes=[paste.to_dict() for paste in
# # pastes])
#
#
# @mod_paste.route('/api/paste', methods=['POST'])
# @requires_auth
# def get_all_pastes_object():
# user_id = session['user_id']
# user = User.query.filter(user_id == User.id).first()
# pastes = Paste.query.filter(Paste.user_id == user_id).all()
# active = []
# for paste in pastes:
# temp_paste = {}
# if paste.is_active():
# temp_paste['title'] = paste.title
# temp_paste['add_time']=paste.add_time
# temp_paste['expire_time']=paste.expire_time
# temp_paste['lang']=paste.lang
# temp_paste['url']=paste.url
# active.append(temp_paste)
#
# return jsonify({'paste_list':active,'username':user.username}),200
# @mod_paste.route('/paste/<id>', methods=['GET'])
# @requires_auth
# def get_paste(id):
# user_id = session['user_id']
# paste = paste.query.filter(
# Paste.id == id, Paste.user_id == user_id).first()
# if paste is None:
# return render_template("index.html"),4044
# else:
# return jsonify(success=True, paste=paste.to_dict())
# @mod_paste.route('/paste/<id>', methods=['POST'])
# @requires_auth
# def edit_paste(id):
# user_id = session['user_id']
# paste = Paste.query.filter(
# Paste.id == id, Paste.user_id == user_id).first()
# if paste is None:
# return render_template("index.html"),4044
# else:
# paste.title = request.form['title']
# paste.text = request.form['text']
# paste.color = request.form['color']
# paste.lang = request.form['lang']
# db.session.commit()
# return jsonify(success=True)
@mod_paste.route('/<url>/delete', methods=['POST'])
@requires_auth
def delete_paste(url):
user_id = session['user_id']
# print(user_id)
paste = Paste.query.filter(Paste.url == url).first()
user = User.query.filter(User.id == user_id).first()
if paste is None:
return render_template("index.html"), 404
if is_active(paste):
if paste.user_id == user_id or user.user_type == 2:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return jsonify(success=True, user_type=user.user_type), 200
else:
return jsonify(success=False), 400
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
# @mod_paste.route('/<url>', methods=['GET'])
# def display_paste(url):
# paste = Paste.query.filter(Paste.url == url).first()
# style = HtmlFormatter().get_style_defs('.highlight')
# lexer = get_lexer_by_name(paste.lang)
# formatter = HtmlFormatter(linenos=True, cssclass="highlight")
# result = highlight(paste.text, lexer, formatter)
# return render_template("view_paste.html", paste_title=paste.title,
# paste_lang=paste.lang, highlight_style=style,
@mod_paste.route('/<url>', methods=['GET'])
# paste_text=result,paste_rawdata = paste.text)
def display_paste(url):
paste = Paste.query.filter(Paste.url == url).first()
if Paste.query.filter(Paste.url == url).first() != None:
if is_active(paste):
if 'user_id' in session:
if(paste.paste_type == "1" and session['user_id'] != paste.user_id):
return render_template("index.html"), 200
user_id = session['user_id']
user = User.query.filter(User.id == user_id).first()
if user.user_type == 1:
return render_template('view_paste.html')
if user.user_type == 2:
return render_template('view_paste_admin.html')
return render_template("view_paste_guest.html")
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
else:
return render_template("index.html"), 404
@mod_paste.route('/api/<url>', methods=['POST'])
def ret_paste(url):
paste = Paste.query.filter(Paste.url == url).first()
user = User.query.filter(paste.user_id == User.id).first()
if is_active(paste):
return jsonify({'paste_owner': user.username, 'paste_text': paste.text, 'paste_title': paste.title, 'paste_lang': paste.lang, 'paste_add': paste.add_time, 'paste_expire': paste.expire_time}), 200
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
# @mod_paste.route('/<url>/add_report', methods=['POST'])
# @requires_auth
# def to_delete(url):
# paste_to_delete = Paste.query.filter(Paste.url == url).first()
# if paste_to_delete.report_count > 5:
# db.session.delete(paste_to_delete)
# else:
# paste_to_delete.report_count = paste_to_delete.report_count + 1
# db.session.commit()
# curr_id = session['user_id']
# paste_list = Paste.query.filter(Paste.user_id == curr_id).all()
# url_pre = "/"
# for paste in paste_list:
# paste.url = url_pre + paste.url
# return render_template('mypaste.html', paste_list=paste_list)
@mod_paste.route('/<url>/edit', methods=['GET'])
@requires_auth
def edit_form(url):
if 'user_id' in session:
user_id = session['user_id']
paste = Paste.query.filter(Paste.url == url).first()
if is_active(paste):
if paste.user_id == user_id:
return render_template('editpaste.html')
return jsonify(success=False, reply="Not Authorized"), 400
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template("index.html"), 404
return jsonify(success=False, reply="Please Login"), 400
@mod_paste.route('/<url>/edit', methods=['POST'])
@requires_auth
def edit_paste(url):
if 'user_id' in session:
user_id = session['user_id']
paste = Paste.query.filter(Paste.url == url).first()
if not is_active(paste):
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template('index.html'), 404
if paste.user_id != user_id:
return jsonify(success=False, reply="Not Authorized"), 400
title = request.form['title']
text = request.form['text']
lang = request.form['lang']
time_form = request.form['time']
paste_type = request.form['type']
expire_time = str(time_form)
paste.title = title
paste.text = text
paste.lang = lang
paste.expire_time = expire_time
paste.paste_type = paste_type
db.session.commit()
return jsonify(success=True, url=url)
return jsonify(success=False, reply="Please Login")
@mod_paste.route('/admin/pastes', methods=['GET'])
@requires_admin
def all_pastes():
paste_list = db.session.all()
url_pre = "/"
for paste in paste_list:
if is_active(paste):
paste.url = url_pre + paste.url
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return render_template('allpaste.html', paste_list=paste_list)
@mod_paste.route('/<username>/paste', methods=['GET'])
@requires_admin
def get_user_pastes(username):
# user_id = session['user_id']
# pastes = paste.query.filter(paste.user_id == user_id).all()
if 'user_id' in session:
return render_template('user_paste.html')
else:
return jsonify({'error': 'Please Login to Continue'}), 400
# return jsonify(success=True, pastes=[paste.to_dict() for paste in
# pastes])
@mod_paste.route('/<username>/api/paste', methods=['POST'])
#@requires_admin
def get_user_pastes_object(username):
# admin_id = session['user_id']
# admin = User.query.filter(admin_id == User.id).first()
user = User.query.filter(User.username == username).first()
pastes = Paste.query.filter(Paste.user_id == user.id).all()
active = []
for paste in pastes:
if is_active(paste):
active.append(paste.to_dict())
else:
userid_to_red = paste.user_id
user_to_red = User.query.filter(User.id == userid_to_red).first()
user_to_red.paste_count = user_to_red.paste_count - 1
db.session.delete(paste)
db.session.commit()
return jsonify({'paste_list': active, 'username': user.username}), 200
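# Usage sketch (illustrative, not part of the original blueprint): consuming the
# JSON endpoints defined above with the `requests` library. The host address is
# the development address used elsewhere in this module; adjust as needed, and
# note that <url> / <username> are placeholders for real values.
#
#     import requests
#
#     # Single paste by its short url token:
#     r = requests.post("http://127.0.0.1:8080/api/<url>")
#     r.json()   # {'paste_owner': ..., 'paste_text': ..., 'paste_title': ...,
#                #  'paste_lang': ..., 'paste_add': ..., 'paste_expire': ...}
#
#     # All active pastes of a user:
#     r = requests.post("http://127.0.0.1:8080/<username>/api/paste")
#     r.json()   # {'paste_list': [...], 'username': ...}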
| [
"flask.render_template",
"dateutil.parser.parse",
"app.db.session.commit",
"app.db.session.delete",
"flask_cors.CORS",
"functools.wraps",
"uuid.uuid4",
"datetime.datetime.now",
"app.db.session.all",
"app.db.session.add",
"flask.Blueprint",
"app.user.models.User.query.filter",
"flask.jsonify"
] | [((885, 913), 'flask.Blueprint', 'Blueprint', (['"""paste"""', '__name__'], {}), "('paste', __name__)\n", (894, 913), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((914, 929), 'flask_cors.CORS', 'CORS', (['mod_paste'], {}), '(mod_paste)\n', (918, 929), False, 'from flask_cors import CORS\n'), ((516, 524), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (521, 524), False, 'from functools import wraps\n'), ((1190, 1242), 'flask.render_template', 'render_template', (['"""user.html"""'], {'username': 'user.username'}), "('user.html', username=user.username)\n", (1205, 1242), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((12287, 12331), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'reply': '"""Please Login"""'}), "(success=False, reply='Please Login')\n", (12294, 12331), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((12433, 12449), 'app.db.session.all', 'db.session.all', ([], {}), '()\n', (12447, 12449), False, 'from app import db, requires_auth\n'), ((12775, 12830), 'flask.render_template', 'render_template', (['"""allpaste.html"""'], {'paste_list': 'paste_list'}), "('allpaste.html', paste_list=paste_list)\n", (12790, 12830), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((962, 993), 'dateutil.parser.parse', 'parser.parse', (['paste.expire_time'], {}), '(paste.expire_time)\n', (974, 993), False, 'from dateutil import parser\n'), ((996, 1010), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1008, 1010), False, 'from datetime import datetime\n'), ((1666, 1680), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1678, 1680), False, 'from datetime import datetime\n'), ((1693, 1705), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1703, 1705), False, 'import uuid\n'), ((1943, 1964), 'app.db.session.add', 'db.session.add', (['paste'], {}), '(paste)\n', (1957, 1964), False, 'from app import db, requires_auth\n'), ((1967, 1986), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1984, 1986), False, 'from app import db, requires_auth\n'), ((2556, 2587), 'flask.render_template', 'render_template', (['"""mypaste.html"""'], {}), "('mypaste.html')\n", (2571, 2587), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((3289, 3347), 'flask.jsonify', 'jsonify', (["{'paste_list': active, 'username': user.username}"], {}), "({'paste_list': active, 'username': user.username})\n", (3296, 3347), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((3516, 3616), 'flask.render_template', 'render_template', (['"""embed.html"""'], {'paste_text': 'paste.text', 'paste_link': "('http://127.0.0.1:8080/' + url)"}), "('embed.html', paste_text=paste.text, paste_link=\n 'http://127.0.0.1:8080/' + url)\n", (3531, 3616), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((3777, 3801), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (3794, 3801), False, 'from app import db, requires_auth\n'), ((3804, 3823), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3821, 3823), False, 'from app import db, requires_auth\n'), ((4231, 4267), 'flask.render_template', 'render_template', 
(['"""embed_output.html"""'], {}), "('embed_output.html')\n", (4246, 4267), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((4433, 4457), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (4450, 4457), False, 'from app import db, requires_auth\n'), ((4460, 4479), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4477, 4479), False, 'from app import db, requires_auth\n'), ((7767, 7791), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (7784, 7791), False, 'from app import db, requires_auth\n'), ((7794, 7813), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (7811, 7813), False, 'from app import db, requires_auth\n'), ((9914, 9938), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (9931, 9938), False, 'from app import db, requires_auth\n'), ((9941, 9960), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9958, 9960), False, 'from app import db, requires_auth\n'), ((11269, 11313), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'reply': '"""Please Login"""'}), "(success=False, reply='Please Login')\n", (11276, 11313), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((12219, 12238), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (12236, 12238), False, 'from app import db, requires_auth\n'), ((12248, 12278), 'flask.jsonify', 'jsonify', ([], {'success': '(True)', 'url': 'url'}), '(success=True, url=url)\n', (12255, 12278), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((13065, 13099), 'flask.render_template', 'render_template', (['"""user_paste.html"""'], {}), "('user_paste.html')\n", (13080, 13099), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((13897, 13955), 'flask.jsonify', 'jsonify', (["{'paste_list': active, 'username': user.username}"], {}), "({'paste_list': active, 'username': user.username})\n", (13904, 13955), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((1136, 1173), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == curr_id)'], {}), '(User.id == curr_id)\n', (1153, 1173), False, 'from app.user.models import User\n'), ((2045, 2066), 'flask.jsonify', 'jsonify', (["{'url': url}"], {}), "({'url': url})\n", (2052, 2066), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((2509, 2546), 'flask.render_template', 'render_template', (['"""admin_mypaste.html"""'], {}), "('admin_mypaste.html')\n", (2524, 2546), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((2604, 2650), 'flask.jsonify', 'jsonify', (["{'error': 'Please Login to Continue'}"], {}), "({'error': 'Please Login to Continue'})\n", (2611, 2650), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((2870, 2907), 'app.user.models.User.query.filter', 'User.query.filter', (['(user_id == User.id)'], {}), '(user_id == User.id)\n', (2887, 2907), False, 'from app.user.models import User\n'), ((3128, 3171), 'app.user.models.User.query.filter', 'User.query.filter', (['(userid_to_red == User.id)'], {}), '(userid_to_red == User.id)\n', (3145, 3171), False, 
'from app.user.models import User\n'), ((3232, 3256), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (3249, 3256), False, 'from app import db, requires_auth\n'), ((3260, 3279), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3277, 3279), False, 'from app import db, requires_auth\n'), ((3833, 3862), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (3848, 3862), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((4489, 4518), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (4504, 4518), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((7098, 7135), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == user_id)'], {}), '(User.id == user_id)\n', (7115, 7135), False, 'from app.user.models import User\n'), ((7172, 7201), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (7187, 7201), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((7445, 7469), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (7462, 7469), False, 'from app import db, requires_auth\n'), ((7473, 7492), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (7490, 7492), False, 'from app import db, requires_auth\n'), ((7823, 7852), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (7838, 7852), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((8989, 9029), 'flask.render_template', 'render_template', (['"""view_paste_guest.html"""'], {}), "('view_paste_guest.html')\n", (9004, 9029), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((9200, 9224), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (9217, 9224), False, 'from app import db, requires_auth\n'), ((9228, 9247), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (9245, 9247), False, 'from app import db, requires_auth\n'), ((9309, 9338), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (9324, 9338), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((9477, 9520), 'app.user.models.User.query.filter', 'User.query.filter', (['(paste.user_id == User.id)'], {}), '(paste.user_id == User.id)\n', (9494, 9520), False, 'from app.user.models import User\n'), ((9560, 9751), 'flask.jsonify', 'jsonify', (["{'paste_owner': user.username, 'paste_text': paste.text, 'paste_title':\n paste.title, 'paste_lang': paste.lang, 'paste_add': paste.add_time,\n 'paste_expire': paste.expire_time}"], {}), "({'paste_owner': user.username, 'paste_text': paste.text,\n 'paste_title': paste.title, 'paste_lang': paste.lang, 'paste_add':\n paste.add_time, 'paste_expire': paste.expire_time})\n", (9567, 9751), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((9970, 9999), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (9985, 9999), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((11168, 11192), 
'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (11185, 11192), False, 'from app import db, requires_auth\n'), ((11196, 11215), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (11213, 11215), False, 'from app import db, requires_auth\n'), ((11708, 11732), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (11725, 11732), False, 'from app import db, requires_auth\n'), ((11736, 11755), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (11753, 11755), False, 'from app import db, requires_auth\n'), ((12719, 12743), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (12736, 12743), False, 'from app import db, requires_auth\n'), ((12747, 12766), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (12764, 12766), False, 'from app import db, requires_auth\n'), ((13116, 13162), 'flask.jsonify', 'jsonify', (["{'error': 'Please Login to Continue'}"], {}), "({'error': 'Please Login to Continue'})\n", (13123, 13162), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((13465, 13509), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.username == username)'], {}), '(User.username == username)\n', (13482, 13509), False, 'from app.user.models import User\n'), ((13841, 13865), 'app.db.session.delete', 'db.session.delete', (['paste'], {}), '(paste)\n', (13858, 13865), False, 'from app import db, requires_auth\n'), ((13869, 13888), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (13886, 13888), False, 'from app import db, requires_auth\n'), ((599, 645), 'flask.jsonify', 'jsonify', ([], {'message': '"""Unauthorized"""', 'success': '(False)'}), "(message='Unauthorized', success=False)\n", (606, 645), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((691, 728), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == user_id)'], {}), '(User.id == user_id)\n', (708, 728), False, 'from app.user.models import User\n'), ((774, 820), 'flask.jsonify', 'jsonify', ([], {'message': '"""Unauthorized"""', 'success': '(False)'}), "(message='Unauthorized', success=False)\n", (781, 820), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((1485, 1528), 'app.user.models.User.query.filter', 'User.query.filter', (["(User.username == 'Guest')"], {}), "(User.username == 'Guest')\n", (1502, 1528), False, 'from app.user.models import User\n'), ((1845, 1882), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == user_id)'], {}), '(User.id == user_id)\n', (1862, 1882), False, 'from app.user.models import User\n'), ((2090, 2181), 'flask.jsonify', 'jsonify', (["{'error': 'Error while creating Paste, Please check if all fields are filled'}"], {}), "({'error':\n 'Error while creating Paste, Please check if all fields are filled'})\n", (2097, 2181), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((2427, 2464), 'app.user.models.User.query.filter', 'User.query.filter', (['(curr_id == User.id)'], {}), '(curr_id == User.id)\n', (2444, 2464), False, 'from app.user.models import User\n'), ((3667, 3710), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (3684, 3710), False, 'from app.user.models import User\n'), ((4323, 4366), 
'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (4340, 4366), False, 'from app.user.models import User\n'), ((7503, 7550), 'flask.jsonify', 'jsonify', ([], {'success': '(True)', 'user_type': 'user.user_type'}), '(success=True, user_type=user.user_type)\n', (7510, 7550), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((7574, 7596), 'flask.jsonify', 'jsonify', ([], {'success': '(False)'}), '(success=False)\n', (7581, 7596), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((7657, 7700), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (7674, 7700), False, 'from app.user.models import User\n'), ((9258, 9287), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (9273, 9287), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((9804, 9847), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (9821, 9847), False, 'from app.user.models import User\n'), ((10902, 10935), 'flask.render_template', 'render_template', (['"""editpaste.html"""'], {}), "('editpaste.html')\n", (10917, 10935), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((10946, 10992), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'reply': '"""Not Authorized"""'}), "(success=False, reply='Not Authorized')\n", (10953, 10992), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((11226, 11255), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (11241, 11255), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((11766, 11795), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (11781, 11795), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((11842, 11888), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'reply': '"""Not Authorized"""'}), "(success=False, reply='Not Authorized')\n", (11849, 11888), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((7333, 7376), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (7350, 7376), False, 'from app.user.models import User\n'), ((8863, 8897), 'flask.render_template', 'render_template', (['"""view_paste.html"""'], {}), "('view_paste.html')\n", (8878, 8897), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((8938, 8978), 'flask.render_template', 'render_template', (['"""view_paste_admin.html"""'], {}), "('view_paste_admin.html')\n", (8953, 8978), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((9088, 9131), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (9105, 9131), False, 'from app.user.models import User\n'), ((11056, 11099), 
'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (11073, 11099), False, 'from app.user.models import User\n'), ((11596, 11639), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (11613, 11639), False, 'from app.user.models import User\n'), ((12607, 12650), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (12624, 12650), False, 'from app.user.models import User\n'), ((13729, 13772), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == userid_to_red)'], {}), '(User.id == userid_to_red)\n', (13746, 13772), False, 'from app.user.models import User\n'), ((8698, 8727), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (8713, 8727), False, 'from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for, jsonify\n'), ((8777, 8814), 'app.user.models.User.query.filter', 'User.query.filter', (['(User.id == user_id)'], {}), '(User.id == user_id)\n', (8794, 8814), False, 'from app.user.models import User\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs a neural network model on a camera live stream.
"""
import argparse
import cv2
import numpy as np
import os
import time
import sys
COMMANDS = {0: "move_forward", 1: "go_down", 2: "rot_10_deg",
3: "go_up", 4: "take_off", 5: "land", 6: "idle"}
def send_command(anafi, command_id):
"""
    Send a command to an Anafi drone based on the command id.
"""
if command_id not in COMMANDS:
        raise ValueError(f"Command id not in COMMANDS choices: {command_id}")
print("The following command will be sent: ", COMMANDS[command_id])
if COMMANDS[command_id] == "move_forward":
anafi.move_relative(dx=1, dy=0, dz=0, dradians=0)
if COMMANDS[command_id] == "go_down":
anafi.move_relative(dx=0, dy=0, dz=-0.5, dradians=0)
if COMMANDS[command_id] == "rot_10_deg":
anafi.move_relative(dx=0, dy=0, dz=0, dradians=0.785)
if COMMANDS[command_id] == "go_up":
anafi.move_relative(dx=0, dy=0, dz=0.5, dradians=0)
if COMMANDS[command_id] == "take_off":
anafi.safe_takeoff(5)
if COMMANDS[command_id] == "land":
anafi.safe_land(5)
return
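# Usage sketch (illustrative, not part of the original script): mapping a model
# prediction index to a drone action. Assumes `anafi` is a connected pyparrot
# Anafi instance, as created in main() below.
#
#     send_command(anafi, 4)   # COMMANDS[4] == "take_off"
#     send_command(anafi, 2)   # COMMANDS[2] == "rot_10_deg"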
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-p",
"--weight_path",
required=True,
type=str,
help="Path to load weights for the model."
)
parser.add_argument(
"-a",
"--pyparrot_path",
required=True,
type=str,
help="Path to pyparrot module downloaded from amymcgovern on github."
)
parser.add_argument(
"-w",
"--img_width",
required=False,
default=28,
type=int,
help="Image width."
)
parser.add_argument(
"-n",
"--num_classes",
required=False,
default=7,
type=int,
help="Number of classes."
)
parser.add_argument(
"-c",
"--crop",
required=False,
default=None,
type=str,
help="Crop image, format: MinWidth,MaxWidth,MinHeight,MaxHeight.\
Set -1 for the unchanged ones"
)
parser.add_argument(
"-r",
"--resize",
required=False,
default=None,
type=str,
help="Resize shape, format: height,width"
)
parser.add_argument(
"-b",
"--binarize",
required=False,
default=None,
type=str,
help="To binarize images, format for thresholding: min,max"
)
parser.add_argument(
"-g",
"--gray",
required=False,
action="store_true",
help="To save 1-channel images"
)
parser.add_argument(
"-e",
"--erode",
required=False,
default=None,
type=str,
help="Erode option, format: kernel_size,iteration"
)
parser.add_argument(
"-d",
"--dilate",
required=False,
default=None,
type=str,
help="Dilate option, format: kernel_size,iteration"
)
parser.add_argument(
"-m",
"--camid",
required=False,
default=0,
type=int,
help="Camera ID, default is 0"
)
parser.add_argument(
"-t",
"--tensorflow",
required=False,
action="store_true",
help="To specify if Tensorflow model is used."
)
parser.add_argument(
"-z",
"--number_of_confimation",
required=False,
default=3,
type=int,
help="Minimum number of identical commands before sending to drone."
)
args = parser.parse_args()
"""
Drone connection
"""
sys.path.append(args.pyparrot_path)
from pyparrot.Anafi import Anafi
print("Connecting to drone...")
anafi = Anafi(drone_type="Anafi", ip_address="192.168.42.1")
success = anafi.connect(10)
print(success)
print("Sleeping few seconds...")
anafi.smart_sleep(3)
"""
Load model
"""
print("Loading model...")
input_size = args.img_width**2
num_class = args.num_classes
hidden_size = 128
if args.tensorflow:
import tensorflow as tf
model = tf.keras.models.load_model(args.weight_path)
else:
script_path = os.path.realpath(__file__)
sys.path.append(os.path.dirname(script_path) + "/../")
from homemade_framework import framework as NN
model = NN.Sequential([NN.Linear(input_size, hidden_size),
NN.LeakyReLU(), NN.BatchNorm(),
NN.Linear(hidden_size, hidden_size),
NN.LeakyReLU(), NN.BatchNorm(),
NN.Linear(hidden_size, num_class),
NN.Softmax()], NN.LossMSE())
model.load(args.weight_path)
"""
Webcam process
"""
print("Start webcam...")
cam = cv2.VideoCapture(args.camid)
ret, frame = cam.read()
min_height, max_height = 0, frame.shape[0]
min_width, max_width = 0, frame.shape[1]
print("Cam resolution: {}x{}".format(max_width, max_height))
if args.crop is not None:
res = [int(x) for x in args.crop.split(',')]
if res[0] != -1:
min_width = res[0]
if res[1] != -1:
max_width = res[1]
if res[2] != -1:
min_height = res[2]
if res[3] != -1:
max_height = res[3]
print("Image cropped to minWidth:maxWidth, minHeight:maxHeight: {}:{}\
, {},{}".format(min_width, max_width, min_height, max_height))
pause = False
imgs = []
while True:
ret, frame = cam.read()
if not ret:
print("failed to grab frame")
break
if args.crop is not None:
frame = frame[min_height:max_height, min_width:max_width]
cv2.imshow("Original image", frame)
k = cv2.waitKey(1)
if k % 256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
elif k % 256 == ord('p'):
# p pressed
if pause:
pause = False
else:
pause = True
if not pause:
if args.gray:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if args.binarize:
frame = cv2.medianBlur(frame, 5)
min_thresh, max_thresh = [int(x) for x in
args.binarize.split(',')]
ret, frame = cv2.threshold(frame, min_thresh, max_thresh,
cv2.THRESH_BINARY)
if args.erode is not None:
k_size, iteration = [int(x) for x in args.erode.split(',')]
kernel = np.ones((k_size, k_size), np.uint8)
frame = cv2.erode(frame, kernel, iterations=int(iteration))
if args.dilate is not None:
k_size, iteration = [int(x) for x in args.dilate.split(',')]
kernel = np.ones((k_size, k_size), np.uint8)
frame = cv2.dilate(frame, kernel, iterations=int(iteration))
if args.resize:
height, width = [int(size) for size in args.resize.split(',')]
frame = cv2.resize(frame, (height, width),
interpolation=cv2.INTER_AREA)
image = np.asarray(frame)/255.
cv2.imshow("Input image for the model", frame)
image = image.reshape([np.prod(image.shape)])
if len(imgs) < args.number_of_confimation:
imgs.append(image)
else:
if args.tensorflow:
results = np.argmax(model(np.asarray(imgs)), axis=1)
else:
results = NN.get_inferences(model, np.asarray(imgs))
print("Model's output on buffer: ", results)
if np.unique(results).size == 1 and\
COMMANDS[results[0]] != "idle":
send_command(anafi, results[0])
imgs = []
imgs = imgs[1:]
imgs.append(image)
time.sleep(0.3)
cam.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| [
"numpy.prod",
"time.sleep",
"cv2.imshow",
"homemade_framework.framework.LossMSE",
"homemade_framework.framework.Softmax",
"cv2.destroyAllWindows",
"tensorflow.keras.models.load_model",
"sys.path.append",
"homemade_framework.framework.LeakyReLU",
"argparse.ArgumentParser",
"cv2.threshold",
"numpy.asarray",
"cv2.medianBlur",
"homemade_framework.framework.Linear",
"cv2.waitKey",
"numpy.ones",
"pyparrot.Anafi.Anafi",
"os.path.dirname",
"cv2.cvtColor",
"cv2.resize",
"numpy.unique",
"homemade_framework.framework.BatchNorm",
"os.path.realpath",
"cv2.VideoCapture"
] | [((1221, 1246), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1244, 1246), False, 'import argparse\n'), ((3712, 3747), 'sys.path.append', 'sys.path.append', (['args.pyparrot_path'], {}), '(args.pyparrot_path)\n', (3727, 3747), False, 'import sys\n'), ((3833, 3885), 'pyparrot.Anafi.Anafi', 'Anafi', ([], {'drone_type': '"""Anafi"""', 'ip_address': '"""192.168.42.1"""'}), "(drone_type='Anafi', ip_address='192.168.42.1')\n", (3838, 3885), False, 'from pyparrot.Anafi import Anafi\n'), ((4945, 4973), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.camid'], {}), '(args.camid)\n', (4961, 4973), False, 'import cv2\n'), ((8269, 8292), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8290, 8292), False, 'import cv2\n'), ((4224, 4268), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['args.weight_path'], {}), '(args.weight_path)\n', (4250, 4268), True, 'import tensorflow as tf\n'), ((4301, 4327), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4317, 4327), False, 'import os\n'), ((5898, 5933), 'cv2.imshow', 'cv2.imshow', (['"""Original image"""', 'frame'], {}), "('Original image', frame)\n", (5908, 5933), False, 'import cv2\n'), ((5947, 5961), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5958, 5961), False, 'import cv2\n'), ((4819, 4831), 'homemade_framework.framework.LossMSE', 'NN.LossMSE', ([], {}), '()\n', (4829, 4831), True, 'from homemade_framework import framework as NN\n'), ((7482, 7528), 'cv2.imshow', 'cv2.imshow', (['"""Input image for the model"""', 'frame'], {}), "('Input image for the model', frame)\n", (7492, 7528), False, 'import cv2\n'), ((8230, 8245), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (8240, 8245), False, 'import time\n'), ((4352, 4380), 'os.path.dirname', 'os.path.dirname', (['script_path'], {}), '(script_path)\n', (4367, 4380), False, 'import os\n'), ((4477, 4511), 'homemade_framework.framework.Linear', 'NN.Linear', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (4486, 4511), True, 'from homemade_framework import framework as NN\n'), ((4544, 4558), 'homemade_framework.framework.LeakyReLU', 'NN.LeakyReLU', ([], {}), '()\n', (4556, 4558), True, 'from homemade_framework import framework as NN\n'), ((4560, 4574), 'homemade_framework.framework.BatchNorm', 'NN.BatchNorm', ([], {}), '()\n', (4572, 4574), True, 'from homemade_framework import framework as NN\n'), ((4607, 4642), 'homemade_framework.framework.Linear', 'NN.Linear', (['hidden_size', 'hidden_size'], {}), '(hidden_size, hidden_size)\n', (4616, 4642), True, 'from homemade_framework import framework as NN\n'), ((4675, 4689), 'homemade_framework.framework.LeakyReLU', 'NN.LeakyReLU', ([], {}), '()\n', (4687, 4689), True, 'from homemade_framework import framework as NN\n'), ((4691, 4705), 'homemade_framework.framework.BatchNorm', 'NN.BatchNorm', ([], {}), '()\n', (4703, 4705), True, 'from homemade_framework import framework as NN\n'), ((4738, 4771), 'homemade_framework.framework.Linear', 'NN.Linear', (['hidden_size', 'num_class'], {}), '(hidden_size, num_class)\n', (4747, 4771), True, 'from homemade_framework import framework as NN\n'), ((4804, 4816), 'homemade_framework.framework.Softmax', 'NN.Softmax', ([], {}), '()\n', (4814, 4816), True, 'from homemade_framework import framework as NN\n'), ((6306, 6345), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (6318, 6345), False, 'import cv2\n'), ((6400, 6424), 'cv2.medianBlur', 
'cv2.medianBlur', (['frame', '(5)'], {}), '(frame, 5)\n', (6414, 6424), False, 'import cv2\n'), ((6580, 6643), 'cv2.threshold', 'cv2.threshold', (['frame', 'min_thresh', 'max_thresh', 'cv2.THRESH_BINARY'], {}), '(frame, min_thresh, max_thresh, cv2.THRESH_BINARY)\n', (6593, 6643), False, 'import cv2\n'), ((6827, 6862), 'numpy.ones', 'np.ones', (['(k_size, k_size)', 'np.uint8'], {}), '((k_size, k_size), np.uint8)\n', (6834, 6862), True, 'import numpy as np\n'), ((7081, 7116), 'numpy.ones', 'np.ones', (['(k_size, k_size)', 'np.uint8'], {}), '((k_size, k_size), np.uint8)\n', (7088, 7116), True, 'import numpy as np\n'), ((7326, 7390), 'cv2.resize', 'cv2.resize', (['frame', '(height, width)'], {'interpolation': 'cv2.INTER_AREA'}), '(frame, (height, width), interpolation=cv2.INTER_AREA)\n', (7336, 7390), False, 'import cv2\n'), ((7447, 7464), 'numpy.asarray', 'np.asarray', (['frame'], {}), '(frame)\n', (7457, 7464), True, 'import numpy as np\n'), ((7564, 7584), 'numpy.prod', 'np.prod', (['image.shape'], {}), '(image.shape)\n', (7571, 7584), True, 'import numpy as np\n'), ((7881, 7897), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (7891, 7897), True, 'import numpy as np\n'), ((7777, 7793), 'numpy.asarray', 'np.asarray', (['imgs'], {}), '(imgs)\n', (7787, 7793), True, 'import numpy as np\n'), ((7979, 7997), 'numpy.unique', 'np.unique', (['results'], {}), '(results)\n', (7988, 7997), True, 'import numpy as np\n')] |
from allennlp.common import JsonDict
from allennlp.data import DatasetReader, Instance
from allennlp.models import Model
from allennlp.predictors import Predictor
from overrides import overrides
@Predictor.register("sentence_classifier")
class SentenceClassifierPredictor(Predictor):
def predict(self, sentence: str) -> JsonDict:
return self.predict_json({"sentence": sentence})
@overrides
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
sentence = json_dict["sentence"]
return self._dataset_reader.text_to_instance(sentence)
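# Usage sketch (illustrative, not part of the original module): loading a trained
# archive and querying the predictor registered above. The archive path is a
# hypothetical placeholder.
#
#     from allennlp.predictors import Predictor
#
#     predictor = Predictor.from_path("model.tar.gz", "sentence_classifier")
#     predictor.predict("This movie was great!")   # -> JsonDict of model outputs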
| [
"allennlp.predictors.Predictor.register"
] | [((198, 239), 'allennlp.predictors.Predictor.register', 'Predictor.register', (['"""sentence_classifier"""'], {}), "('sentence_classifier')\n", (216, 239), False, 'from allennlp.predictors import Predictor\n')] |
# Created on Sep 7, 2020
# author: <NAME>
# contact: <EMAIL>
import os
output_dir = os.path.curdir
def skinnytk2(R=1):
"""
This function generates the relations of Skinny-n-n for R rounds.
tk ================================================> TWEAKEY_P(tk) ===> ---
SB AC | P MC SB AC |
x_0 ===> x_0 ===> x_0 ===> + ===> y_0 ===> P(y_0) ===> x_1 ===> x_1 ===> x_1 ===> + ===> y_1 ===> ---
"""
cipher_name = 'skinnytk2'
P = [0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12]
TKP = [9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7]
tk1 = ['tk1_%d' % i for i in range(16)]
tk2 = ['tk2_%d' % i for i in range(16)]
# 1 round
# recommended_mg = 8
# recommended_ms = 4
# 2 rounds
# recommended_mg = 16
# recommended_ms = 8
# 3 rounds
# recommended_mg = 19
# recommended_ms = 24
# 4 rounds
# recommended_mg = 21
# recommended_ms = 27
# 5 rounds
# recommended_mg = 22
# recommended_ms = 35
# 6 rounds
# recommended_mg = 25
# recommended_ms = 40
# 7 rounds
# recommended_mg = 26
# recommended_ms = 70
# 8 rounds
# recommended_mg = 28
# recommended_ms = 80
# 9 rounds
# recommended_mg = 28
# recommended_ms = 100
# 10 rounds
recommended_mg = 30
recommended_ms = 100
# 11 rounds
# recommended_mg = 31
# recommended_ms = 100
eqs = '#%s %d Rounds\n' % (cipher_name, R)
eqs += 'connection relations\n'
for r in range(R):
xin = ['x_%d_%d' % (r, i) for i in range(16)]
xout = ['x_%d_%d' % (r + 1, i) for i in range(16)]
y = ['y_%d_%d' % (r, i) for i in range(16)]
tk = ['tk_%d_%d' % (r, i) for i in range(8)]
# Generaete AddTweakey relations
for i in range(4):
for j in range(4):
if i < 2:
eqs += '%s, %s, %s\n' % (tk1[j + 4*i], tk2[j + 4*i], tk[j + 4*i])
eqs += '%s, %s, %s\n' % (xin[j + 4*i], tk[j + 4*i], y[j + 4*i])
else:
eqs += '%s, %s\n' % (xin[j + 4*i], y[j + 4*i])
# Apply ShiftRows
py = [y[P[i]] for i in range(16)]
# Generate MixColumn relations
for j in range(4):
eqs += '%s, %s, %s, %s\n' % (py[j + 0*4], py[j + 2*4], py[j + 3*4], xout[j + 0*4])
eqs += '%s, %s\n' % (py[j], xout[j + 1*4])
eqs += '%s, %s, %s\n' % (py[j + 1*4], py[j + 2*4], xout[j + 2*4])
eqs += '%s, %s, %s\n' % (py[j + 0*4], py[j + 2*4], xout[j + 3*4])
# Update Tweakey
temp1 = tk1.copy()
temp2 = tk2.copy()
tk1 = [temp1[TKP[i]] for i in range(16)]
tk2 = [temp2[TKP[i]] for i in range(16)]
plaintext = ['x_0_%d' % i for i in range(16)]
ciphertext = ['x_%d_%d' % (R, i) for i in range(16)]
eqs += 'known\n' + '\n'.join(plaintext + ciphertext)
eqs += '\nend'
relation_file_path = os.path.join(output_dir, 'relationfile_%s_%dr_mg%d_ms%d.txt' % (cipher_name, R, recommended_mg, recommended_ms))
with open(relation_file_path, 'w') as relation_file:
relation_file.write(eqs)
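# Illustrative sketch (not part of the original script) of the file produced by
# skinnytk2 for R=10: the AddTweakey step emits lines such as
#
#     #skinnytk2 10 Rounds
#     connection relations
#     tk1_0, tk2_0, tk_0_0
#     x_0_0, tk_0_0, y_0_0
#     tk1_1, tk2_1, tk_0_1
#     x_0_1, tk_0_1, y_0_1
#     ...
#
# followed by the ShiftRows/MixColumns relations of each column and, at the end,
# the `known` section listing the plaintext and ciphertext variables.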
def main():
skinnytk2(R=10)
if __name__ == '__main__':
main()
| [
"os.path.join"
] | [((3054, 3170), 'os.path.join', 'os.path.join', (['output_dir', "('relationfile_%s_%dr_mg%d_ms%d.txt' % (cipher_name, R, recommended_mg,\n recommended_ms))"], {}), "(output_dir, 'relationfile_%s_%dr_mg%d_ms%d.txt' % (cipher_name,\n R, recommended_mg, recommended_ms))\n", (3066, 3170), False, 'import os\n')] |
import urllib.request
import json
from .models import News
# Getting api key
api_key = None
# Getting the movie base url
base_url = None
def configure_request(app):
global api_key,base_url
api_key = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_API_BASE_URL']
def get_news_source(country,category):
'''
    Function that gets the JSON response for our URL request.
'''
get_news_source_url = base_url.format(country,category,api_key)
    with urllib.request.urlopen(get_news_source_url) as url:
get_news_source_data = url.read()
get_news_source_response = json.loads(get_news_source_data)
print(get_news_source_response)
source_result = None
if get_news_source_response['articles']:
source_result_list = get_news_source_response['articles']
source_result = process_result(source_result_list)
return source_result
def process_result(source_list):
'''
    This function processes the results and converts them into a list.
    source_list is a list of dictionaries containing news results.
'''
source_result= []
for source_item in source_list:
source = source_item.get('source')
author = source_item.get('author')
title = source_item.get('title')
description = source_item.get('description')
url = source_item.get('url')
urlToImage = source_item.get('urlToImage')
publishedAt = source_item.get('publishedAt')
if urlToImage:
source_object = News(source,author,title,description,url,urlToImage,publishedAt)
source_result.append(source_object)
return source_result
def get_news(source):
get_news_details_url = base_url.format(source,api_key)
with urllib.request.urlopen(get_news_details_url) as url:
news_details_data = url.read()
news_details_response = json.loads(news_details_data)
news_object = None
if news_details_response:
source = news_details_response.get('source')
author = news_details_response.get('original_author')
title = news_details_response.get('title')
description = news_details_response.get('description')
url = news_details_response.get('url')
        urlToImage = news_details_response.get('urlToImage')
        publishedAt = news_details_response.get('publishedAt')
        news_object = News(source, author, title, description, url, urlToImage, publishedAt)
return news_object
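# Usage sketch (illustrative, not part of the original module). The URL templates
# come from the Flask config (NEWS_API_BASE_URL), so the country/category values
# below are hypothetical examples.
#
#     configure_request(app)                       # once, at app start-up
#     sources = get_news_source('us', 'business')  # -> list of News objects or None
#     for article in sources or []:
#         print(article.title, article.url)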
| [
"json.loads"
] | [((604, 636), 'json.loads', 'json.loads', (['get_news_source_data'], {}), '(get_news_source_data)\n', (614, 636), False, 'import json\n'), ((1880, 1909), 'json.loads', 'json.loads', (['news_details_data'], {}), '(news_details_data)\n', (1890, 1909), False, 'import json\n')] |
import numpy as np
from hypernet.src.general import const
from hypernet.src.general import utils
from hypernet.src.thermophysicalModels.reactionThermo.mixture import Basic
class MultiComponent(Basic):
# Initialization
###########################################################################
def __init__(
self,
specieThermos,
*args,
**kwargs
):
super(MultiComponent, self).__init__(specieThermos)
# Methods
###########################################################################
# Mixture properties ------------------------------------------------------
def update(self, XY, var='Y'):
# Update mass/molar fractions
for name, value in XY.items():
value = utils.check_XY(utils.convert_to_array(value))
setattr(self.spTh[name].specie, var, value)
# Update mixture/species properties
self.M = self.M_(var=var)
if var == 'Y':
self.Xi_()
elif var == 'X':
self.Yi_()
self.R = self.R_()
# Mixture properties ------------------------------------------------------
# Mass
def M_(self, var='Y'):
# [kg/mol]
if var == 'Y':
M = [spTh.specie.Y / spTh.specie.M for spTh in self.spTh.values()]
return 1./np.sum(np.concatenate(M))
elif var == 'X':
M = [spTh.specie.X * spTh.specie.M for spTh in self.spTh.values()]
return np.sum(np.concatenate(M))
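    # Worked example (illustrative, not from the original source): for a binary
    # N2/O2 mixture with mass fractions Y = (0.768, 0.232) and molar masses
    # M = (0.028, 0.032) kg/mol, the 'Y' branch gives
    #     M_mix = 1 / (0.768/0.028 + 0.232/0.032) ≈ 0.0288 kg/mol,
    # while the 'X' branch is simply the mole-fraction weighted sum of the M_i.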
# Specific gas constant
def R_(self):
R = [spTh.specie.Y * spTh.specie.R for spTh in self.spTh.values()]
return np.sum(np.concatenate(R))
# Pressure
def p_(self, rho, T):
return rho*self.R*T
# Density
def rho_(self, p, T):
return p/(self.R*T)
# Number density
def n_(self, rho):
self.ni_(rho=rho, var='Y')
n = [spTh.specie.n for spTh in self.spTh.values()]
return np.sum(np.concatenate(n))
# Enthalpy/Energy
def he_(self):
# [J/kg]
he = [spTh.specie.Y * spTh.thermo.he for spTh in self.spTh.values()]
return np.sum(np.concatenate(he))
def cpv_(self):
# [J/(kg K)]
cpv = [spTh.specie.Y * spTh.thermo.cpv for spTh in self.spTh.values()]
return np.sum(np.concatenate(cpv))
def dcpvdT_(self):
# [J/kg]
dcpvdT = [
spTh.specie.Y * spTh.thermo.dcpvdT for spTh in self.spTh.values()
]
return np.sum(np.concatenate(dcpvdT))
def dhedY_(self, dY):
# [J/kg]
dhedY = [
np.sum(dY[name] * spTh.thermo.he) \
for name, spTh in self.spTh.items()
]
return np.sum(dhedY)
# Species properties ------------------------------------------------------
def Yi_(self):
for spTh_ in self.spTh.values():
sp = spTh_.specie
sp.Y = sp.X * sp.M / self.M
def Xi_(self):
for spTh_ in self.spTh.values():
sp = spTh_.specie
sp.X = sp.Y * self.M / sp.M
def ni_(self, rho=None, n=None, var='Y'):
for spTh_ in self.spTh.values():
sp = spTh_.specie
if var == 'Y':
sp.n = sp.Y * rho / sp.M * const.UNA
elif var == 'X':
sp.n = sp.X * n
def rhoi_(self, rho=None, n=None, var='Y'):
for spTh_ in self.spTh.values():
sp = spTh_.specie
if var == 'Y':
sp.rho = sp.Y * rho
elif var == 'X':
sp.rho = sp.X * n * sp.M / const.UNA
| [
"hypernet.src.general.utils.convert_to_array",
"numpy.sum",
"numpy.concatenate"
] | [((2710, 2723), 'numpy.sum', 'np.sum', (['dhedY'], {}), '(dhedY)\n', (2716, 2723), True, 'import numpy as np\n'), ((1649, 1666), 'numpy.concatenate', 'np.concatenate', (['R'], {}), '(R)\n', (1663, 1666), True, 'import numpy as np\n'), ((1968, 1985), 'numpy.concatenate', 'np.concatenate', (['n'], {}), '(n)\n', (1982, 1985), True, 'import numpy as np\n'), ((2145, 2163), 'numpy.concatenate', 'np.concatenate', (['he'], {}), '(he)\n', (2159, 2163), True, 'import numpy as np\n'), ((2308, 2327), 'numpy.concatenate', 'np.concatenate', (['cpv'], {}), '(cpv)\n', (2322, 2327), True, 'import numpy as np\n'), ((2499, 2521), 'numpy.concatenate', 'np.concatenate', (['dcpvdT'], {}), '(dcpvdT)\n', (2513, 2521), True, 'import numpy as np\n'), ((2597, 2630), 'numpy.sum', 'np.sum', (['(dY[name] * spTh.thermo.he)'], {}), '(dY[name] * spTh.thermo.he)\n', (2603, 2630), True, 'import numpy as np\n'), ((782, 811), 'hypernet.src.general.utils.convert_to_array', 'utils.convert_to_array', (['value'], {}), '(value)\n', (804, 811), False, 'from hypernet.src.general import utils\n'), ((1337, 1354), 'numpy.concatenate', 'np.concatenate', (['M'], {}), '(M)\n', (1351, 1354), True, 'import numpy as np\n'), ((1486, 1503), 'numpy.concatenate', 'np.concatenate', (['M'], {}), '(M)\n', (1500, 1503), True, 'import numpy as np\n')] |
from __future__ import print_function
from scipy.linalg import block_diag
from scipy.stats import norm as ndist
from scipy.interpolate import interp1d
import collections
import numpy as np
from numpy import log
from numpy.linalg import norm, qr, inv, eig
import pandas as pd
import regreg.api as rr
from .randomization import randomization
from ..base import restricted_estimator
from ..algorithms.barrier_affine import solve_barrier_affine_py as solver
from ..distributions.discrete_family import discrete_family
class group_lasso(object):
def __init__(self,
loglike,
groups,
weights,
ridge_term,
randomizer,
use_lasso=True, # should lasso solver be used where applicable - defaults to True
perturb=None):
_check_groups(groups) # make sure groups looks sensible
# log likelihood : quadratic loss
self.loglike = loglike
self.nfeature = self.loglike.shape[0]
# ridge parameter
self.ridge_term = ridge_term
# group lasso penalty (from regreg)
# use regular lasso penalty if all groups are size 1
if use_lasso and groups.size == np.unique(groups).size:
# need to provide weights an an np.array rather than a dictionary
weights_np = np.array([w[1] for w in sorted(weights.items())])
self.penalty = rr.weighted_l1norm(weights=weights_np,
lagrange=1.)
else:
self.penalty = rr.group_lasso(groups,
weights=weights,
lagrange=1.)
# store groups as a class variable since the non-group lasso doesn't
self.groups = groups
self._initial_omega = perturb
# gaussian randomization
self.randomizer = randomizer
def fit(self,
solve_args={'tol': 1.e-12, 'min_its': 50},
perturb=None):
# solve the randomized version of group lasso
(self.initial_soln,
self.initial_subgrad) = self._solve_randomized_problem(perturb=perturb,
solve_args=solve_args)
# initialize variables
active_groups = [] # active group labels
active_dirs = {} # dictionary: keys are group labels, values are unit-norm coefficients
unpenalized = [] # selected groups with no penalty
        overall = np.ones(self.nfeature, dtype=bool)  # mask of active features
ordered_groups = [] # active group labels sorted by label
ordered_opt = [] # gamma's ordered by group labels
ordered_vars = [] # indices "ordered" by sorting group labels
tol = 1.e-20
_, self.randomizer_prec = self.randomizer.cov_prec
# now we are collecting the directions and norms of the active groups
for g in sorted(np.unique(self.groups)): # g is group label
group_mask = self.groups == g
soln = self.initial_soln # do not need to keep setting this
if norm(soln[group_mask]) > tol * norm(soln): # is group g appreciably nonzero
ordered_groups.append(g)
# variables in active group
ordered_vars.extend(np.flatnonzero(group_mask))
if self.penalty.weights[g] == 0:
unpenalized.append(g)
else:
active_groups.append(g)
active_dirs[g] = soln[group_mask] / norm(soln[group_mask])
ordered_opt.append(norm(soln[group_mask]))
else:
overall[group_mask] = False
self.selection_variable = {'directions': active_dirs,
'active_groups': active_groups} # kind of redundant with keys of active_dirs
self._ordered_groups = ordered_groups
# exception if no groups are selected
if len(self.selection_variable['active_groups']) == 0:
return np.sign(soln), soln
# otherwise continue as before
self.observed_opt_state = np.hstack(ordered_opt) # gammas as array
_beta_unpenalized = restricted_estimator(self.loglike, # refit OLS on E
overall,
solve_args=solve_args)
beta_bar = np.zeros(self.nfeature)
beta_bar[overall] = _beta_unpenalized # refit OLS beta with zeros
self._beta_full = beta_bar
X, y = self.loglike.data
W = self._W = self.loglike.saturated_loss.hessian(X.dot(beta_bar)) # all 1's for LS
opt_linearNoU = np.dot(X.T, X[:, ordered_vars] * W[:, np.newaxis])
for i, var in enumerate(ordered_vars):
opt_linearNoU[var, i] += self.ridge_term
opt_offset = self.initial_subgrad
self.observed_score_state = -opt_linearNoU.dot(_beta_unpenalized)
self.observed_score_state[~overall] += self.loglike.smooth_objective(beta_bar, 'grad')[~overall]
active_signs = np.sign(self.initial_soln)
active = np.flatnonzero(active_signs)
self.active = active
def compute_Vg(ug):
pg = ug.size # figure out size of g'th group
if pg > 1:
Z = np.column_stack((ug, np.eye(pg, pg - 1)))
Q, _ = qr(Z)
Vg = Q[:, 1:] # drop the first column
else:
Vg = np.zeros((1, 0)) # if the group is size one, the orthogonal complement is empty
return Vg
def compute_Lg(g):
pg = active_dirs[g].size
Lg = self.penalty.weights[g] * np.eye(pg)
return Lg
sorted_active_dirs = collections.OrderedDict(sorted(active_dirs.items()))
Vs = [compute_Vg(ug) for ug in sorted_active_dirs.values()]
V = block_diag(*Vs) # unpack the list
Ls = [compute_Lg(g) for g in sorted_active_dirs]
L = block_diag(*Ls) # unpack the list
XE = X[:, ordered_vars] # changed to ordered_vars
Q = XE.T.dot(self._W[:, None] * XE)
QI = inv(Q)
C = V.T.dot(QI).dot(L).dot(V)
self.XE = XE
self.Q = Q
self.QI = QI
self.C = C
U = block_diag(*[ug for ug in sorted_active_dirs.values()]).T
self.opt_linear = opt_linearNoU.dot(U)
self.active_dirs = active_dirs
self.opt_offset = opt_offset
self.ordered_vars = ordered_vars
self.linear_part = -np.eye(self.observed_opt_state.shape[0])
self.offset = np.zeros(self.observed_opt_state.shape[0])
return active_signs, soln
def _solve_randomized_problem(self,
perturb=None,
solve_args={'tol': 1.e-15, 'min_its': 100}):
# take a new perturbation if supplied
if perturb is not None:
self._initial_omega = perturb
if self._initial_omega is None:
self._initial_omega = self.randomizer.sample()
quad = rr.identity_quadratic(self.ridge_term,
0,
-self._initial_omega,
0)
problem = rr.simple_problem(self.loglike, self.penalty)
# if all groups are size 1, set up lasso penalty and run usual lasso solver... (see existing code)...
initial_soln = problem.solve(quad, **solve_args)
initial_subgrad = -(self.loglike.smooth_objective(initial_soln,
'grad') +
quad.objective(initial_soln, 'grad'))
return initial_soln, initial_subgrad
@staticmethod
def gaussian(X,
Y,
groups,
weights,
sigma=1.,
quadratic=None,
ridge_term=0.,
perturb=None,
use_lasso=True, # should lasso solver be used when applicable - defaults to True
randomizer_scale=None):
loglike = rr.glm.gaussian(X, Y, coef=1. / sigma ** 2, quadratic=quadratic)
n, p = X.shape
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
ridge_term = np.std(Y) * np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
randomizer_scale = np.sqrt(mean_diag) * 0.5 * np.std(Y) * np.sqrt(n / (n - 1.))
randomizer = randomization.isotropic_gaussian((p,), randomizer_scale)
return group_lasso(loglike,
groups,
weights,
ridge_term,
randomizer,
use_lasso,
perturb)
def _setup_implied_gaussian(self):
_, prec = self.randomizer.cov_prec
if np.asarray(prec).shape in [(), (0,)]:
cond_precision = self.opt_linear.T.dot(self.opt_linear) * prec
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T) * prec
else:
cond_precision = self.opt_linear.T.dot(prec.dot(self.opt_linear))
cond_cov = inv(cond_precision)
logdens_linear = cond_cov.dot(self.opt_linear.T).dot(prec)
cond_mean = -logdens_linear.dot(self.observed_score_state + self.opt_offset)
self.cond_mean = cond_mean
self.cond_cov = cond_cov
self.cond_precision = cond_precision
self.logdens_linear = logdens_linear
return cond_mean, cond_cov, cond_precision, logdens_linear
def selective_MLE(self,
solve_args={'tol': 1.e-12},
level=0.9,
useJacobian=True,
dispersion=None):
"""Do selective_MLE for group_lasso
        Note: this masks the selective_MLE inherited from query
        because that is not adapted for the group lasso. It also assumes
        that the fit method has already been run, since the conditional
        mean/covariance, the Jacobian pieces (self.C, self.active_dirs)
        and the observed optimization state are taken from that fit.

        Parameters
        ----------
        solve_args : dict
            Arguments passed on to the barrier solver.
        level : float
            Confidence level of the reported intervals.
        useJacobian : bool
            Whether to include the Jacobian term in the barrier problem
            (built from self.C and self.active_dirs computed in fit).
        dispersion : float or None
            Dispersion parameter; if None it is estimated via
            Pearson's X^2 in selected_targets.
"""
self._setup_implied_gaussian() # Calculate useful quantities
(observed_target, target_cov, target_score_cov, alternatives) = self.selected_targets(dispersion)
init_soln = self.observed_opt_state # just the gammas
cond_mean = self.cond_mean
cond_cov = self.cond_cov
logdens_linear = self.logdens_linear
linear_part = self.linear_part
offset = self.offset
if np.asarray(observed_target).shape in [(), (0,)]:
raise ValueError('no target specified')
observed_target = np.atleast_1d(observed_target)
prec_target = inv(target_cov)
prec_opt = self.cond_precision
score_offset = self.observed_score_state + self.opt_offset
# target_lin determines how the conditional mean of optimization variables
# vary with target
# logdens_linear determines how the argument of the optimization density
# depends on the score, not how the mean depends on score, hence the minus sign
target_linear = target_score_cov.T.dot(prec_target)
target_offset = score_offset - target_linear.dot(observed_target)
target_lin = - logdens_linear.dot(target_linear)
target_off = cond_mean - target_lin.dot(observed_target)
if np.asarray(self.randomizer_prec).shape in [(), (0,)]:
_P = target_linear.T.dot(target_offset) * self.randomizer_prec
_prec = prec_target + (target_linear.T.dot(target_linear) * self.randomizer_prec) - target_lin.T.dot(
prec_opt).dot(
target_lin)
else:
_P = target_linear.T.dot(self.randomizer_prec).dot(target_offset)
_prec = prec_target + (target_linear.T.dot(self.randomizer_prec).dot(target_linear)) - target_lin.T.dot(
prec_opt).dot(target_lin)
C = target_cov.dot(_P - target_lin.T.dot(prec_opt).dot(target_off))
conjugate_arg = prec_opt.dot(cond_mean)
val, soln, hess = solve_barrier_affine_jacobian_py(conjugate_arg,
prec_opt,
init_soln,
linear_part,
offset,
self.C,
self.active_dirs,
useJacobian,
**solve_args)
final_estimator = target_cov.dot(_prec).dot(observed_target) \
+ target_cov.dot(target_lin.T.dot(prec_opt.dot(cond_mean - soln))) + C
unbiased_estimator = target_cov.dot(_prec).dot(observed_target) + target_cov.dot(
_P - target_lin.T.dot(prec_opt).dot(target_off))
L = target_lin.T.dot(prec_opt)
observed_info_natural = _prec + L.dot(target_lin) - L.dot(hess.dot(L.T))
observed_info_mean = target_cov.dot(observed_info_natural.dot(target_cov))
Z_scores = final_estimator / np.sqrt(np.diag(observed_info_mean))
pvalues = ndist.cdf(Z_scores)
pvalues = 2 * np.minimum(pvalues, 1 - pvalues)
alpha = 1 - level
quantile = ndist.ppf(1 - alpha / 2.)
intervals = np.vstack([final_estimator -
quantile * np.sqrt(np.diag(observed_info_mean)),
final_estimator +
quantile * np.sqrt(np.diag(observed_info_mean))]).T
log_ref = val + conjugate_arg.T.dot(cond_cov).dot(conjugate_arg) / 2.
result = pd.DataFrame({'MLE': final_estimator,
'SE': np.sqrt(np.diag(observed_info_mean)),
'Zvalue': Z_scores,
'pvalue': pvalues,
'lower_confidence': intervals[:, 0],
'upper_confidence': intervals[:, 1],
'unbiased': unbiased_estimator})
return result, observed_info_mean, log_ref
def selected_targets(self,
dispersion=None,
solve_args={'tol': 1.e-12, 'min_its': 50}):
X, y = self.loglike.data
n, p = X.shape
XE = self.XE
Q = self.Q
observed_target = restricted_estimator(self.loglike, self.ordered_vars, solve_args=solve_args)
_score_linear = -XE.T.dot(self._W[:, None] * X).T
alternatives = ['twosided'] * len(self.active)
if dispersion is None: # use Pearson's X^2
dispersion = ((y - self.loglike.saturated_loss.mean_function(
XE.dot(observed_target))) ** 2 / self._W).sum() / (n - XE.shape[1])
cov_target = self.QI * dispersion
crosscov_target_score = _score_linear.dot(self.QI).T * dispersion
return (observed_target,
cov_target,
crosscov_target_score,
alternatives)
class approximate_grid_inference(object):
def __init__(self,
query,
dispersion,
solve_args={'tol': 1.e-12},
useIP=True):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
query : `gaussian_query`
A Gaussian query which has information
to describe implied Gaussian.
observed_target : ndarray
Observed estimate of target.
target_cov : ndarray
        Estimated covariance of target.
target_score_cov : ndarray
Estimated covariance of target and score of randomized query.
solve_args : dict, optional
Arguments passed to solver.
"""
self.solve_args = solve_args
result, inverse_info = query.selective_MLE(dispersion=dispersion)[:2]
self.linear_part = query.linear_part
self.offset = query.offset
self.logdens_linear = query.logdens_linear
self.cond_mean = query.cond_mean
self.prec_opt = np.linalg.inv(query.cond_cov)
self.cond_cov = query.cond_cov
self.C = query.C
self.active_dirs = query.active_dirs
(observed_target, target_cov, target_score_cov, alternatives) = query.selected_targets(dispersion)
self.observed_target = observed_target
self.target_score_cov = target_score_cov
self.target_cov = target_cov
self.init_soln = query.observed_opt_state
self.randomizer_prec = query.randomizer_prec
self.score_offset = query.observed_score_state + query.opt_offset
self.ntarget = ntarget = target_cov.shape[0]
_scale = 4 * np.sqrt(np.diag(inverse_info))
if useIP == False:
ngrid = 1000
self.stat_grid = np.zeros((ntarget, ngrid))
for j in range(ntarget):
self.stat_grid[j, :] = np.linspace(observed_target[j] - 1.5 * _scale[j],
observed_target[j] + 1.5 * _scale[j],
num=ngrid)
else:
ngrid = 100
self.stat_grid = np.zeros((ntarget, ngrid))
for j in range(ntarget):
self.stat_grid[j, :] = np.linspace(observed_target[j] - 1.5 * _scale[j],
observed_target[j] + 1.5 * _scale[j],
num=ngrid)
self.opt_linear = query.opt_linear
self.useIP = useIP
def summary(self,
alternatives=None,
parameter=None,
level=0.9):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
alternatives : [str], optional
Sequence of strings describing the alternatives,
should be values of ['twosided', 'less', 'greater']
parameter : np.array
Hypothesized value for parameter -- defaults to 0.
level : float
Confidence level.
"""
if parameter is not None:
pivots = self._approx_pivots(parameter,
alternatives=alternatives)
else:
pivots = None
pvalues = self._approx_pivots(np.zeros_like(self.observed_target),
alternatives=alternatives)
lower, upper = self._approx_intervals(level=level)
result = pd.DataFrame({'target': self.observed_target,
'pvalue': pvalues,
'lower_confidence': lower,
'upper_confidence': upper})
if not np.all(parameter == 0):
result.insert(4, 'pivot', pivots)
result.insert(5, 'parameter', parameter)
return result
def log_reference(self,
observed_target,
target_cov,
target_score_cov,
grid):
"""
Approximate the log of the reference density on a grid.
"""
if np.asarray(observed_target).shape in [(), (0,)]:
raise ValueError('no target specified')
prec_target = np.linalg.inv(target_cov)
target_lin = - self.logdens_linear.dot(target_score_cov.T.dot(prec_target))
ref_hat = []
for k in range(grid.shape[0]):
# in the usual D = N + Gamma theta.hat,
# target_lin is "something" times Gamma,
# where "something" comes from implied Gaussian
# cond_mean is "something" times D
# Gamma is target_score_cov.T.dot(prec_target)
num_opt = self.prec_opt.shape[0]
num_con = self.linear_part.shape[0]
cond_mean_grid = (target_lin.dot(np.atleast_1d(grid[k] - observed_target)) +
self.cond_mean)
#direction for decomposing o
eta = -self.prec_opt.dot(self.logdens_linear.dot(target_score_cov.T))
implied_mean = np.asscalar(eta.T.dot(cond_mean_grid))
implied_cov = np.asscalar(eta.T.dot(self.cond_cov).dot(eta))
implied_prec = 1./implied_cov
_A = self.cond_cov.dot(eta) * implied_prec
R = np.identity(num_opt) - _A.dot(eta.T)
A = self.linear_part.dot(_A).reshape((-1,))
b = self.offset-self.linear_part.dot(R).dot(self.init_soln)
conjugate_arg = implied_mean * implied_prec
val, soln, _ = solver(np.asarray([conjugate_arg]),
np.reshape(implied_prec, (1,1)),
eta.T.dot(self.init_soln),
A.reshape((A.shape[0],1)),
b,
**self.solve_args)
gamma_ = _A.dot(soln) + R.dot(self.init_soln)
log_jacob = jacobian_grad_hess(gamma_, self.C, self.active_dirs)
ref_hat.append(-val - ((conjugate_arg ** 2) * implied_cov)/ 2. + log_jacob[0])
return np.asarray(ref_hat)
def _construct_families(self):
self._construct_density()
self._families = []
for m in range(self.ntarget):
p = self.target_score_cov.shape[1]
observed_target_uni = (self.observed_target[m]).reshape((1,))
target_cov_uni = (np.diag(self.target_cov)[m]).reshape((1, 1))
target_score_cov_uni = self.target_score_cov[m, :].reshape((1, p))
var_target = 1. / ((self.precs[m])[0, 0])
log_ref = self.log_reference(observed_target_uni,
target_cov_uni,
target_score_cov_uni,
self.stat_grid[m])
if self.useIP == False:
logW = (log_ref - 0.5 * (self.stat_grid[m] - self.observed_target[m]) ** 2 / var_target)
logW -= logW.max()
self._families.append(discrete_family(self.stat_grid[m],
np.exp(logW)))
else:
approx_fn = interp1d(self.stat_grid[m],
log_ref,
kind='quadratic',
bounds_error=False,
fill_value='extrapolate')
grid = np.linspace(self.stat_grid[m].min(), self.stat_grid[m].max(), 1000)
logW = (approx_fn(grid) -
0.5 * (grid - self.observed_target[m]) ** 2 / var_target)
logW -= logW.max()
self._families.append(discrete_family(grid,
np.exp(logW)))
def _approx_pivots(self,
mean_parameter,
alternatives=None):
if not hasattr(self, "_families"):
self._construct_families()
if alternatives is None:
alternatives = ['twosided'] * self.ntarget
pivot = []
for m in range(self.ntarget):
family = self._families[m]
var_target = 1. / ((self.precs[m])[0, 0])
mean = self.S[m].dot(mean_parameter[m].reshape((1,))) + self.r[m]
_cdf = family.cdf((mean[0] - self.observed_target[m]) / var_target, x=self.observed_target[m])
print("variable completed ", m)
if alternatives[m] == 'twosided':
pivot.append(2 * min(_cdf, 1 - _cdf))
elif alternatives[m] == 'greater':
pivot.append(1 - _cdf)
elif alternatives[m] == 'less':
pivot.append(_cdf)
else:
raise ValueError('alternative should be in ["twosided", "less", "greater"]')
return pivot
def _approx_intervals(self,
level=0.9):
if not hasattr(self, "_families"):
self._construct_families()
lower, upper = [], []
for m in range(self.ntarget):
# construction of intervals from families follows `selectinf.learning.core`
family = self._families[m]
observed_target = self.observed_target[m]
l, u = family.equal_tailed_interval(observed_target,
alpha=1 - level)
var_target = 1. / ((self.precs[m])[0, 0])
lower.append(l * var_target + observed_target)
upper.append(u * var_target + observed_target)
return np.asarray(lower), np.asarray(upper)
### Private method
def _construct_density(self):
precs = {}
S = {}
r = {}
p = self.target_score_cov.shape[1]
for m in range(self.ntarget):
observed_target_uni = (self.observed_target[m]).reshape((1,))
target_cov_uni = (np.diag(self.target_cov)[m]).reshape((1, 1))
prec_target = 1. / target_cov_uni
target_score_cov_uni = self.target_score_cov[m, :].reshape((1, p))
target_linear = target_score_cov_uni.T.dot(prec_target)
target_offset = (self.score_offset - target_linear.dot(observed_target_uni)).reshape(
(target_linear.shape[0],))
target_lin = -self.logdens_linear.dot(target_linear)
target_off = (self.cond_mean - target_lin.dot(observed_target_uni)).reshape((target_lin.shape[0],))
_prec = prec_target + (target_linear.T.dot(target_linear) * self.randomizer_prec) - target_lin.T.dot(
self.prec_opt).dot(target_lin)
_P = target_linear.T.dot(target_offset) * self.randomizer_prec
_r = (1. / _prec).dot(target_lin.T.dot(self.prec_opt).dot(target_off) - _P)
_S = np.linalg.inv(_prec).dot(prec_target)
S[m] = _S
r[m] = _r
precs[m] = _prec
self.precs = precs
self.S = S
self.r = r
def solve_barrier_affine_jacobian_py(conjugate_arg,
precision,
feasible_point,
con_linear,
con_offset,
C,
active_dirs,
useJacobian=True,
step=1,
nstep=2000,
min_its=500,
tol=1.e-12):
"""
This needs to be updated to actually use the Jacobian information (in self.C)
arguments
    conjugate_arg: \\bar{\\Sigma}^{-1} \\bar{\\mu}
precision: \\bar{\\Sigma}^{-1}
feasible_point: gamma's from fitting
con_linear: linear part of affine constraint used for barrier function
con_offset: offset part of affine constraint used for barrier function
C: V^T Q^{-1} \\Lambda V
active_dirs:
"""
scaling = np.sqrt(np.diag(con_linear.dot(precision).dot(con_linear.T)))
if feasible_point is None:
feasible_point = 1. / scaling
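    # Descriptive note: the gradient-descent loop below (with step halving) minimises, over the feasible
    # set {g : con_linear g < con_offset},
    #     -g^T conjugate_arg + g^T precision g / 2 - log J(g) + sum(log(1 + scaling / (con_offset - con_linear g)))
    # and returns the optimal value, the minimiser and the inverse Hessian at the solution.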
def objective(gs):
p1 = -gs.T.dot(conjugate_arg)
p2 = gs.T.dot(precision).dot(gs) / 2.
if useJacobian:
p3 = - jacobian_grad_hess(gs, C, active_dirs)[0]
else:
p3 = 0
p4 = log(1. + 1. / ((con_offset - con_linear.dot(gs)) / scaling)).sum()
return p1 + p2 + p3 + p4
def grad(gs):
p1 = -conjugate_arg + precision.dot(gs)
p2 = -con_linear.T.dot(1. / (scaling + con_offset - con_linear.dot(gs)))
if useJacobian:
p3 = - jacobian_grad_hess(gs, C, active_dirs)[1]
else:
p3 = 0
p4 = 1. / (con_offset - con_linear.dot(gs))
return p1 + p2 + p3 + p4
def barrier_hessian(gs): # contribution of barrier and jacobian to hessian
p1 = con_linear.T.dot(np.diag(-1. / ((scaling + con_offset - con_linear.dot(gs)) ** 2.)
+ 1. / ((con_offset - con_linear.dot(gs)) ** 2.))).dot(con_linear)
if useJacobian:
p2 = - jacobian_grad_hess(gs, C, active_dirs)[2]
else:
p2 = 0
return p1 + p2
current = feasible_point
current_value = np.inf
for itercount in range(nstep):
cur_grad = grad(current)
# make sure proposal is feasible
count = 0
while True:
count += 1
proposal = current - step * cur_grad
if np.all(con_offset - con_linear.dot(proposal) > 0):
break
step *= 0.5
if count >= 40:
raise ValueError('not finding a feasible point')
# make sure proposal is a descent
count = 0
while True:
count += 1
proposal = current - step * cur_grad
proposed_value = objective(proposal)
if proposed_value <= current_value:
break
step *= 0.5
if count >= 20:
if not (np.isnan(proposed_value) or np.isnan(current_value)):
break
else:
raise ValueError('value is NaN: %f, %f' % (proposed_value, current_value))
# stop if relative decrease is small
if np.fabs(current_value - proposed_value) < tol * np.fabs(current_value) and itercount >= min_its:
current = proposal
current_value = proposed_value
break
current = proposal
current_value = proposed_value
if itercount % 4 == 0:
step *= 2
hess = inv(precision + barrier_hessian(current))
return current_value, current, hess
# Jacobian calculations
def calc_GammaMinus(gamma, active_dirs):
"""Calculate Gamma^minus (as a function of gamma vector, active directions)
"""
to_diag = [[g] * (ug.size - 1) for (g, ug) in zip(gamma, active_dirs.values())]
return block_diag(*[i for gp in to_diag for i in gp])
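# e.g. gamma = [g0, g1] with group sizes 3 and 2 gives to_diag = [[g0, g0], [g1]], so
# GammaMinus = diag(g0, g0, g1): each gamma value repeated (group size - 1) times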
def jacobian_grad_hess(gamma, C, active_dirs):
""" Calculate the log-Jacobian (scalar), gradient (gamma.size vector) and hessian (gamma.size square matrix)
"""
if C.shape == (0, 0): # when all groups are size one, C will be an empty array
return 0, 0, 0
else:
GammaMinus = calc_GammaMinus(gamma, active_dirs)
# eigendecomposition
#evalues, evectors = eig(GammaMinus + C)
# log Jacobian
#J = log(evalues).sum()
J = np.log(np.linalg.det(GammaMinus + C))
# inverse
#GpC_inv = evectors.dot(np.diag(1 / evalues).dot(evectors.T))
GpC_inv = np.linalg.inv(GammaMinus + C)
# summing matrix (gamma.size by C.shape[0])
S = block_diag(*[np.ones((1, ug.size - 1)) for ug in active_dirs.values()])
# gradient
grad_J = S.dot(GpC_inv.diagonal())
# hessian
hess_J = -S.dot(np.multiply(GpC_inv, GpC_inv.T).dot(S.T))
return J, grad_J, hess_J
def _check_groups(groups):
"""Make sure that the user-specific groups are ok
There are a number of assumptions that group_lasso makes about
how groups are specified. Specifically, we assume that
`groups` is a 1-d array_like of integers that are sorted in
increasing order, start at 0, and have no gaps (e.g., if there
is a group 2 and a group 4, there must also be at least one
feature in group 3).
This function checks the user-specified group scheme and
raises an exception if it finds any problems.
Sorting feature groups is potentially tedious for the user and
in future we might do this for them.
"""
# check array_like
agroups = np.array(groups)
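    # e.g. groups = [0, 0, 1, 1, 2] passes all checks below; [1, 1, 2] fails (does not start at 0)
    # and [0, 0, 2] fails (group 1 is skipped)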
# check dimension
if len(agroups.shape) != 1:
raise ValueError("Groups are not a 1D array_like")
# check sorted
    if np.any(agroups[:-1] > agroups[1:]):
raise ValueError("Groups are not sorted")
# check integers
if not np.issubdtype(agroups.dtype, np.integer):
raise TypeError("Groups are not integers")
# check starts with 0
if not np.amin(agroups) == 0:
raise ValueError("First group is not 0")
# check for no skipped groups
if not np.all(np.diff(np.unique(agroups)) == 1):
raise ValueError("Some group is skipped")
| [
"regreg.api.simple_problem",
"numpy.sqrt",
"numpy.hstack",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.linalg.norm",
"scipy.stats.norm.cdf",
"numpy.multiply",
"numpy.linalg.qr",
"numpy.reshape",
"regreg.api.identity_quadratic",
"regreg.api.group_lasso",
"numpy.flatnonzero",
"numpy.asarray",
"regreg.api.weighted_l1norm",
"numpy.exp",
"numpy.issubdtype",
"numpy.dot",
"regreg.api.glm.gaussian",
"numpy.linspace",
"pandas.DataFrame",
"numpy.identity",
"numpy.eye",
"numpy.all",
"numpy.ones",
"numpy.amin",
"scipy.stats.norm.ppf",
"numpy.any",
"numpy.isnan",
"numpy.sign",
"numpy.std",
"numpy.atleast_1d",
"numpy.fabs",
"numpy.unique",
"numpy.minimum",
"numpy.linalg.det",
"numpy.diag",
"numpy.zeros",
"numpy.linalg.inv",
"scipy.linalg.block_diag",
"numpy.zeros_like"
] | [((31360, 31406), 'scipy.linalg.block_diag', 'block_diag', (['*[i for gp in to_diag for i in gp]'], {}), '(*[i for gp in to_diag for i in gp])\n', (31370, 31406), False, 'from scipy.linalg import block_diag\n'), ((33085, 33101), 'numpy.array', 'np.array', (['groups'], {}), '(groups)\n', (33093, 33101), True, 'import numpy as np\n'), ((2537, 2568), 'numpy.ones', 'np.ones', (['self.nfeature', 'np.bool'], {}), '(self.nfeature, np.bool)\n', (2544, 2568), True, 'import numpy as np\n'), ((4189, 4211), 'numpy.hstack', 'np.hstack', (['ordered_opt'], {}), '(ordered_opt)\n', (4198, 4211), True, 'import numpy as np\n'), ((4463, 4486), 'numpy.zeros', 'np.zeros', (['self.nfeature'], {}), '(self.nfeature)\n', (4471, 4486), True, 'import numpy as np\n'), ((4748, 4798), 'numpy.dot', 'np.dot', (['X.T', '(X[:, ordered_vars] * W[:, np.newaxis])'], {}), '(X.T, X[:, ordered_vars] * W[:, np.newaxis])\n', (4754, 4798), True, 'import numpy as np\n'), ((5147, 5173), 'numpy.sign', 'np.sign', (['self.initial_soln'], {}), '(self.initial_soln)\n', (5154, 5173), True, 'import numpy as np\n'), ((5191, 5219), 'numpy.flatnonzero', 'np.flatnonzero', (['active_signs'], {}), '(active_signs)\n', (5205, 5219), True, 'import numpy as np\n'), ((5952, 5967), 'scipy.linalg.block_diag', 'block_diag', (['*Vs'], {}), '(*Vs)\n', (5962, 5967), False, 'from scipy.linalg import block_diag\n'), ((6056, 6071), 'scipy.linalg.block_diag', 'block_diag', (['*Ls'], {}), '(*Ls)\n', (6066, 6071), False, 'from scipy.linalg import block_diag\n'), ((6207, 6213), 'numpy.linalg.inv', 'inv', (['Q'], {}), '(Q)\n', (6210, 6213), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((6661, 6703), 'numpy.zeros', 'np.zeros', (['self.observed_opt_state.shape[0]'], {}), '(self.observed_opt_state.shape[0])\n', (6669, 6703), True, 'import numpy as np\n'), ((7143, 7209), 'regreg.api.identity_quadratic', 'rr.identity_quadratic', (['self.ridge_term', '(0)', '(-self._initial_omega)', '(0)'], {}), '(self.ridge_term, 0, -self._initial_omega, 0)\n', (7164, 7209), True, 'import regreg.api as rr\n'), ((7340, 7385), 'regreg.api.simple_problem', 'rr.simple_problem', (['self.loglike', 'self.penalty'], {}), '(self.loglike, self.penalty)\n', (7357, 7385), True, 'import regreg.api as rr\n'), ((8199, 8264), 'regreg.api.glm.gaussian', 'rr.glm.gaussian', (['X', 'Y'], {'coef': '(1.0 / sigma ** 2)', 'quadratic': 'quadratic'}), '(X, Y, coef=1.0 / sigma ** 2, quadratic=quadratic)\n', (8214, 8264), True, 'import regreg.api as rr\n'), ((11580, 11610), 'numpy.atleast_1d', 'np.atleast_1d', (['observed_target'], {}), '(observed_target)\n', (11593, 11610), True, 'import numpy as np\n'), ((11633, 11648), 'numpy.linalg.inv', 'inv', (['target_cov'], {}), '(target_cov)\n', (11636, 11648), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((14248, 14267), 'scipy.stats.norm.cdf', 'ndist.cdf', (['Z_scores'], {}), '(Z_scores)\n', (14257, 14267), True, 'from scipy.stats import norm as ndist\n'), ((14370, 14396), 'scipy.stats.norm.ppf', 'ndist.ppf', (['(1 - alpha / 2.0)'], {}), '(1 - alpha / 2.0)\n', (14379, 14396), True, 'from scipy.stats import norm as ndist\n'), ((17277, 17306), 'numpy.linalg.inv', 'np.linalg.inv', (['query.cond_cov'], {}), '(query.cond_cov)\n', (17290, 17306), True, 'import numpy as np\n'), ((19777, 19900), 'pandas.DataFrame', 'pd.DataFrame', (["{'target': self.observed_target, 'pvalue': pvalues, 'lower_confidence':\n lower, 'upper_confidence': upper}"], {}), "({'target': self.observed_target, 'pvalue': pvalues,\n 'lower_confidence': lower, 'upper_confidence': 
upper})\n", (19789, 19900), True, 'import pandas as pd\n'), ((20548, 20573), 'numpy.linalg.inv', 'np.linalg.inv', (['target_cov'], {}), '(target_cov)\n', (20561, 20573), True, 'import numpy as np\n'), ((22410, 22429), 'numpy.asarray', 'np.asarray', (['ref_hat'], {}), '(ref_hat)\n', (22420, 22429), True, 'import numpy as np\n'), ((32043, 32072), 'numpy.linalg.inv', 'np.linalg.inv', (['(GammaMinus + C)'], {}), '(GammaMinus + C)\n', (32056, 32072), True, 'import numpy as np\n'), ((33243, 33277), 'numpy.any', 'np.any', (['(agroups[:-1] > agroups[1:])'], {}), '(agroups[:-1] > agroups[1:])\n', (33249, 33277), True, 'import numpy as np\n'), ((33366, 33406), 'numpy.issubdtype', 'np.issubdtype', (['agroups.dtype', 'np.integer'], {}), '(agroups.dtype, np.integer)\n', (33379, 33406), True, 'import numpy as np\n'), ((1435, 1487), 'regreg.api.weighted_l1norm', 'rr.weighted_l1norm', ([], {'weights': 'weights_np', 'lagrange': '(1.0)'}), '(weights=weights_np, lagrange=1.0)\n', (1453, 1487), True, 'import regreg.api as rr\n'), ((1574, 1627), 'regreg.api.group_lasso', 'rr.group_lasso', (['groups'], {'weights': 'weights', 'lagrange': '(1.0)'}), '(groups, weights=weights, lagrange=1.0)\n', (1588, 1627), True, 'import regreg.api as rr\n'), ((2979, 3001), 'numpy.unique', 'np.unique', (['self.groups'], {}), '(self.groups)\n', (2988, 3001), True, 'import numpy as np\n'), ((6598, 6638), 'numpy.eye', 'np.eye', (['self.observed_opt_state.shape[0]'], {}), '(self.observed_opt_state.shape[0])\n', (6604, 6638), True, 'import numpy as np\n'), ((9138, 9157), 'numpy.linalg.inv', 'inv', (['cond_precision'], {}), '(cond_precision)\n', (9141, 9157), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((9341, 9360), 'numpy.linalg.inv', 'inv', (['cond_precision'], {}), '(cond_precision)\n', (9344, 9360), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((14291, 14323), 'numpy.minimum', 'np.minimum', (['pvalues', '(1 - pvalues)'], {}), '(pvalues, 1 - pvalues)\n', (14301, 14323), True, 'import numpy as np\n'), ((18024, 18050), 'numpy.zeros', 'np.zeros', (['(ntarget, ngrid)'], {}), '((ntarget, ngrid))\n', (18032, 18050), True, 'import numpy as np\n'), ((18395, 18421), 'numpy.zeros', 'np.zeros', (['(ntarget, ngrid)'], {}), '((ntarget, ngrid))\n', (18403, 18421), True, 'import numpy as np\n'), ((19598, 19633), 'numpy.zeros_like', 'np.zeros_like', (['self.observed_target'], {}), '(self.observed_target)\n', (19611, 19633), True, 'import numpy as np\n'), ((20006, 20028), 'numpy.all', 'np.all', (['(parameter == 0)'], {}), '(parameter == 0)\n', (20012, 20028), True, 'import numpy as np\n'), ((25922, 25939), 'numpy.asarray', 'np.asarray', (['lower'], {}), '(lower)\n', (25932, 25939), True, 'import numpy as np\n'), ((25941, 25958), 'numpy.asarray', 'np.asarray', (['upper'], {}), '(upper)\n', (25951, 25958), True, 'import numpy as np\n'), ((31905, 31934), 'numpy.linalg.det', 'np.linalg.det', (['(GammaMinus + C)'], {}), '(GammaMinus + C)\n', (31918, 31934), True, 'import numpy as np\n'), ((33497, 33513), 'numpy.amin', 'np.amin', (['agroups'], {}), '(agroups)\n', (33504, 33513), True, 'import numpy as np\n'), ((3156, 3178), 'numpy.linalg.norm', 'norm', (['soln[group_mask]'], {}), '(soln[group_mask])\n', (3160, 3178), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((4095, 4108), 'numpy.sign', 'np.sign', (['soln'], {}), '(soln)\n', (4102, 4108), True, 'import numpy as np\n'), ((5444, 5449), 'numpy.linalg.qr', 'qr', (['Z'], {}), '(Z)\n', (5446, 5449), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((5544, 5560), 
'numpy.zeros', 'np.zeros', (['(1, 0)'], {}), '((1, 0))\n', (5552, 5560), True, 'import numpy as np\n'), ((5755, 5765), 'numpy.eye', 'np.eye', (['pg'], {}), '(pg)\n', (5761, 5765), True, 'import numpy as np\n'), ((8422, 8436), 'numpy.sqrt', 'np.sqrt', (['(n - 1)'], {}), '(n - 1)\n', (8429, 8436), True, 'import numpy as np\n'), ((8545, 8567), 'numpy.sqrt', 'np.sqrt', (['(n / (n - 1.0))'], {}), '(n / (n - 1.0))\n', (8552, 8567), True, 'import numpy as np\n'), ((9002, 9018), 'numpy.asarray', 'np.asarray', (['prec'], {}), '(prec)\n', (9012, 9018), True, 'import numpy as np\n'), ((11452, 11479), 'numpy.asarray', 'np.asarray', (['observed_target'], {}), '(observed_target)\n', (11462, 11479), True, 'import numpy as np\n'), ((12307, 12339), 'numpy.asarray', 'np.asarray', (['self.randomizer_prec'], {}), '(self.randomizer_prec)\n', (12317, 12339), True, 'import numpy as np\n'), ((14200, 14227), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14207, 14227), True, 'import numpy as np\n'), ((17919, 17940), 'numpy.diag', 'np.diag', (['inverse_info'], {}), '(inverse_info)\n', (17926, 17940), True, 'import numpy as np\n'), ((18127, 18229), 'numpy.linspace', 'np.linspace', (['(observed_target[j] - 1.5 * _scale[j])', '(observed_target[j] + 1.5 * _scale[j])'], {'num': 'ngrid'}), '(observed_target[j] - 1.5 * _scale[j], observed_target[j] + 1.5 *\n _scale[j], num=ngrid)\n', (18138, 18229), True, 'import numpy as np\n'), ((18498, 18600), 'numpy.linspace', 'np.linspace', (['(observed_target[j] - 1.5 * _scale[j])', '(observed_target[j] + 1.5 * _scale[j])'], {'num': 'ngrid'}), '(observed_target[j] - 1.5 * _scale[j], observed_target[j] + 1.5 *\n _scale[j], num=ngrid)\n', (18509, 18600), True, 'import numpy as np\n'), ((20424, 20451), 'numpy.asarray', 'np.asarray', (['observed_target'], {}), '(observed_target)\n', (20434, 20451), True, 'import numpy as np\n'), ((21600, 21620), 'numpy.identity', 'np.identity', (['num_opt'], {}), '(num_opt)\n', (21611, 21620), True, 'import numpy as np\n'), ((21858, 21885), 'numpy.asarray', 'np.asarray', (['[conjugate_arg]'], {}), '([conjugate_arg])\n', (21868, 21885), True, 'import numpy as np\n'), ((21921, 21953), 'numpy.reshape', 'np.reshape', (['implied_prec', '(1, 1)'], {}), '(implied_prec, (1, 1))\n', (21931, 21953), True, 'import numpy as np\n'), ((23507, 23611), 'scipy.interpolate.interp1d', 'interp1d', (['self.stat_grid[m]', 'log_ref'], {'kind': '"""quadratic"""', 'bounds_error': '(False)', 'fill_value': '"""extrapolate"""'}), "(self.stat_grid[m], log_ref, kind='quadratic', bounds_error=False,\n fill_value='extrapolate')\n", (23515, 23611), False, 'from scipy.interpolate import interp1d\n'), ((30706, 30745), 'numpy.fabs', 'np.fabs', (['(current_value - proposed_value)'], {}), '(current_value - proposed_value)\n', (30713, 30745), True, 'import numpy as np\n'), ((1231, 1248), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (1240, 1248), True, 'import numpy as np\n'), ((3187, 3197), 'numpy.linalg.norm', 'norm', (['soln'], {}), '(soln)\n', (3191, 3197), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((3355, 3381), 'numpy.flatnonzero', 'np.flatnonzero', (['group_mask'], {}), '(group_mask)\n', (3369, 3381), True, 'import numpy as np\n'), ((3657, 3679), 'numpy.linalg.norm', 'norm', (['soln[group_mask]'], {}), '(soln[group_mask])\n', (3661, 3679), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((8389, 8398), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (8395, 8398), True, 'import numpy as np\n'), ((8401, 8419), 
'numpy.sqrt', 'np.sqrt', (['mean_diag'], {}), '(mean_diag)\n', (8408, 8419), True, 'import numpy as np\n'), ((8533, 8542), 'numpy.std', 'np.std', (['Y'], {}), '(Y)\n', (8539, 8542), True, 'import numpy as np\n'), ((14838, 14865), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14845, 14865), True, 'import numpy as np\n'), ((21131, 21171), 'numpy.atleast_1d', 'np.atleast_1d', (['(grid[k] - observed_target)'], {}), '(grid[k] - observed_target)\n', (21144, 21171), True, 'import numpy as np\n'), ((27155, 27175), 'numpy.linalg.inv', 'np.linalg.inv', (['_prec'], {}), '(_prec)\n', (27168, 27175), True, 'import numpy as np\n'), ((30754, 30776), 'numpy.fabs', 'np.fabs', (['current_value'], {}), '(current_value)\n', (30761, 30776), True, 'import numpy as np\n'), ((32151, 32176), 'numpy.ones', 'np.ones', (['(1, ug.size - 1)'], {}), '((1, ug.size - 1))\n', (32158, 32176), True, 'import numpy as np\n'), ((33630, 33648), 'numpy.unique', 'np.unique', (['agroups'], {}), '(agroups)\n', (33639, 33648), True, 'import numpy as np\n'), ((3598, 3620), 'numpy.linalg.norm', 'norm', (['soln[group_mask]'], {}), '(soln[group_mask])\n', (3602, 3620), False, 'from numpy.linalg import norm, qr, inv, eig\n'), ((5400, 5418), 'numpy.eye', 'np.eye', (['pg', '(pg - 1)'], {}), '(pg, pg - 1)\n', (5406, 5418), True, 'import numpy as np\n'), ((8506, 8524), 'numpy.sqrt', 'np.sqrt', (['mean_diag'], {}), '(mean_diag)\n', (8513, 8524), True, 'import numpy as np\n'), ((22721, 22745), 'numpy.diag', 'np.diag', (['self.target_cov'], {}), '(self.target_cov)\n', (22728, 22745), True, 'import numpy as np\n'), ((23446, 23458), 'numpy.exp', 'np.exp', (['logW'], {}), '(logW)\n', (23452, 23458), True, 'import numpy as np\n'), ((24122, 24134), 'numpy.exp', 'np.exp', (['logW'], {}), '(logW)\n', (24128, 24134), True, 'import numpy as np\n'), ((26254, 26278), 'numpy.diag', 'np.diag', (['self.target_cov'], {}), '(self.target_cov)\n', (26261, 26278), True, 'import numpy as np\n'), ((30451, 30475), 'numpy.isnan', 'np.isnan', (['proposed_value'], {}), '(proposed_value)\n', (30459, 30475), True, 'import numpy as np\n'), ((30479, 30502), 'numpy.isnan', 'np.isnan', (['current_value'], {}), '(current_value)\n', (30487, 30502), True, 'import numpy as np\n'), ((32316, 32347), 'numpy.multiply', 'np.multiply', (['GpC_inv', 'GpC_inv.T'], {}), '(GpC_inv, GpC_inv.T)\n', (32327, 32347), True, 'import numpy as np\n'), ((14496, 14523), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14503, 14523), True, 'import numpy as np\n'), ((14625, 14652), 'numpy.diag', 'np.diag', (['observed_info_mean'], {}), '(observed_info_mean)\n', (14632, 14652), True, 'import numpy as np\n')] |
import six
import json
import gzip
from exporters.default_retries import retry_long
from exporters.writers.base_writer import BaseWriter
class ODOWriter(BaseWriter):
"""
    Writes items to an odo destination. https://odo.readthedocs.org/en/latest/
Needed parameters:
- schema (object)
schema object.
- odo_uri (str)
ODO valid destination uri.
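        e.g. 'sqlite:///output.db::items' or 'postgresql://user@host/db::table'
        (or any other destination supported by odo).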
"""
requirements = {
'schema': {'type': object, 'required': True},
'odo_uri': {'type': six.string_types, 'required': True}
}
def __init__(self, options):
super(ODOWriter, self).__init__(options)
from flatson import Flatson
schema = self.read_option('schema', None)
self.odo_uri = self.read_option('odo_uri', None)
self.flatson = Flatson(schema)
self.logger.info('ODOWriter has been initiated. Writing to: {}'.format(self.odo_uri))
@retry_long
def write(self, dump_path, group_key=''):
from odo import odo, resource, discover
import pandas as pd
with gzip.open(dump_path) as f:
lines = [json.loads(line.replace('\n', '')) for line in f.readlines()]
flattened_lines = (self.flatson.flatten(line) for line in lines)
pf = pd.DataFrame(flattened_lines, columns=self.flatson.fieldnames)
dshape = discover(pf)
odo(pf, resource(self.odo_uri), dshape=dshape)
| [
"odo.discover",
"gzip.open",
"flatson.Flatson",
"pandas.DataFrame",
"odo.resource"
] | [((800, 815), 'flatson.Flatson', 'Flatson', (['schema'], {}), '(schema)\n', (807, 815), False, 'from flatson import Flatson\n'), ((1258, 1320), 'pandas.DataFrame', 'pd.DataFrame', (['flattened_lines'], {'columns': 'self.flatson.fieldnames'}), '(flattened_lines, columns=self.flatson.fieldnames)\n', (1270, 1320), True, 'import pandas as pd\n'), ((1338, 1350), 'odo.discover', 'discover', (['pf'], {}), '(pf)\n', (1346, 1350), False, 'from odo import odo, resource, discover\n'), ((1062, 1082), 'gzip.open', 'gzip.open', (['dump_path'], {}), '(dump_path)\n', (1071, 1082), False, 'import gzip\n'), ((1367, 1389), 'odo.resource', 'resource', (['self.odo_uri'], {}), '(self.odo_uri)\n', (1375, 1389), False, 'from odo import odo, resource, discover\n')] |
'''
Written by: <NAME> <EMAIL> <EMAIL>
Last updated: 29.01.2021
'''
# the concept is to generate a side-channel-resistant initialisation of the hashing function based on
# one secret key and several openly known initialisation vectors (IVs), in a manner where the same input
# is not hashed more than two times, which is hopefully not sufficient for side-channel-
# measurement-based computations: the number of consecutive measurements needed for a successful attack on
# the CHI function in a practically noiseless computer simulation (see "chi_cpa.py") is around
# 100 measurements
# this is achieved by taking a counter of a certain bit length, and twice as many IVs as bits in
# the counter ("IV0s" and "IV1s"), and computing a series of hashes starting with the secret key, then with the
# corresponding IV of set 0 or set 1 depending on whether the counter's corresponding bit - starting at the MSB -
# is 0 or 1; this way every hash output is used exactly 2 times, provided the intermediate values are STORED
# and the series of initial hashes is NOT fully recomputed - only those whose corresponding
# counter bits have changed, and all the following levels down to the LSB of the counter
# the working solution is going to be based on the algorithms presented here, although
# the algorithm in this file does the full padding, so the results won't equal those of
# a scheme where the rate is fully filled with IVs and the data comes only afterwards...
import hashlib
# KEY DATA STRUCTURES' INTERPRETATION
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
IV0s = [658678, 6785697, 254376, 67856, 1432543, 786, 124345, 5443654]
IV1s = [2565, 256658, 985, 218996, 255, 685652, 28552, 3256565]
# LSB ... MSB
hash_copies = [None for i in range(len(IV0s))]
# LSB ... MSB
# counter
# MSB ... LSB
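# e.g. bit i of the counter selects IV1s[i] when set and IV0s[i] when clear;
# hash_copies[i] caches the hash state after absorbing the IVs for counter bits MSB..i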
# COMPUTING HASHES FOR EVERY COUNTER VALUE INDIVIDUALLY
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for counter in range(11):
hash = hashlib.sha3_512()
# looping from MSB to LSB in counter too
for i in range(len(IV0s)-1, -1, -1):
if (counter>>i) & 1 == 1:
IV = bytes(IV1s[i])
else:
IV = bytes(IV0s[i])
hash.update(IV)
print(hash.hexdigest())
print()
# COMPUTING HASHES BASED ON THE NATURE OF BINARY INCREMENTATION:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# only fewer values need to be recomputed, those whose corresponding
# bits have changed, down until LSB
# initialize
hash = hashlib.sha3_512()
# looping from MSB to LSB
for i in range(len(IV0s)-1, -1, -1):
# addressing "MSB" of IVs at first, "LSB" at last!
IV = bytes(IV0s[i])
hash.update(IV)
# index 0 of hash_copies changes the most frequently ie. according to counter's LSB
hash_copies[i] = hash.copy()
# compute
last_counter = 0
for counter in range(11):
IV_mask = last_counter ^ counter
last_counter = counter
# determine the highest non-zero bit of IV_mask, LSB is 1, 0 means there was no change
nz = 0
while IV_mask > 0:
IV_mask >>= 1
nz += 1
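    # e.g. stepping from counter 3 (0b011) to 4 (0b100) gives IV_mask = 0b111, so nz = 3 and the
    # hashes for counter bits 2, 1 and 0 are recomputed below, restarting from hash_copies[3]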
# initialize hash to the last value whose corresponding counter bit didn't switch
# have to copy object otherwise the originally pointed version gets updated!
hash = hash_copies[nz].copy() # LSB is index 0
# compute only the remaining hashes
while nz != 0: # nz=0 is the initial condition, nothing needs to be done
nz -= 1
if (counter>>nz) & 1 == 1:
IV = bytes(IV1s[nz])
else:
IV = bytes(IV0s[nz])
hash.update(IV)
# needs to be copied again because of object orientation
hash_copies[nz] = hash.copy()
# showing the hash copies' entire table after each computation
#for hashes in hash_copies:
# print(hashes.hexdigest())
print(hash_copies[0].hexdigest())
| [
"hashlib.sha3_512"
] | [((2526, 2544), 'hashlib.sha3_512', 'hashlib.sha3_512', ([], {}), '()\n', (2542, 2544), False, 'import hashlib\n'), ((1973, 1991), 'hashlib.sha3_512', 'hashlib.sha3_512', ([], {}), '()\n', (1989, 1991), False, 'import hashlib\n')] |
import os
def check_env(env_var_name):
"""
Check and return the type of an environment variable.
supported types:
None
Integer
String
@param env_var_name: environment variable name
@return: string of the type name.
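    e.g. an unset variable yields 'None', PORT='8080' yields 'Integer',
    HOME='/usr/home' yields 'String'.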
"""
try:
val = os.getenv(env_var_name)
if val is None:
return 'None'
    except Exception:
        return 'None'
try:
int_val = int(val)
return 'Integer'
except ValueError:
return 'String'
| [
"os.getenv"
] | [((294, 317), 'os.getenv', 'os.getenv', (['env_var_name'], {}), '(env_var_name)\n', (303, 317), False, 'import os\n')] |
from .models import Sound , Album
from rest_framework import serializers
class SoundSerializer(serializers.ModelSerializer):
class Meta:
model = Sound
fields = ["name" , "song_image" , "pk" , "like" , "played" , "tag" , "singer" , "upload_date"]
class SoundDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Sound
fields = "__all__"
class AlbumSerializer(serializers.ModelSerializer):
sound = serializers.SerializerMethodField()
class Meta:
model = Album
fields = ["name" , "datepublish" , "category" , "sound"]
depth = 1
def get_sound(self , obj):
print("WORKING")
return SoundSerializer(instance=obj.sound , many=True).data
| [
"rest_framework.serializers.SerializerMethodField"
] | [((474, 509), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (507, 509), False, 'from rest_framework import serializers\n')] |
from datetime import datetime
from django.conf import settings
import pytz
def check_tracker(obj, simple=True):
if simple:
if obj.status > 0:
return True
return False
# we have a gatekeeper
now = datetime.now(pytz.utc)
if obj.tracker_publish_status < 0:
return False
if obj.tracker_publish_status > 0:
return True
# Checking live_as_of ...
# is live_as_of set?
if not obj.tracker_live_as_of: # No live_as_of --- bail
return False
# has it happened yet?
if now < obj.tracker_live_as_of: # live_as_of --- not yet!
return False
# is there an expiration date?
if obj.tracker_expires and now > obj.tracker_expires: # EXPIRED!
return False
# it's OK then
return True
DEFAULT_TRACKER_POSITIONS = [
('tracker-head-top', 'Head - near top'),
('tracker-head-bottom', 'Head - near bottom'),
('tracker-body-top', 'Body - near top'),
('tracker-body-bottom', 'Body - near bottom')
]
def get_tracker_position_options():
"""
This creates the dropdown in the Admin for where to put each tracker.
    It defaults to the obvious 4 locations (top/bottom of the head/body);
    however the user can create more by adding a list of 3-tuples in the settings
file under ADDITIONAL_TRACKER_POSITIONS.
(2-letter-code, description, block name), e.g.
('HN', 'Header Navigation', 'header-navigation-trackers')
would allow for the user to have tracking code in a navbar (no, I don't know
why they'd want this) if they put
{% block header-navigation-trackers %}{% generate_trackers 'HN' %}{% endblock %}
in their template.
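    i.e. putting
    ADDITIONAL_TRACKER_POSITIONS = [('HN', 'Header Navigation', 'header-navigation-trackers')]
    in the settings file adds a 'Header Navigation' choice to the dropdown built below.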
"""
tracker_position_list = DEFAULT_TRACKER_POSITIONS
additional_tracker_positions = getattr(settings, "ADDITIONAL_TRACKER_POSITIONS", [])
full_list = list()
for x in (tracker_position_list + additional_tracker_positions):
full_list.append((x[0], x[1]))
return full_list | [
"datetime.datetime.now"
] | [((246, 268), 'datetime.datetime.now', 'datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (258, 268), False, 'from datetime import datetime\n')] |
import datetime
from fastapi import APIRouter
router = APIRouter()
@router.get("", tags=["health"])
async def get_health():
return {
"results": [],
"status": "success",
"timestamp": datetime.datetime.now().timestamp()
}
| [
"datetime.datetime.now",
"fastapi.APIRouter"
] | [((57, 68), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (66, 68), False, 'from fastapi import APIRouter\n'), ((214, 237), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (235, 237), False, 'import datetime\n')] |
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import json
import time
import cv2
PATH_TO_FROZEN_GRAPH = '../data/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224_frozen.pb'
info='Time taken to load Model into memory:'
start_time=time.time()
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
end_time=time.time()
time_taken=end_time-start_time
print(info,time_taken)
# Load the labels
#Load categories
categories = []
with open('../data/' + 'categories.txt', 'r') as f:
for line in f:
cat = line.split('\n')[0]
if cat != 'classes':
categories.append(cat)
f.close()
print('Number of categories:', len(categories))
# Load image size
with open('../data/' + 'inputsize.txt', 'r') as f:
reqsize = int(f.readline().split('\n')[0])
#print(reqsize)
#image_filename = '../data/' + 'image1.jpg'
def Load_and_process_img(image_filename):
img = cv2.imread(image_filename)#.astype(numpy.float32)
img = cv2.resize(img, (reqsize, reqsize))
img=cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img = img.astype(float)
#img values are scaled from -1 to 1
img /= 255.0
img -= 0.5
img *= 2.0
return img
sess=tf.Session(graph=detection_graph)
def run_inference_b1(key_name,image, graph,no_of_run):
#model output layer name
ops = graph.get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
#print(all_tensor_names)
tensor_dict = {}
for key in [key_name]:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = graph.get_tensor_by_name(tensor_name)
image=image.reshape(1,image.shape[0],image.shape[1],image.shape[2])
image_tensor = graph.get_tensor_by_name('input:0')
    # Warm-up run so that the graph is loaded into TF memory before timing the inference loop
sess.run(tensor_dict,feed_dict={image_tensor: image})
# Run inference
info='Time taken to run inference: run_inference_b1:'+str(no_of_run)+' Times: '
start_time=time.time()
for i in range(no_of_run):
output_dict = sess.run(tensor_dict,
feed_dict={image_tensor: image})
end_time=time.time()
time_taken=end_time-start_time
print(info,time_taken)
#print(output_dict)
top_inds = output_dict[key_name][0].argsort()[::-1][:5]
result=[]
for i in range(5):
result.append([top_inds[i], categories[top_inds[i]], output_dict[key_name][0][top_inds[i]]])
return result, time_taken
image_filename = '../data/' + 'Tiger.jpg'
img = Load_and_process_img(image_filename)
key_name='MobilenetV2/Predictions/Reshape_1'
result,time_taken=run_inference_b1(key_name,img,detection_graph,1000)
print('Time Taken to run Inference is:',time_taken)
print(result)
| [
"tensorflow.Graph",
"tensorflow.Session",
"tensorflow.GraphDef",
"cv2.cvtColor",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"cv2.resize",
"time.time",
"cv2.imread"
] | [((471, 482), 'time.time', 'time.time', ([], {}), '()\n', (480, 482), False, 'import time\n'), ((502, 512), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (510, 512), True, 'import tensorflow as tf\n'), ((779, 790), 'time.time', 'time.time', ([], {}), '()\n', (788, 790), False, 'import time\n'), ((1643, 1676), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'detection_graph'}), '(graph=detection_graph)\n', (1653, 1676), True, 'import tensorflow as tf\n'), ((565, 578), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (576, 578), True, 'import tensorflow as tf\n'), ((1366, 1392), 'cv2.imread', 'cv2.imread', (['image_filename'], {}), '(image_filename)\n', (1376, 1392), False, 'import cv2\n'), ((1426, 1461), 'cv2.resize', 'cv2.resize', (['img', '(reqsize, reqsize)'], {}), '(img, (reqsize, reqsize))\n', (1436, 1461), False, 'import cv2\n'), ((1470, 1506), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1482, 1506), False, 'import cv2\n'), ((2459, 2470), 'time.time', 'time.time', ([], {}), '()\n', (2468, 2470), False, 'import time\n'), ((2620, 2631), 'time.time', 'time.time', ([], {}), '()\n', (2629, 2631), False, 'import time\n'), ((586, 628), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_FROZEN_GRAPH', '"""rb"""'], {}), "(PATH_TO_FROZEN_GRAPH, 'rb')\n", (600, 628), True, 'import tensorflow as tf\n'), ((726, 768), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (745, 768), True, 'import tensorflow as tf\n')] |
from __future__ import unicode_literals
import frappe, json
from frappe.model.utils.user_settings import update_user_settings, sync_user_settings
def execute():
users = frappe.db.sql("select distinct(user) from `__UserSettings`", as_dict=True)
for user in users:
user_settings = frappe.db.sql('''
select
* from `__UserSettings`
where
user="{user}"
'''.format(user = user.user), as_dict=True)
for setting in user_settings:
data = frappe.parse_json(setting.get('data'))
if data:
for key in data:
update_user_setting_filters(data, key, setting)
sync_user_settings()
def update_user_setting_filters(data, key, user_setting):
timespan_map = {
'1 week': 'week',
'1 month': 'month',
'3 months': 'quarter',
'6 months': '6 months',
'1 year': 'year',
}
period_map = {
'Previous': 'last',
'Next': 'next'
}
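    # e.g. a saved list-view filter such as ['Task', 'modified', 'Previous', '1 week']
    # is rewritten below to ['Task', 'modified', 'Timespan', 'last week']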
if data.get(key):
update = False
if isinstance(data.get(key), dict):
filters = data.get(key).get('filters')
if filters and isinstance(filters, list):
for f in filters:
if f[2] == 'Next' or f[2] == 'Previous':
update = True
f[3] = period_map[f[2]] + ' ' + timespan_map[f[3]]
f[2] = 'Timespan'
if update:
data[key]['filters'] = filters
update_user_settings(user_setting['doctype'], json.dumps(data), for_update=True)
| [
"json.dumps",
"frappe.db.sql",
"frappe.model.utils.user_settings.sync_user_settings"
] | [((171, 245), 'frappe.db.sql', 'frappe.db.sql', (['"""select distinct(user) from `__UserSettings`"""'], {'as_dict': '(True)'}), "('select distinct(user) from `__UserSettings`', as_dict=True)\n", (184, 245), False, 'import frappe, json\n'), ((584, 604), 'frappe.model.utils.user_settings.sync_user_settings', 'sync_user_settings', ([], {}), '()\n', (602, 604), False, 'from frappe.model.utils.user_settings import update_user_settings, sync_user_settings\n'), ((1289, 1305), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1299, 1305), False, 'import frappe, json\n')] |
import BoltzmannMachine as bm
import QHO as qho
import numpy as np
import datetime
# Visualization imports
from IPython.display import clear_output
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi']=300
def sigmoid(x):
return .5 * (1 + np.tanh(x / 2.))
# Set the quantum gas with N particles, a limit of 10 for the
# quantum numbers and default temperature and frequency
N = 10*10
gas = qho.QHOGas(N=N)
n_max = 10
training_size = 100000
# the amount of hidden units was set by trial and error
hidden_units = 70
# the recipe suggests to set the batchsize to 10, though it can range
# from 10 to 100
batchsize = 10
# the recipe suggests a learning rate that makes the weight updates about
# 1e-3 times the weights (to within an order of magnitude)
eta = 0.005
# the amount of steps was set by trial and error
nsteps = 300000
# define the validation set to be used in training_visualization
validation_set = gas.generate(amount=20)
def training_visualization(machine, current_step, total_steps, eta, a, b, w, da, db, dw):
# Every now and then (every 50k steps), let us know that the training
# is still running
if current_step%50000 == 0:
print("{:08d} / {:08d}".format(current_step, total_steps), end=" \r")
# After 'checkpoint_steps', show the suggested plots
checkpoint_steps = 10000
if current_step%checkpoint_steps == 0 or current_step == total_steps-1:
print(f"Showing at step {current_step}.")
# Produce a sample starting from the validation set after 100 steps
v_prime = machine.generate(validation_set, 100, a=a, b=b, w=w)
# print useful plots for training
plot_training(validation_set, v_prime, eta, a, b, w, da, db, dw)
def plot_training(v, v_prime, eta, a, b, w, da, db, dw):
clear_output(wait=True)
# Show how the weights light up for the state v
hMean = sigmoid(np.dot(v, w) + b)
    Image.fromarray((hMean * 255).astype(np.uint8)).show()  # cast to uint8 so PIL can render the activations
# Create the grid for all the other plots we want
plt.rcParams.update({'font.size': 2})
# plot histogram of initial vs generated
n = np.arange(0,10)
generated_quantum_numbers = np.rint(v_prime*10)
plt.hist( generated_quantum_numbers.flatten(), bins=np.arange(0,10), density=True, label="Sampled" )
plt.plot( n, gas.p_n(n), label="Theor." )
plt.xlabel('n')
plt.ylabel('P(n)')
plt.legend()
# plot histogram of visible, hidden, weights
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(ncols=3, nrows=2)
def plotit(axis, values, title):
axis.hist(values)
axis.set_title(f"{title}: mm = {np.mean(np.fabs(values))}")
plotit(fig.add_subplot(gs[0,0]), a, 'a')
plotit(fig.add_subplot(gs[0,1]), w.flatten(), 'w')
plotit(fig.add_subplot(gs[0,2]), b, 'b')
# plot histogram of d_visible, d_hidden, d_weights
plotit(fig.add_subplot(gs[1,0]), eta*da, 'da')
plotit(fig.add_subplot(gs[1,1]), eta*dw.flatten(), 'dw')
plotit(fig.add_subplot(gs[1,2]), eta*db, 'db')
# show free energies of the average of samples
x = lambda vv : b + np.dot(vv, w)
free_training = -np.dot(v, a) - np.sum( np.log(1 + np.exp(x(v))), axis=1)
    free_validation = -np.dot(v_prime, a) - np.sum( np.log(1 + np.exp(x(v_prime))), axis=1)
    print(f"\nF_training={np.average(free_training)} vs F_validation={np.average(free_validation)}\n")
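    # Note: F(v) = -v·a - sum_j log(1 + exp(b_j + (v·w)_j)), matching the expressions above; comparable
    # values on training vs. validation samples are commonly used as a quick check against overfitting.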
# Show.
# CAUTION! This will freeze the execution
plt.show()
# Init the boltzmann machine and train it while visualizing the suggested plots
training_set = gas.generate(amount=training_size, n_max=n_max)
m = bm.BoltzmannMachine(num_hidden=hidden_units)
a,b,w = m.train(training_set, batchsize=batchsize, eta=eta, nsteps=nsteps, do_while_training=None)
# Store in a file
run_id = int(datetime.datetime.now().timestamp())
np.savetxt(f"a_{run_id}.csv", a, delimiter=',')
np.savetxt(f"b_{run_id}.csv", b, delimiter=',')
np.savetxt(f"w_{run_id}.csv", w, delimiter=',')
| [
"numpy.fabs",
"QHO.QHOGas",
"PIL.Image.fromarray",
"matplotlib.pyplot.ylabel",
"numpy.average",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"numpy.tanh",
"IPython.display.clear_output",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"numpy.dot",
"BoltzmannMachine.BoltzmannMachine",
"numpy.rint",
"numpy.savetxt",
"datetime.datetime.now",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((447, 462), 'QHO.QHOGas', 'qho.QHOGas', ([], {'N': 'N'}), '(N=N)\n', (457, 462), True, 'import QHO as qho\n'), ((3637, 3681), 'BoltzmannMachine.BoltzmannMachine', 'bm.BoltzmannMachine', ([], {'num_hidden': 'hidden_units'}), '(num_hidden=hidden_units)\n', (3656, 3681), True, 'import BoltzmannMachine as bm\n'), ((3849, 3896), 'numpy.savetxt', 'np.savetxt', (['f"""a_{run_id}.csv"""', 'a'], {'delimiter': '""","""'}), "(f'a_{run_id}.csv', a, delimiter=',')\n", (3859, 3896), True, 'import numpy as np\n'), ((3897, 3944), 'numpy.savetxt', 'np.savetxt', (['f"""b_{run_id}.csv"""', 'b'], {'delimiter': '""","""'}), "(f'b_{run_id}.csv', b, delimiter=',')\n", (3907, 3944), True, 'import numpy as np\n'), ((3945, 3992), 'numpy.savetxt', 'np.savetxt', (['f"""w_{run_id}.csv"""', 'w'], {'delimiter': '""","""'}), "(f'w_{run_id}.csv', w, delimiter=',')\n", (3955, 3992), True, 'import numpy as np\n'), ((1829, 1852), 'IPython.display.clear_output', 'clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (1841, 1852), False, 'from IPython.display import clear_output\n'), ((2050, 2087), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 2}"], {}), "({'font.size': 2})\n", (2069, 2087), True, 'import matplotlib.pyplot as plt\n'), ((2142, 2158), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (2151, 2158), True, 'import numpy as np\n'), ((2190, 2211), 'numpy.rint', 'np.rint', (['(v_prime * 10)'], {}), '(v_prime * 10)\n', (2197, 2211), True, 'import numpy as np\n'), ((2365, 2380), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""n"""'], {}), "('n')\n", (2375, 2380), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2403), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P(n)"""'], {}), "('P(n)')\n", (2395, 2403), True, 'import matplotlib.pyplot as plt\n'), ((2408, 2420), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2418, 2420), True, 'import matplotlib.pyplot as plt\n'), ((2481, 2516), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)'}), '(constrained_layout=True)\n', (2491, 2516), True, 'import matplotlib.pyplot as plt\n'), ((3478, 3488), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3486, 3488), True, 'import matplotlib.pyplot as plt\n'), ((295, 311), 'numpy.tanh', 'np.tanh', (['(x / 2.0)'], {}), '(x / 2.0)\n', (302, 311), True, 'import numpy as np\n'), ((1925, 1937), 'numpy.dot', 'np.dot', (['v', 'w'], {}), '(v, w)\n', (1931, 1937), True, 'import numpy as np\n'), ((1955, 1983), 'PIL.Image.fromarray', 'Image.fromarray', (['(hMean * 256)'], {}), '(hMean * 256)\n', (1970, 1983), False, 'from PIL import Image\n'), ((2266, 2282), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (2275, 2282), True, 'import numpy as np\n'), ((3130, 3143), 'numpy.dot', 'np.dot', (['vv', 'w'], {}), '(vv, w)\n', (3136, 3143), True, 'import numpy as np\n'), ((3166, 3178), 'numpy.dot', 'np.dot', (['v', 'a'], {}), '(v, a)\n', (3172, 3178), True, 'import numpy as np\n'), ((3245, 3263), 'numpy.dot', 'np.dot', (['v_prime', 'a'], {}), '(v_prime, a)\n', (3251, 3263), True, 'import numpy as np\n'), ((3812, 3835), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3833, 3835), False, 'import datetime\n'), ((3340, 3365), 'numpy.average', 'np.average', (['free_training'], {}), '(free_training)\n', (3350, 3365), True, 'import numpy as np\n'), ((3384, 3410), 'numpy.average', 'np.average', (['free_valdation'], {}), '(free_valdation)\n', (3394, 3410), True, 'import numpy as np\n'), ((2672, 2687), 'numpy.fabs', 
'np.fabs', (['values'], {}), '(values)\n', (2679, 2687), True, 'import numpy as np\n')] |
import time
import os
import sys
import shutil
import json
import argparse
from zipfile import ZipFile
from contextlib import contextmanager
from datetime import datetime
from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, \
extract_packs_artifacts
from Tests.Marketplace.marketplace_services import init_storage_client
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging
MAX_SECONDS_TO_WAIT_FOR_LOCK = 600
LOCK_FILE_PATH = 'lock.txt'
@contextmanager
def lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
try:
acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
yield
except Exception:
logging.exception("Error in dummy index lock context manager.")
finally:
release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path)
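# Typical usage (mirroring the call further below):
#   with lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
#       ...  # modify and re-upload the dummy index while the lock is held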
def change_pack_price_to_zero(path_to_pack_metadata):
with open(path_to_pack_metadata, 'r') as pack_metadata_file:
pack_metadata = json.load(pack_metadata_file)
pack_metadata['price'] = 0
with open(path_to_pack_metadata, 'w') as pack_metadata_file:
json.dump(pack_metadata, pack_metadata_file, indent=4)
def change_packs_price_to_zero(public_index_folder_path):
paths_to_packs_in_merged_index = [pack_dir.path for pack_dir in os.scandir(public_index_folder_path) if
pack_dir.is_dir()]
for path_to_pack in paths_to_packs_in_merged_index:
path_to_pack_metadata = os.path.join(path_to_pack, 'metadata.json')
change_pack_price_to_zero(path_to_pack_metadata)
def merge_private_index_into_public_index(public_index_folder_path, private_index_folder_path):
packs_in_private_index = [pack_dir.name for pack_dir in os.scandir(private_index_folder_path) if pack_dir.is_dir()]
for pack_name in packs_in_private_index:
path_to_pack_in_private_index = os.path.join(private_index_folder_path, pack_name)
path_to_pack_in_public_index = os.path.join(public_index_folder_path, pack_name)
shutil.copy(path_to_pack_in_private_index, path_to_pack_in_public_index)
def upload_modified_index(public_index_folder_path, extract_destination_path, public_ci_dummy_index_blob, build_number,
private_packs):
"""Upload updated index zip to cloud storage.
Args:
public_index_folder_path (str): public index folder full path.
extract_destination_path (str): extract folder full path.
public_ci_dummy_index_blob (Blob): google cloud storage object that represents the dummy index.zip blob.
build_number (str): circleCI build number, used as an index revision.
private_packs (list): List of private packs and their price.
"""
with open(os.path.join(public_index_folder_path, "index.json"), "w+") as index_file:
for private_pack in private_packs:
private_pack['price'] = 0
index = {
'revision': build_number,
'modified': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'packs': private_packs
}
json.dump(index, index_file, indent=4)
index_zip_name = os.path.basename(public_index_folder_path)
index_zip_path = shutil.make_archive(base_name=public_index_folder_path, format="zip",
root_dir=extract_destination_path, base_dir=index_zip_name)
try:
public_ci_dummy_index_blob.reload()
public_ci_dummy_index_blob.cache_control = "no-cache,max-age=0" # disabling caching for index blob
public_ci_dummy_index_blob.upload_from_filename(index_zip_path)
logging.success("Finished uploading index.zip to storage.")
except Exception:
logging.exception("Failed in uploading index. Mismatch in index file generation.")
sys.exit(1)
finally:
shutil.rmtree(public_index_folder_path)
def option_handler():
"""Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
# disable-secrets-detection-start
parser.add_argument('-b', '--public_bucket_name', help="CI public bucket name", required=True)
parser.add_argument('-pb', '--private_bucket_name', help="CI private bucket name", required=True)
parser.add_argument('-s', '--service_account',
help=("Path to gcloud service account, is for circleCI usage. "
"For local development use your personal account and "
"authenticate using Google Cloud SDK by running: "
"`gcloud auth application-default login` and leave this parameter blank. "
"For more information go to: "
"https://googleapis.dev/python/google-api-core/latest/auth.html"),
required=False)
parser.add_argument('-n', '--ci_build_number',
help="CircleCi build number (will be used as hash revision at index file)", required=True)
parser.add_argument('-e', '--extract_public_index_path', help="Full path of folder to extract the public index",
required=True)
parser.add_argument('-sb', '--storage_base_path', help="Storage base path of the directory to upload to.",
required=False)
parser.add_argument('-p', '--pack_name', help="Modified pack to upload to gcs.")
parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
parser.add_argument('-ea', '--extract_artifacts_path', help="Full path of folder to extract wanted packs",
required=True)
parser.add_argument('-di', '--dummy_index_dir_path', help="Full path to the dummy index in the private CI bucket",
required=True)
# disable-secrets-detection-end
return parser.parse_args()
def is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
    """Check whether the dummy index lock blob exists in the public bucket."""
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
return dummy_index_lock_blob.exists()
def lock_dummy_index(public_storage_bucket, dummy_index_lock_path):
    """Write a local lock file and upload it as the dummy index lock blob."""
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
with open(LOCK_FILE_PATH, 'w') as lock_file:
lock_file.write('locked')
with open(LOCK_FILE_PATH, 'rb') as lock_file:
dummy_index_lock_blob.upload_from_file(lock_file)
def acquire_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
    """Wait until the dummy index lock is free, then take it, aborting the build after the maximum wait time."""
total_seconds_waited = 0
while is_dummy_index_locked(public_storage_bucket, dummy_index_lock_path):
if total_seconds_waited >= MAX_SECONDS_TO_WAIT_FOR_LOCK:
logging.critical("Error: Failed too long to acquire lock, exceeded max wait time.")
sys.exit(1)
if total_seconds_waited % 60 == 0:
# Printing a message every minute to keep the machine from dying due to no output
logging.info("Waiting to acquire lock.")
total_seconds_waited += 10
time.sleep(10)
lock_dummy_index(public_storage_bucket, dummy_index_lock_path)
def release_dummy_index_lock(public_storage_bucket, dummy_index_lock_path):
    """Delete the dummy index lock blob and the local lock file."""
dummy_index_lock_blob = public_storage_bucket.blob(dummy_index_lock_path)
dummy_index_lock_blob.delete()
os.remove(LOCK_FILE_PATH)
def add_private_packs_from_dummy_index(private_packs, dummy_index_blob):
    """Merge the given private packs into the pack list of the current dummy index, avoiding duplicates."""
downloaded_dummy_index_path = 'current_dummy_index.zip'
extracted_dummy_index_path = 'dummy_index'
dummy_index_json_path = os.path.join(extracted_dummy_index_path, 'index', 'index.json')
dummy_index_blob.download_to_filename(downloaded_dummy_index_path)
os.mkdir(extracted_dummy_index_path)
if os.path.exists(downloaded_dummy_index_path):
with ZipFile(downloaded_dummy_index_path, 'r') as index_zip:
index_zip.extractall(extracted_dummy_index_path)
with open(dummy_index_json_path) as index_file:
index_json = json.load(index_file)
packs_from_dummy_index = index_json.get('packs', [])
for pack in private_packs:
is_pack_in_dummy_index = any(
[pack['id'] == dummy_index_pack['id'] for dummy_index_pack in packs_from_dummy_index])
if not is_pack_in_dummy_index:
packs_from_dummy_index.append(pack)
os.remove(downloaded_dummy_index_path)
shutil.rmtree(extracted_dummy_index_path)
return packs_from_dummy_index
def main():
install_logging('prepare_public_index_for_private_testing.log', logger=logging)
upload_config = option_handler()
service_account = upload_config.service_account
build_number = upload_config.ci_build_number
public_bucket_name = upload_config.public_bucket_name
private_bucket_name = upload_config.private_bucket_name
storage_base_path = upload_config.storage_base_path
extract_public_index_path = upload_config.extract_public_index_path
changed_pack = upload_config.pack_name
extract_destination_path = upload_config.extract_artifacts_path
packs_artifacts_path = upload_config.artifacts_path
dummy_index_dir_path = upload_config.dummy_index_dir_path
dummy_index_path = os.path.join(dummy_index_dir_path, 'index.zip')
dummy_index_lock_path = os.path.join(dummy_index_dir_path, 'lock.txt')
storage_client = init_storage_client(service_account)
public_storage_bucket = storage_client.bucket(public_bucket_name)
private_storage_bucket = storage_client.bucket(private_bucket_name)
dummy_index_blob = public_storage_bucket.blob(dummy_index_path)
with lock_and_unlock_dummy_index(public_storage_bucket, dummy_index_lock_path):
extract_packs_artifacts(packs_artifacts_path, extract_destination_path)
public_index_folder_path, public_index_blob, _ = download_and_extract_index(public_storage_bucket,
extract_public_index_path, storage_base_path)
# In order for the packs to be downloaded successfully, their price has to be 0
change_packs_price_to_zero(public_index_folder_path)
private_packs, private_index_path, private_index_blob = update_index_with_priced_packs(private_storage_bucket,
extract_destination_path,
public_index_folder_path,
changed_pack, True,
storage_base_path)
private_packs = add_private_packs_from_dummy_index(private_packs, dummy_index_blob)
upload_modified_index(public_index_folder_path, extract_public_index_path, dummy_index_blob, build_number,
private_packs)
if __name__ == '__main__':
main()
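# For reference, a hypothetical invocation of this script (illustrative only;
# the script name, bucket names, paths and build number below are made-up values
# wired to the arguments defined in option_handler()):
#
#   python prepare_public_index_for_private_testing.py \
#       -b marketplace-ci-public -pb marketplace-ci-private \
#       -s ./service_account.json -n 12345 \
#       -a ./packs_artifacts.zip -ea /tmp/extracted_artifacts \
#       -e /tmp/public_index -di /tmp/dummy_index \
#       -sb content/packs -p MyExamplePack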
| [
"Tests.Marketplace.marketplace_services.init_storage_client",
"zipfile.ZipFile",
"Tests.scripts.utils.logging_wrapper.info",
"time.sleep",
"Tests.private_build.upload_packs_private.extract_packs_artifacts",
"sys.exit",
"Tests.private_build.upload_packs_private.download_and_extract_index",
"os.remove",
"os.path.exists",
"argparse.ArgumentParser",
"os.mkdir",
"Tests.scripts.utils.logging_wrapper.critical",
"os.scandir",
"Tests.scripts.utils.log_util.install_logging",
"shutil.copy",
"Tests.private_build.upload_packs_private.update_index_with_priced_packs",
"Tests.scripts.utils.logging_wrapper.success",
"Tests.scripts.utils.logging_wrapper.exception",
"shutil.make_archive",
"datetime.datetime.utcnow",
"os.path.join",
"os.path.basename",
"shutil.rmtree",
"json.load",
"json.dump"
] | [((3268, 3310), 'os.path.basename', 'os.path.basename', (['public_index_folder_path'], {}), '(public_index_folder_path)\n', (3284, 3310), False, 'import os\n'), ((3332, 3465), 'shutil.make_archive', 'shutil.make_archive', ([], {'base_name': 'public_index_folder_path', 'format': '"""zip"""', 'root_dir': 'extract_destination_path', 'base_dir': 'index_zip_name'}), "(base_name=public_index_folder_path, format='zip',\n root_dir=extract_destination_path, base_dir=index_zip_name)\n", (3351, 3465), False, 'import shutil\n'), ((4149, 4217), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Store packs in cloud storage."""'}), "(description='Store packs in cloud storage.')\n", (4172, 4217), False, 'import argparse\n'), ((7524, 7549), 'os.remove', 'os.remove', (['LOCK_FILE_PATH'], {}), '(LOCK_FILE_PATH)\n', (7533, 7549), False, 'import os\n'), ((7760, 7823), 'os.path.join', 'os.path.join', (['extracted_dummy_index_path', '"""index"""', '"""index.json"""'], {}), "(extracted_dummy_index_path, 'index', 'index.json')\n", (7772, 7823), False, 'import os\n'), ((7899, 7935), 'os.mkdir', 'os.mkdir', (['extracted_dummy_index_path'], {}), '(extracted_dummy_index_path)\n', (7907, 7935), False, 'import os\n'), ((7943, 7986), 'os.path.exists', 'os.path.exists', (['downloaded_dummy_index_path'], {}), '(downloaded_dummy_index_path)\n', (7957, 7986), False, 'import os\n'), ((8555, 8593), 'os.remove', 'os.remove', (['downloaded_dummy_index_path'], {}), '(downloaded_dummy_index_path)\n', (8564, 8593), False, 'import os\n'), ((8598, 8639), 'shutil.rmtree', 'shutil.rmtree', (['extracted_dummy_index_path'], {}), '(extracted_dummy_index_path)\n', (8611, 8639), False, 'import shutil\n'), ((8692, 8771), 'Tests.scripts.utils.log_util.install_logging', 'install_logging', (['"""prepare_public_index_for_private_testing.log"""'], {'logger': 'logging'}), "('prepare_public_index_for_private_testing.log', logger=logging)\n", (8707, 8771), False, 'from Tests.scripts.utils.log_util import install_logging\n'), ((9408, 9455), 'os.path.join', 'os.path.join', (['dummy_index_dir_path', '"""index.zip"""'], {}), "(dummy_index_dir_path, 'index.zip')\n", (9420, 9455), False, 'import os\n'), ((9484, 9530), 'os.path.join', 'os.path.join', (['dummy_index_dir_path', '"""lock.txt"""'], {}), "(dummy_index_dir_path, 'lock.txt')\n", (9496, 9530), False, 'import os\n'), ((9553, 9589), 'Tests.Marketplace.marketplace_services.init_storage_client', 'init_storage_client', (['service_account'], {}), '(service_account)\n', (9572, 9589), False, 'from Tests.Marketplace.marketplace_services import init_storage_client\n'), ((1095, 1124), 'json.load', 'json.load', (['pack_metadata_file'], {}), '(pack_metadata_file)\n', (1104, 1124), False, 'import json\n'), ((1230, 1284), 'json.dump', 'json.dump', (['pack_metadata', 'pack_metadata_file'], {'indent': '(4)'}), '(pack_metadata, pack_metadata_file, indent=4)\n', (1239, 1284), False, 'import json\n'), ((1598, 1641), 'os.path.join', 'os.path.join', (['path_to_pack', '"""metadata.json"""'], {}), "(path_to_pack, 'metadata.json')\n", (1610, 1641), False, 'import os\n'), ((2002, 2052), 'os.path.join', 'os.path.join', (['private_index_folder_path', 'pack_name'], {}), '(private_index_folder_path, pack_name)\n', (2014, 2052), False, 'import os\n'), ((2092, 2141), 'os.path.join', 'os.path.join', (['public_index_folder_path', 'pack_name'], {}), '(public_index_folder_path, pack_name)\n', (2104, 2141), False, 'import os\n'), ((2150, 2222), 'shutil.copy', 'shutil.copy', 
(['path_to_pack_in_private_index', 'path_to_pack_in_public_index'], {}), '(path_to_pack_in_private_index, path_to_pack_in_public_index)\n', (2161, 2222), False, 'import shutil\n'), ((3207, 3245), 'json.dump', 'json.dump', (['index', 'index_file'], {'indent': '(4)'}), '(index, index_file, indent=4)\n', (3216, 3245), False, 'import json\n'), ((3745, 3804), 'Tests.scripts.utils.logging_wrapper.success', 'logging.success', (['"""Finished uploading index.zip to storage."""'], {}), "('Finished uploading index.zip to storage.')\n", (3760, 3804), True, 'from Tests.scripts.utils import logging_wrapper as logging\n'), ((3959, 3998), 'shutil.rmtree', 'shutil.rmtree', (['public_index_folder_path'], {}), '(public_index_folder_path)\n', (3972, 3998), False, 'import shutil\n'), ((7246, 7260), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (7256, 7260), False, 'import time\n'), ((8192, 8213), 'json.load', 'json.load', (['index_file'], {}), '(index_file)\n', (8201, 8213), False, 'import json\n'), ((9895, 9966), 'Tests.private_build.upload_packs_private.extract_packs_artifacts', 'extract_packs_artifacts', (['packs_artifacts_path', 'extract_destination_path'], {}), '(packs_artifacts_path, extract_destination_path)\n', (9918, 9966), False, 'from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, extract_packs_artifacts\n'), ((10024, 10123), 'Tests.private_build.upload_packs_private.download_and_extract_index', 'download_and_extract_index', (['public_storage_bucket', 'extract_public_index_path', 'storage_base_path'], {}), '(public_storage_bucket, extract_public_index_path,\n storage_base_path)\n', (10050, 10123), False, 'from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, extract_packs_artifacts\n'), ((10419, 10572), 'Tests.private_build.upload_packs_private.update_index_with_priced_packs', 'update_index_with_priced_packs', (['private_storage_bucket', 'extract_destination_path', 'public_index_folder_path', 'changed_pack', '(True)', 'storage_base_path'], {}), '(private_storage_bucket,\n extract_destination_path, public_index_folder_path, changed_pack, True,\n storage_base_path)\n', (10449, 10572), False, 'from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, extract_packs_artifacts\n'), ((794, 857), 'Tests.scripts.utils.logging_wrapper.exception', 'logging.exception', (['"""Error in dummy index lock context manager."""'], {}), "('Error in dummy index lock context manager.')\n", (811, 857), True, 'from Tests.scripts.utils import logging_wrapper as logging\n'), ((1413, 1449), 'os.scandir', 'os.scandir', (['public_index_folder_path'], {}), '(public_index_folder_path)\n', (1423, 1449), False, 'import os\n'), ((1857, 1894), 'os.scandir', 'os.scandir', (['private_index_folder_path'], {}), '(private_index_folder_path)\n', (1867, 1894), False, 'import os\n'), ((2868, 2920), 'os.path.join', 'os.path.join', (['public_index_folder_path', '"""index.json"""'], {}), "(public_index_folder_path, 'index.json')\n", (2880, 2920), False, 'import os\n'), ((3835, 3922), 'Tests.scripts.utils.logging_wrapper.exception', 'logging.exception', (['"""Failed in uploading index. Mismatch in index file generation."""'], {}), "(\n 'Failed in uploading index. 
Mismatch in index file generation.')\n", (3852, 3922), True, 'from Tests.scripts.utils import logging_wrapper as logging\n'), ((3926, 3937), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3934, 3937), False, 'import sys\n'), ((6903, 6991), 'Tests.scripts.utils.logging_wrapper.critical', 'logging.critical', (['"""Error: Failed too long to acquire lock, exceeded max wait time."""'], {}), "(\n 'Error: Failed too long to acquire lock, exceeded max wait time.')\n", (6919, 6991), True, 'from Tests.scripts.utils import logging_wrapper as logging\n'), ((6999, 7010), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7007, 7010), False, 'import sys\n'), ((7161, 7201), 'Tests.scripts.utils.logging_wrapper.info', 'logging.info', (['"""Waiting to acquire lock."""'], {}), "('Waiting to acquire lock.')\n", (7173, 7201), True, 'from Tests.scripts.utils import logging_wrapper as logging\n'), ((8001, 8042), 'zipfile.ZipFile', 'ZipFile', (['downloaded_dummy_index_path', '"""r"""'], {}), "(downloaded_dummy_index_path, 'r')\n", (8008, 8042), False, 'from zipfile import ZipFile\n'), ((3104, 3121), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3119, 3121), False, 'from datetime import datetime\n')] |
import os
import shutil
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional
from huggingface_hub import Repository
from loguru import logger
from prettytable import PrettyTable
from .splits import TEST_SPLIT, TRAIN_SPLIT, VALID_SPLIT
from .tasks import TASKS
from .utils import BOLD_TAG, CYAN_TAG, GREEN_TAG, PURPLE_TAG, RESET_TAG, YELLOW_TAG, http_get, http_post
from .validation import validate_file
FILE_STATUS = (
"☁ Uploaded",
"⌚ Queued",
"⚙ In Progress...",
"✅ Success!",
"❌ Failed: file not found",
"❌ Failed: unsupported file type",
"❌ Failed: server error",
"❌ Invalid column mapping, please fix it and re-upload the file.",
)
JOB_STATUS = (
("⌚", "queued"),
("🚀", "start"),
("⚙", "data_munging"),
("🏃", "model_training"),
("✅", "success"),
("❌", "failed"),
)
PROJECT_STATUS = (
("✨", "Created"),
("🚀", "Data processing started"),
("✅", "Data processing successful"),
("❌", "Failed to download data files from the huggingface hub"),
("❌", "Missing 'train' or 'valid' split in data files"),
("❌", "Failed to process data files"),
("❌", "Failed to upload processed data files to the huggingface hub"),
)
SPLITS = (TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT)
@dataclass
class TrainingJob:
"""A training job in AutoNLP"""
job_id: int
status: str
status_emoji: str
created_at: datetime
updated_at: datetime
@classmethod
def from_json_resp(cls, json_resp: dict):
return cls(
job_id=json_resp["id"],
status_emoji=JOB_STATUS[json_resp["status"] - 1][0],
status=JOB_STATUS[json_resp["status"] - 1][1],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
)
def __str__(self):
return "\n".join(
[
f"📚 Model # {self.job_id}",
f" • {BOLD_TAG}Status{RESET_TAG}: {self.status_emoji} {self.status}",
f" • {BOLD_TAG}Created at{RESET_TAG}: {self.created_at.strftime('%Y-%m-%d %H:%M Z')}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
]
)
@dataclass
class UploadedFile:
"""A file uploaded to an AutoNLP project"""
file_id: int
filename: str
processing_status: str
split: str
col_mapping: Dict[str, str]
created_at: datetime
updated_at: datetime
@classmethod
def from_json_resp(cls, json_resp: dict):
return cls(
file_id=json_resp["data_file_id"],
filename=json_resp["fname"],
processing_status=FILE_STATUS[json_resp["download_status"] - 1],
split=SPLITS[json_resp["split"] - 1],
col_mapping=json_resp["col_mapping"],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
)
def __str__(self):
return "\n".join(
[
f"📁 {CYAN_TAG}{self.filename}{RESET_TAG} (id # {self.file_id})",
f" • {BOLD_TAG}Split{RESET_TAG}: {self.split}",
f" • {BOLD_TAG}Processing status{RESET_TAG}: {self.processing_status}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
]
)
@dataclass
class Project:
"""An AutoNLP project"""
_token: str
proj_id: int
name: str
user: str
task: str
status_emoji: str
status: str
language: str
created_at: datetime
updated_at: datetime
dataset_id: str
files: Optional[List[UploadedFile]] = None
training_jobs: Optional[List] = None
@classmethod
def from_json_resp(cls, json_resp: dict, token: str):
"""Build a Project from the API response, JSON-encoded"""
return cls(
proj_id=json_resp["id"],
name=json_resp["proj_name"],
user=json_resp["username"],
task=list(filter(lambda key: TASKS[key] == json_resp["task"], TASKS.keys()))[0],
status_emoji=PROJECT_STATUS[json_resp["status"] - 1][0],
status=PROJECT_STATUS[json_resp["status"] - 1][1],
created_at=datetime.fromisoformat(json_resp["created_at"]),
updated_at=datetime.fromisoformat(json_resp["updated_at"]),
dataset_id=json_resp["dataset_id"],
language=json_resp["config"]["language"],
_token=token,
)
def refresh(self):
"""Update information about uploaded files and models attached to the project"""
logger.info("🔄 Refreshing uploaded files information...")
resp = http_get(path=f"/projects/{self.proj_id}/data", token=self._token)
json_files = resp.json()
self.files = [UploadedFile.from_json_resp(file) for file in json_files]
logger.info("🔄 Refreshing models information...")
resp = http_get(path=f"/projects/{self.proj_id}/jobs", token=self._token)
json_jobs = resp.json()
self.training_jobs = [TrainingJob.from_json_resp(job) for job in json_jobs]
def upload(self, filepaths: List[str], split: str, col_mapping: Dict[str, str]):
"""Uploads files to the project"""
local_dataset_dir = os.path.expanduser(f"~/.huggingface/autonlp/projects/{self.dataset_id}")
if os.path.exists(local_dataset_dir):
        if os.path.isdir(os.path.join(local_dataset_dir, ".git")):  # an existing clone contains a .git folder
clone_from = None
else:
shutil.rmtree(local_dataset_dir)
clone_from = "https://huggingface.co/datasets/" + self.dataset_id
else:
clone_from = "https://huggingface.co/datasets/" + self.dataset_id
dataset_repo = Repository(
local_dir=local_dataset_dir,
clone_from=clone_from,
use_auth_token=self._token,
)
dataset_repo.git_pull()
for idx, file_path in enumerate(filepaths):
if not os.path.isfile(file_path):
logger.error(f"[{idx + 1}/{len(filepaths)}] ❌ '{file_path}' does not exist or is not a file!")
continue
file_name = os.path.basename(file_path)
file_extension = file_name.split(".")[-1]
src = os.path.expanduser(file_path)
dst = os.path.join(local_dataset_dir, "raw", file_name)
logger.info(f"[{idx + 1}/{len(filepaths)}] 📦 Copying {src} to {dst}...")
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copyfile(src, dst)
logger.info(f"[{idx + 1}/{len(filepaths)}] 🔎 Validating {dst} and column mapping...")
validate_file(path=dst, task=self.task, file_ext=file_extension, col_mapping=col_mapping)
dataset_repo.lfs_track(patterns=[f"raw/*.{file_extension}"])
dataset_repo.git_pull()
try:
logger.info("☁ Uploading files to the dataset hub...")
dataset_repo.push_to_hub(commit_message="Upload from AutoNLP CLI")
logger.info("✅ Successfully uploaded the files!")
except OSError as err:
if "nothing to commit, working tree clean" in err.args[0]:
logger.info("❔ Files did not change since last upload!")
dataset_repo.git_push()
return
logger.error("❌ Something went wrong when uploading the files!")
raise
for idx, file_path in enumerate(filepaths):
file_name = os.path.basename(file_path)
logger.info(f"[{idx + 1}/{len(filepaths)}] 📁 Registering file {file_name} into project '{file_name}'...")
payload = {
"split": split,
"col_mapping": col_mapping,
"data_files": [{"fname": file_name, "username": self.user}],
}
http_post(path=f"/projects/{self.proj_id}/data/add", payload=payload, token=self._token)
logger.info(f"[{idx + 1}/{len(filepaths)}] ✅ Success!")
def train(self):
"""Starts training on the models"""
http_get(path=f"/projects/{self.proj_id}/data/start_process", token=self._token)
logger.info("🔥🔥 Training started!")
def __str__(self):
header = "\n".join(
[
f"AutoNLP Project (id # {self.proj_id})",
"~" * 35,
f" • {BOLD_TAG}Name{RESET_TAG}: {PURPLE_TAG}{self.name}{RESET_TAG}",
f" • {BOLD_TAG}Owner{RESET_TAG}: {GREEN_TAG}{self.user}{RESET_TAG}",
f" • {BOLD_TAG}Status{RESET_TAG}: {BOLD_TAG}{self.status_emoji} {self.status}{RESET_TAG}",
f" • {BOLD_TAG}Task{RESET_TAG}: {YELLOW_TAG}{self.task.title().replace('_', ' ')}{RESET_TAG}",
f" • {BOLD_TAG}Created at{RESET_TAG}: {self.created_at.strftime('%Y-%m-%d %H:%M Z')}",
f" • {BOLD_TAG}Last update{RESET_TAG}: {self.updated_at.strftime('%Y-%m-%d %H:%M Z')}",
"",
]
)
printout = [header]
# Uploaded files information
if self.files is None:
descriptions = ["❓ Files information unknown, update the project"]
else:
if len(self.files) == 0:
descriptions = ["🤷 No files uploaded yet!"]
else:
sorted_files = sorted(self.files, key=lambda file: file.split) # Sort by split
descriptions = [str(file) for file in sorted_files]
printout.append(
"\n".join(
[
"~" * 14 + f" {BOLD_TAG}Files{RESET_TAG} " + "~" * 14,
"",
"Dataset ID:",
f"{CYAN_TAG}{self.dataset_id}{RESET_TAG}",
"",
]
+ descriptions
)
)
# Training jobs information
if self.training_jobs is None:
jobs_str = "❓ Models information unknown, update the project"
else:
if len(self.training_jobs) == 0:
jobs_str = "🤷 No train jobs started yet!"
else:
model_table = PrettyTable(["", "ID", "Status", "Creation date", "Last update"])
for job in sorted(self.training_jobs, key=lambda job: job.job_id):
model_table.add_row(
[
job.status_emoji,
job.job_id,
job.status,
job.created_at.strftime("%Y-%m-%d %H:%M Z"),
job.updated_at.strftime("%Y-%m-%d %H:%M Z"),
]
)
jobs_str = str(model_table)
printout.append("\n".join(["", "~" * 12 + f" {BOLD_TAG}Models{RESET_TAG} " + "~" * 11, "", jobs_str]))
return "\n".join(printout)
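# A minimal usage sketch (not part of the original module). It assumes a
# ``Project`` instance obtained elsewhere, e.g. through the AutoNLP client, and
# the file name and column mapping below are placeholders:
#
#   project.upload(
#       filepaths=["./train.csv"],
#       split="train",
#       col_mapping={"text": "text", "label": "target"},
#   )
#   project.train()    # queue the training jobs
#   project.refresh()  # pull the latest file and job status from the API
#   print(project)     # pretty-print project, files and models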
| [
"prettytable.PrettyTable",
"os.path.exists",
"loguru.logger.info",
"os.path.join",
"huggingface_hub.Repository",
"os.path.isfile",
"os.path.dirname",
"shutil.copyfile",
"loguru.logger.error",
"os.path.basename",
"datetime.datetime.fromisoformat",
"shutil.rmtree",
"os.path.expanduser"
] | [((4759, 4816), 'loguru.logger.info', 'logger.info', (['"""🔄 Refreshing uploaded files information..."""'], {}), "('🔄 Refreshing uploaded files information...')\n", (4770, 4816), False, 'from loguru import logger\n'), ((5021, 5070), 'loguru.logger.info', 'logger.info', (['"""🔄 Refreshing models information..."""'], {}), "('🔄 Refreshing models information...')\n", (5032, 5070), False, 'from loguru import logger\n'), ((5426, 5498), 'os.path.expanduser', 'os.path.expanduser', (['f"""~/.huggingface/autonlp/projects/{self.dataset_id}"""'], {}), "(f'~/.huggingface/autonlp/projects/{self.dataset_id}')\n", (5444, 5498), False, 'import os\n'), ((5510, 5543), 'os.path.exists', 'os.path.exists', (['local_dataset_dir'], {}), '(local_dataset_dir)\n', (5524, 5543), False, 'import os\n'), ((5913, 6007), 'huggingface_hub.Repository', 'Repository', ([], {'local_dir': 'local_dataset_dir', 'clone_from': 'clone_from', 'use_auth_token': 'self._token'}), '(local_dir=local_dataset_dir, clone_from=clone_from,\n use_auth_token=self._token)\n', (5923, 6007), False, 'from huggingface_hub import Repository\n'), ((8334, 8369), 'loguru.logger.info', 'logger.info', (['"""🔥🔥 Training started!"""'], {}), "('🔥🔥 Training started!')\n", (8345, 8369), False, 'from loguru import logger\n'), ((6342, 6369), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (6358, 6369), False, 'import os\n'), ((6442, 6471), 'os.path.expanduser', 'os.path.expanduser', (['file_path'], {}), '(file_path)\n', (6460, 6471), False, 'import os\n'), ((6490, 6539), 'os.path.join', 'os.path.join', (['local_dataset_dir', '"""raw"""', 'file_name'], {}), "(local_dataset_dir, 'raw', file_name)\n", (6502, 6539), False, 'import os\n'), ((6698, 6723), 'shutil.copyfile', 'shutil.copyfile', (['src', 'dst'], {}), '(src, dst)\n', (6713, 6723), False, 'import shutil\n'), ((7058, 7112), 'loguru.logger.info', 'logger.info', (['"""☁ Uploading files to the dataset hub..."""'], {}), "('☁ Uploading files to the dataset hub...')\n", (7069, 7112), False, 'from loguru import logger\n'), ((7204, 7254), 'loguru.logger.info', 'logger.info', (['"""✅ Successfully uploaded the files!"""'], {}), "('✅ Successfully uploaded the files!')\n", (7215, 7254), False, 'from loguru import logger\n'), ((7665, 7692), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (7681, 7692), False, 'import os\n'), ((1743, 1790), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['created_at']"], {}), "(json_resp['created_at'])\n", (1765, 1790), False, 'from datetime import datetime\n'), ((1815, 1862), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['updated_at']"], {}), "(json_resp['updated_at'])\n", (1837, 1862), False, 'from datetime import datetime\n'), ((2924, 2971), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['created_at']"], {}), "(json_resp['created_at'])\n", (2946, 2971), False, 'from datetime import datetime\n'), ((2996, 3043), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['updated_at']"], {}), "(json_resp['updated_at'])\n", (3018, 3043), False, 'from datetime import datetime\n'), ((4379, 4426), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['created_at']"], {}), "(json_resp['created_at'])\n", (4401, 4426), False, 'from datetime import datetime\n'), ((4451, 4498), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (["json_resp['updated_at']"], {}), "(json_resp['updated_at'])\n", (4473, 4498), 
False, 'from datetime import datetime\n'), ((5574, 5612), 'os.path.join', 'os.path.join', (['local_dataset_dir', '"""git"""'], {}), "(local_dataset_dir, 'git')\n", (5586, 5612), False, 'import os\n'), ((5683, 5715), 'shutil.rmtree', 'shutil.rmtree', (['local_dataset_dir'], {}), '(local_dataset_dir)\n', (5696, 5715), False, 'import shutil\n'), ((6155, 6180), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (6169, 6180), False, 'import os\n'), ((6649, 6669), 'os.path.dirname', 'os.path.dirname', (['dst'], {}), '(dst)\n', (6664, 6669), False, 'import os\n'), ((7505, 7569), 'loguru.logger.error', 'logger.error', (['"""❌ Something went wrong when uploading the files!"""'], {}), "('❌ Something went wrong when uploading the files!')\n", (7517, 7569), False, 'from loguru import logger\n'), ((10329, 10394), 'prettytable.PrettyTable', 'PrettyTable', (["['', 'ID', 'Status', 'Creation date', 'Last update']"], {}), "(['', 'ID', 'Status', 'Creation date', 'Last update'])\n", (10340, 10394), False, 'from prettytable import PrettyTable\n'), ((7373, 7429), 'loguru.logger.info', 'logger.info', (['"""❔ Files did not change since last upload!"""'], {}), "('❔ Files did not change since last upload!')\n", (7384, 7429), False, 'from loguru import logger\n')] |
r"""Training and evaluating quantum kernels
===========================================
.. meta::
:property="og:description": Kernels and alignment training with Pennylane.
:property="og:image": https://pennylane.ai/qml/_images/QEK_thumbnail.png
.. related::
tutorial_kernel_based_training Kernel-based training with scikit-learn
tutorial_data_reuploading_classifier Classification with data reuploading
*Authors: <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. Posted: 24 June 2021*
Kernel methods are one of the cornerstones of classical machine learning.
Here we are concerned with kernels that can be evaluated on quantum computers,
*quantum kernels* for short.
In this tutorial you will learn how to evaluate kernels, use them for classification
and train them with gradient-based optimization, and all that using the
functionality of PennyLane's
`kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__.
The demo is based on Ref. [#Training_QEKs]_, a project from Xanadu's own
`QHack <https://qhack.ai/>`__ hackathon.
What are kernel methods?
------------------------
To understand what a kernel method does, let's first revisit
one of the simplest methods to assign binary labels to datapoints:
linear classification.
Imagine we want to discern two different classes of points that lie in
different corners of the plane. A linear classifier corresponds to
drawing a line and assigning different labels to the regions on opposing
sides of the line:
.. figure:: ../demonstrations/kernels_module/linear_classification.png
:align: center
:width: 30%
We can mathematically formalize this by assigning the label :math:`y`
via
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \boldsymbol{x}\rangle + b).
The vector :math:`\boldsymbol{w}` points perpendicular to the line and
thus determines its slope. The independent term :math:`b` specifies the
position on the plane. In this form, linear classification can also be
extended to higher dimensional vectors :math:`\boldsymbol{x}`, where a
line does not divide the entire space into two regions anymore. Instead
one needs a *hyperplane*. It is immediately clear that this method is
not very powerful, as datasets that are not separable by a hyperplane
can't be classified without error.
We can actually sneak around this limitation by performing a neat trick:
if we define some map :math:`\phi(\boldsymbol{x})` that *embeds* our
datapoints into a larger *feature space* and then perform linear
classification there, we could actually realise non-linear
classification in our original space!
.. figure:: ../demonstrations/kernels_module/embedding_nonlinear_classification.png
:align: center
:width: 65%
If we go back to the expression for our prediction and include the
embedding, we get
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \phi(\boldsymbol{x})\rangle + b).
We will forgo one tiny step, but it can be shown that for the purpose
of optimal classification, we can choose the vector defining the
decision boundary as a linear combination of the embedded datapoints
:math:`\boldsymbol{w} = \sum_i \alpha_i \phi(\boldsymbol{x}_i)`. Putting
this into the formula yields
.. math::
y(\boldsymbol{x}) = \operatorname{sgn}\left(\sum_i \alpha_i \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x})\rangle + b\right).
This rewriting might not seem useful at first, but notice the above
formula only contains inner products between vectors in the embedding
space:
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x}_j)\rangle.
We call this function the *kernel*. It provides the advantage that we can often
find an explicit formula for the kernel :math:`k` that makes it
superfluous to actually perform the (potentially expensive) embedding
:math:`\phi`. Consider for example the following embedding and the
associated kernel:
.. math::
\phi((x_1, x_2)) &= (x_1^2, \sqrt{2} x_1 x_2, x_2^2) \\
k(\boldsymbol{x}, \boldsymbol{y}) &= x_1^2 y_1^2 + 2 x_1 x_2 y_1 y_2 + x_2^2 y_2^2 = \langle \boldsymbol{x}, \boldsymbol{y} \rangle^2.
This means by just replacing the regular scalar product in our linear
classification with the map :math:`k`, we can actually express much more
intricate decision boundaries!
This is very important, because in many interesting cases the embedding :math:`\phi`
will be much costlier to compute than the kernel :math:`k`.
In this demo, we will explore one particular kind of kernel
that can be realized on near-term quantum computers, namely *Quantum
Embedding Kernels (QEKs)*. These are kernels that arise from embedding
data into the space of quantum states. We formalize this by considering
a parameterised quantum circuit :math:`U(\boldsymbol{x})` that maps
a datapoint :math:`\boldsymbol{x}` to the state
.. math::
|\psi(\boldsymbol{x})\rangle = U(\boldsymbol{x}) |0 \rangle.
The kernel value is then given by the *overlap* of the associated
embedded quantum states
.. math::
k(\boldsymbol{x}_i, \boldsymbol{x}_j) = | \langle\psi(\boldsymbol{x}_i)|\psi(\boldsymbol{x}_j)\rangle|^2.
"""
##############################################################################
# A toy problem
# -------------
# In this demo, we will treat a toy problem that showcases the
# inner workings of classification with quantum embedding kernels,
# training variational embedding kernels and the available functionalities
# to do both in PennyLane. We of course need to start with some imports:
from pennylane import numpy as np
import matplotlib as mpl
np.random.seed(1359)
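##############################################################################
# Before turning to quantum kernels, we can quickly check the classical example
# from the introduction numerically (a small addition to the original text): the
# explicit feature map :math:`\phi((x_1, x_2)) = (x_1^2, \sqrt{2} x_1 x_2, x_2^2)`
# indeed reproduces the squared scalar product as its kernel.
def _phi(v):
    """Explicit polynomial feature map from the introduction."""
    return np.array([v[0] ** 2, np.sqrt(2) * v[0] * v[1], v[1] ** 2])
_a, _b = np.array([0.3, -0.8]), np.array([1.1, 0.4])
print(np.isclose(np.dot(_phi(_a), _phi(_b)), np.dot(_a, _b) ** 2))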
##############################################################################
# And we proceed right away to create a dataset to work with, the
# ``DoubleCake`` dataset. Firstly, we define two functions to enable us to
# generate the data.
# The details of these functions are not essential for understanding the demo,
# so don't mind them if they are confusing.
def _make_circular_data(num_sectors):
"""Generate datapoints arranged in an even circle."""
center_indices = np.array(range(0, num_sectors))
sector_angle = 2 * np.pi / num_sectors
angles = (center_indices + 0.5) * sector_angle
x = 0.7 * np.cos(angles)
y = 0.7 * np.sin(angles)
labels = 2 * np.remainder(np.floor_divide(angles, sector_angle), 2) - 1
return x, y, labels
def make_double_cake_data(num_sectors):
x1, y1, labels1 = _make_circular_data(num_sectors)
x2, y2, labels2 = _make_circular_data(num_sectors)
# x and y coordinates of the datapoints
x = np.hstack([x1, 0.5 * x2])
y = np.hstack([y1, 0.5 * y2])
# Canonical form of dataset
X = np.vstack([x, y]).T
labels = np.hstack([labels1, -1 * labels2])
# Canonical form of labels
Y = labels.astype(int)
return X, Y
##############################################################################
# Next, we define a function to help plot the ``DoubleCake`` data:
def plot_double_cake_data(X, Y, ax, num_sectors=None):
"""Plot double cake data and corresponding sectors."""
x, y = X.T
cmap = mpl.colors.ListedColormap(["#FF0000", "#0000FF"])
ax.scatter(x, y, c=Y, cmap=cmap, s=25, marker="s")
if num_sectors is not None:
sector_angle = 360 / num_sectors
for i in range(num_sectors):
color = ["#FF0000", "#0000FF"][(i % 2)]
other_color = ["#FF0000", "#0000FF"][((i + 1) % 2)]
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
1,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=color,
alpha=0.1,
width=0.5,
)
)
ax.add_artist(
mpl.patches.Wedge(
(0, 0),
0.5,
i * sector_angle,
(i + 1) * sector_angle,
lw=0,
color=other_color,
alpha=0.1,
)
)
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_aspect("equal")
ax.axis("off")
return ax
##############################################################################
# Let's now have a look at our dataset. In our example, we will work with
# 3 sectors:
import matplotlib.pyplot as plt
num_sectors = 3
X, Y = make_double_cake_data(num_sectors)
ax = plot_double_cake_data(X, Y, plt.gca(), num_sectors=num_sectors)
##############################################################################
# Defining a Quantum Embedding Kernel
# -----------------------------------
# PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__
# allows for a particularly simple
# implementation of Quantum Embedding Kernels. The first ingredient we
# need for this is an *ansatz*, which we will construct by repeating a
# layer as building block. Let's start by defining this layer:
import pennylane as qml
def layer(x, params, wires, i0=0, inc=1):
"""Building block of the embedding ansatz"""
i = i0
for j, wire in enumerate(wires):
qml.Hadamard(wires=[wire])
qml.RZ(x[i % len(x)], wires=[wire])
i += inc
qml.RY(params[0, j], wires=[wire])
qml.broadcast(unitary=qml.CRZ, pattern="ring", wires=wires, parameters=params[1])
##############################################################################
# To construct the ansatz, this layer is repeated multiple times, reusing
# the datapoint ``x`` but feeding different variational
# parameters ``params`` into each of them.
# Together, the datapoint and the variational parameters fully determine
# the embedding ansatz :math:`U(\boldsymbol{x})`.
# In order to construct the full kernel circuit, we also require its adjoint
# :math:`U(\boldsymbol{x})^\dagger`, which we can obtain via ``qml.adjoint``.
def ansatz(x, params, wires):
"""The embedding ansatz"""
for j, layer_params in enumerate(params):
layer(x, layer_params, wires, i0=j * len(wires))
adjoint_ansatz = qml.adjoint(ansatz)
def random_params(num_wires, num_layers):
"""Generate random variational parameters in the shape for the ansatz."""
return np.random.uniform(0, 2 * np.pi, (num_layers, 2, num_wires), requires_grad=True)
##############################################################################
# Together with the ansatz we only need a device to run the quantum circuit on.
# For the purpose of this tutorial we will use PennyLane's ``default.qubit``
# device with 5 wires in analytic mode.
dev = qml.device("default.qubit", wires=5, shots=None)
wires = dev.wires.tolist()
##############################################################################
# Let us now define the quantum circuit that realizes the kernel. We will compute
# the overlap of the quantum states by first applying the embedding of the first
# datapoint and then the adjoint of the embedding of the second datapoint. We
# finally extract the probabilities of observing each basis state.
@qml.qnode(dev)
def kernel_circuit(x1, x2, params):
ansatz(x1, params, wires=wires)
adjoint_ansatz(x2, params, wires=wires)
return qml.probs(wires=wires)
##############################################################################
# The kernel function itself is now obtained by looking at the probability
# of observing the all-zero state at the end of the kernel circuit -- because
# of the ordering in ``qml.probs``, this is the first entry:
def kernel(x1, x2, params):
return kernel_circuit(x1, x2, params)[0]
##############################################################################
#
# .. note::
# An alternative way to set up the kernel circuit in PennyLane would be
# to use the observable type
# `Projector <https://pennylane.readthedocs.io/en/latest/code/api/pennylane.Projector.html>`__.
# This is shown in the
# `demo on kernel-based training of quantum models <https://pennylane.ai/qml/demos/tutorial_kernel_based_training.html>`__, where you will also find more
# background information on the kernel circuit structure itself.
#
# Before focusing on the kernel values we have to provide values for the
# variational parameters. At this point we fix the number of layers in the
# ansatz circuit to :math:`6`.
init_params = random_params(num_wires=5, num_layers=6)
##############################################################################
# Now we can have a look at the kernel value between the first and the
# second datapoint:
kernel_value = kernel(X[0], X[1], init_params)
print(f"The kernel value between the first and second datapoint is {kernel_value:.3f}")
##############################################################################
# The mutual kernel values between all elements of the dataset form the
# *kernel matrix*. We can inspect it via the ``qml.kernels.square_kernel_matrix``
# method, which makes use of symmetry of the kernel,
# :math:`k(\boldsymbol{x}_i,\boldsymbol{x}_j) = k(\boldsymbol{x}_j, \boldsymbol{x}_i)`.
# In addition, the option ``assume_normalized_kernel=True`` ensures that we do not
# calculate the entries between the same datapoints, as we know them to be 1
# for our noiseless simulation. Overall this means that we compute
# :math:`\frac{1}{2}(N^2-N)` kernel values for :math:`N` datapoints.
# To include the variational parameters, we construct a ``lambda`` function that
# fixes them to the values we sampled above.
init_kernel = lambda x1, x2: kernel(x1, x2, init_params)
K_init = qml.kernels.square_kernel_matrix(X, init_kernel, assume_normalized_kernel=True)
with np.printoptions(precision=3, suppress=True):
print(K_init)
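##############################################################################
# As a quick sanity check (added here for illustration), the kernel matrix should
# be symmetric and, since we assumed a normalized kernel, have ones on its diagonal:
print(np.allclose(K_init, K_init.T), np.allclose(np.diag(K_init), 1.0))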
##############################################################################
# Using the Quantum Embedding Kernel for predictions
# --------------------------------------------------
# The quantum kernel alone cannot be used to make predictions on a
# dataset, because it is essentially just a tool to measure the similarity
# between two datapoints. To perform an actual prediction we will make use
# of scikit-learn's Support Vector Classifier (SVC).
from sklearn.svm import SVC
##############################################################################
# To construct the SVM, we need to supply ``sklearn.svm.SVC`` with a function
# that takes two sets of datapoints and returns the associated kernel matrix.
# We can make use of the function ``qml.kernels.kernel_matrix`` that provides
# this functionality. It expects the kernel to not have additional parameters
# besides the datapoints, which is why we again supply the variational
# parameters via the ``lambda`` function from above.
# Once we have this, we can let scikit-learn adjust the SVM from our Quantum
# Embedding Kernel.
#
# .. note::
# This step does *not* modify the variational parameters in our circuit
#     ansatz. What it does is solve a different optimization task for the
# :math:`\alpha` and :math:`b` vectors we introduced in the beginning.
svm = SVC(kernel=lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, init_kernel)).fit(X, Y)
##############################################################################
# To see how well our classifier performs we will measure which percentage
# of the dataset it classifies correctly.
def accuracy(classifier, X, Y_target):
return 1 - np.count_nonzero(classifier.predict(X) - Y_target) / len(Y_target)
accuracy_init = accuracy(svm, X, Y)
print(f"The accuracy of the kernel with random parameters is {accuracy_init:.3f}")
##############################################################################
# We are also interested in seeing what the decision boundaries in this
# classification look like. This could help us spot overfitting issues
# visually in more complex data sets. To this end we will introduce a
# second helper method.
def plot_decision_boundaries(classifier, ax, N_gridpoints=14):
_xx, _yy = np.meshgrid(np.linspace(-1, 1, N_gridpoints), np.linspace(-1, 1, N_gridpoints))
_zz = np.zeros_like(_xx)
for idx in np.ndindex(*_xx.shape):
_zz[idx] = classifier.predict(np.array([_xx[idx], _yy[idx]])[np.newaxis, :])
plot_data = {"_xx": _xx, "_yy": _yy, "_zz": _zz}
ax.contourf(
_xx,
_yy,
_zz,
cmap=mpl.colors.ListedColormap(["#FF0000", "#0000FF"]),
alpha=0.2,
levels=[-1, 0, 1],
)
plot_double_cake_data(X, Y, ax)
return plot_data
##############################################################################
# With that done, let's have a look at the decision boundaries for our
# initial classifier:
init_plot_data = plot_decision_boundaries(svm, plt.gca())
##############################################################################
# We see the outer points in the dataset can be correctly classified, but
# we still struggle with the inner circle. But remember we have a circuit
# with many free parameters! It is reasonable to believe we can give
# values to those variational parameters which improve the overall accuracy
# of our SVC.
#
# Training the Quantum Embedding Kernel
# -------------------------------------
#
# To be able to train the Quantum Embedding Kernel we need some measure of
# how well it fits the dataset in question. Performing an exhaustive
# search in parameter space is not a good solution because it is very
# resource intensive, and since the accuracy is a discrete quantity we
# would not be able to detect small improvements.
#
# We can, however, resort to a more specialized measure, the
# *kernel-target alignment* [#Alignment]_. The kernel-target alignment compares the
# similarity predicted by the quantum kernel to the actual labels of the
# training data. It is based on *kernel alignment*, a similarity measure
# between two kernels with given kernel matrices :math:`K_1` and
# :math:`K_2`:
#
# .. math::
# \operatorname{KA}(K_1, K_2) = \frac{\operatorname{Tr}(K_1 K_2)}{\sqrt{\operatorname{Tr}(K_1^2)\operatorname{Tr}(K_2^2)}}.
#
# .. note::
# Seen from a more theoretical side, :math:`\operatorname{KA}`
# is nothing else than the cosine of the angle between the kernel
# matrices :math:`K_1` and :math:`K_2` if we see them as vectors
# in the space of matrices with the Hilbert-Schmidt (or
# Frobenius) scalar product
# :math:`\langle A, B \rangle = \operatorname{Tr}(A^T B)`. This
# reinforces the geometric picture of how this measure relates
# to objects, namely two kernels, being aligned in a vector space.
#
# The training data enters the picture by defining an *ideal* kernel
# function that expresses the original labelling in the vector
# :math:`\boldsymbol{y}` by assigning to two datapoints the product
# of the corresponding labels:
#
# .. math::
# k_{\boldsymbol{y}}(\boldsymbol{x}_i, \boldsymbol{x}_j) = y_i y_j.
#
# The assigned kernel is thus :math:`+1` if both datapoints lie in the
# same class and :math:`-1` otherwise and its kernel matrix is simply
# given by the outer product :math:`\boldsymbol{y}\boldsymbol{y}^T`.
# The kernel-target alignment is then defined as the kernel alignment
# of the kernel matrix :math:`K` generated by the
# quantum kernel and :math:`\boldsymbol{y}\boldsymbol{y}^T`:
#
# .. math::
# \operatorname{KTA}_{\boldsymbol{y}}(K)
# = \frac{\operatorname{Tr}(K \boldsymbol{y}\boldsymbol{y}^T)}{\sqrt{\operatorname{Tr}(K^2)\operatorname{Tr}((\boldsymbol{y}\boldsymbol{y}^T)^2)}}
# = \frac{\boldsymbol{y}^T K \boldsymbol{y}}{\sqrt{\operatorname{Tr}(K^2)} N}
#
# where :math:`N` is the number of elements in :math:`\boldsymbol{y}`,
# that is the number of datapoints in the dataset.
#
# In summary, the kernel-target alignment effectively captures how well
# the kernel you chose reproduces the actual similarities of the data. It
# does have one drawback, however: having a high kernel-target alignment
# is only a necessary but not a sufficient condition for a good
# performance of the kernel [#Alignment]_. This means that a kernel which performs
# well is guaranteed to have a high alignment, but optimal alignment will not always
# bring optimal training accuracy with it.
#
# Let's now come back to the actual implementation. PennyLane's
# ``kernels`` module allows you to easily evaluate the kernel
# target alignment:
kta_init = qml.kernels.target_alignment(X, Y, init_kernel, assume_normalized_kernel=True)
print(f"The kernel-target alignment for our dataset and random parameters is {kta_init:.3f}")
##############################################################################
# Now let's code up an optimization loop and improve the kernel-target alignment!
#
# We will make use of regular gradient descent optimization. To speed up
# the optimization we will not use the entire training set to compute
# :math:`\operatorname{KTA}` but rather
# sample smaller subsets of the data at each step; we choose :math:`4`
# datapoints at random. Remember that PennyLane's built-in optimizer works
# to *minimize* the cost function that is given to it, which is why we
# have to multiply the kernel target alignment by :math:`-1` to actually
# *maximize* it in the process.
#
# .. note::
# Currently, the function ``qml.kernels.target_alignment`` is not
# differentiable yet, making it unfit for gradient descent optimization.
# We therefore first define a differentiable version of this function.
def target_alignment(
X,
Y,
kernel,
assume_normalized_kernel=False,
rescale_class_labels=True,
):
"""Kernel-target alignment between kernel and labels."""
K = qml.kernels.square_kernel_matrix(
X,
kernel,
assume_normalized_kernel=assume_normalized_kernel,
)
if rescale_class_labels:
nplus = np.count_nonzero(np.array(Y) == 1)
nminus = len(Y) - nplus
_Y = np.array([y / nplus if y == 1 else y / nminus for y in Y])
else:
_Y = np.array(Y)
T = np.outer(_Y, _Y)
inner_product = np.sum(K * T)
norm = np.sqrt(np.sum(K * K) * np.sum(T * T))
inner_product = inner_product / norm
return inner_product
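##############################################################################
# As a quick check (not part of the original demo) that this version is indeed
# differentiable, we ask autograd for the gradient of the (negated) alignment
# with respect to the variational parameters on a small subset of the data:
_grad_fn = qml.grad(
    lambda par: -target_alignment(
        X[:4], Y[:4], lambda x1, x2: kernel(x1, x2, par), assume_normalized_kernel=True
    )
)
print(_grad_fn(init_params).shape)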
params = init_params
opt = qml.GradientDescentOptimizer(0.2)
for i in range(500):
# Choose subset of datapoints to compute the KTA on.
subset = np.random.choice(list(range(len(X))), 4)
# Define the cost function for optimization
cost = lambda _params: -target_alignment(
X[subset],
Y[subset],
lambda x1, x2: kernel(x1, x2, _params),
assume_normalized_kernel=True,
)
# Optimization step
params = opt.step(cost, params)
# Report the alignment on the full dataset every 50 steps.
if (i + 1) % 50 == 0:
current_alignment = target_alignment(
X,
Y,
lambda x1, x2: kernel(x1, x2, params),
assume_normalized_kernel=True,
)
print(f"Step {i+1} - Alignment = {current_alignment:.3f}")
##############################################################################
# We want to assess the impact of training the parameters of the quantum
# kernel. Thus, let's build a second support vector classifier with the
# trained kernel:
# First create a kernel with the trained parameter baked into it.
trained_kernel = lambda x1, x2: kernel(x1, x2, params)
# Second create a kernel matrix function using the trained kernel.
trained_kernel_matrix = lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, trained_kernel)
# Note that SVC expects the kernel argument to be a kernel matrix function.
svm_trained = SVC(kernel=trained_kernel_matrix).fit(X, Y)
##############################################################################
# We expect to see an accuracy improvement vs. the SVM with random
# parameters:
accuracy_trained = accuracy(svm_trained, X, Y)
print(f"The accuracy of a kernel with trained parameters is {accuracy_trained:.3f}")
##############################################################################
# We have now achieved perfect classification! 🎆
#
# Since SVMs are known for their good generalisation behavior, it will be
# interesting to inspect the decision boundaries of
# our classifier:
trained_plot_data = plot_decision_boundaries(svm_trained, plt.gca())
##############################################################################
# Indeed, we see that now not only does every data instance fall within the
# correct class, but also that there are no strong artifacts that would make us
# distrust the model. In this sense, our approach benefits from both: on
# one hand it can adjust itself to the dataset, and on the other hand
# is not expected to suffer from bad generalisation.
#
# References
# ----------
#
# .. [#Training_QEKs]
#
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, and <NAME>.
# "Training Quantum Embedding Kernels on Near-Term Quantum Computers."
# `arXiv:2105.02276 <https://arxiv.org/abs/2105.02276>`__, 2021.
#
# .. [#Alignment]
#
# <NAME>, <NAME>, and <NAME>.
# "An overview of kernel alignment and its applications."
# `Artificial Intelligence Review 43.2: 179-192 <https://link.springer.com/article/10.1007/s10462-012-9369-4>`__, 2015.
| [
"pennylane.broadcast",
"pennylane.numpy.ndindex",
"pennylane.device",
"pennylane.numpy.zeros_like",
"pennylane.GradientDescentOptimizer",
"pennylane.numpy.array",
"pennylane.numpy.printoptions",
"pennylane.numpy.sin",
"pennylane.numpy.sum",
"pennylane.numpy.vstack",
"pennylane.qnode",
"matplotlib.patches.Wedge",
"matplotlib.colors.ListedColormap",
"pennylane.kernels.square_kernel_matrix",
"pennylane.numpy.random.seed",
"pennylane.numpy.floor_divide",
"pennylane.numpy.cos",
"pennylane.kernels.kernel_matrix",
"pennylane.adjoint",
"matplotlib.pyplot.gca",
"pennylane.numpy.linspace",
"pennylane.numpy.outer",
"pennylane.RY",
"pennylane.kernels.target_alignment",
"sklearn.svm.SVC",
"pennylane.numpy.random.uniform",
"pennylane.probs",
"pennylane.Hadamard",
"pennylane.numpy.hstack"
] | [((5625, 5645), 'pennylane.numpy.random.seed', 'np.random.seed', (['(1359)'], {}), '(1359)\n', (5639, 5645), True, 'from pennylane import numpy as np\n'), ((10210, 10229), 'pennylane.adjoint', 'qml.adjoint', (['ansatz'], {}), '(ansatz)\n', (10221, 10229), True, 'import pennylane as qml\n'), ((10728, 10776), 'pennylane.device', 'qml.device', (['"""default.qubit"""'], {'wires': '(5)', 'shots': 'None'}), "('default.qubit', wires=5, shots=None)\n", (10738, 10776), True, 'import pennylane as qml\n'), ((11195, 11209), 'pennylane.qnode', 'qml.qnode', (['dev'], {}), '(dev)\n', (11204, 11209), True, 'import pennylane as qml\n'), ((13696, 13775), 'pennylane.kernels.square_kernel_matrix', 'qml.kernels.square_kernel_matrix', (['X', 'init_kernel'], {'assume_normalized_kernel': '(True)'}), '(X, init_kernel, assume_normalized_kernel=True)\n', (13728, 13775), True, 'import pennylane as qml\n'), ((20484, 20562), 'pennylane.kernels.target_alignment', 'qml.kernels.target_alignment', (['X', 'Y', 'init_kernel'], {'assume_normalized_kernel': '(True)'}), '(X, Y, init_kernel, assume_normalized_kernel=True)\n', (20512, 20562), True, 'import pennylane as qml\n'), ((22305, 22338), 'pennylane.GradientDescentOptimizer', 'qml.GradientDescentOptimizer', (['(0.2)'], {}), '(0.2)\n', (22333, 22338), True, 'import pennylane as qml\n'), ((6620, 6645), 'pennylane.numpy.hstack', 'np.hstack', (['[x1, 0.5 * x2]'], {}), '([x1, 0.5 * x2])\n', (6629, 6645), True, 'from pennylane import numpy as np\n'), ((6654, 6679), 'pennylane.numpy.hstack', 'np.hstack', (['[y1, 0.5 * y2]'], {}), '([y1, 0.5 * y2])\n', (6663, 6679), True, 'from pennylane import numpy as np\n'), ((6755, 6789), 'pennylane.numpy.hstack', 'np.hstack', (['[labels1, -1 * labels2]'], {}), '([labels1, -1 * labels2])\n', (6764, 6789), True, 'from pennylane import numpy as np\n'), ((7154, 7203), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (["['#FF0000', '#0000FF']"], {}), "(['#FF0000', '#0000FF'])\n", (7179, 7203), True, 'import matplotlib as mpl\n'), ((8569, 8578), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8576, 8578), True, 'import matplotlib.pyplot as plt\n'), ((9411, 9497), 'pennylane.broadcast', 'qml.broadcast', ([], {'unitary': 'qml.CRZ', 'pattern': '"""ring"""', 'wires': 'wires', 'parameters': 'params[1]'}), "(unitary=qml.CRZ, pattern='ring', wires=wires, parameters=\n params[1])\n", (9424, 9497), True, 'import pennylane as qml\n'), ((10363, 10442), 'pennylane.numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', '(num_layers, 2, num_wires)'], {'requires_grad': '(True)'}), '(0, 2 * np.pi, (num_layers, 2, num_wires), requires_grad=True)\n', (10380, 10442), True, 'from pennylane import numpy as np\n'), ((11337, 11359), 'pennylane.probs', 'qml.probs', ([], {'wires': 'wires'}), '(wires=wires)\n', (11346, 11359), True, 'import pennylane as qml\n'), ((13782, 13825), 'pennylane.numpy.printoptions', 'np.printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (13797, 13825), True, 'from pennylane import numpy as np\n'), ((16209, 16227), 'pennylane.numpy.zeros_like', 'np.zeros_like', (['_xx'], {}), '(_xx)\n', (16222, 16227), True, 'from pennylane import numpy as np\n'), ((16243, 16265), 'pennylane.numpy.ndindex', 'np.ndindex', (['*_xx.shape'], {}), '(*_xx.shape)\n', (16253, 16265), True, 'from pennylane import numpy as np\n'), ((16858, 16867), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (16865, 16867), True, 'import matplotlib.pyplot as plt\n'), ((21753, 21852), 
'pennylane.kernels.square_kernel_matrix', 'qml.kernels.square_kernel_matrix', (['X', 'kernel'], {'assume_normalized_kernel': 'assume_normalized_kernel'}), '(X, kernel, assume_normalized_kernel=\n assume_normalized_kernel)\n', (21785, 21852), True, 'import pennylane as qml\n'), ((22108, 22124), 'pennylane.numpy.outer', 'np.outer', (['_Y', '_Y'], {}), '(_Y, _Y)\n', (22116, 22124), True, 'from pennylane import numpy as np\n'), ((22145, 22158), 'pennylane.numpy.sum', 'np.sum', (['(K * T)'], {}), '(K * T)\n', (22151, 22158), True, 'from pennylane import numpy as np\n'), ((23566, 23615), 'pennylane.kernels.kernel_matrix', 'qml.kernels.kernel_matrix', (['X1', 'X2', 'trained_kernel'], {}), '(X1, X2, trained_kernel)\n', (23591, 23615), True, 'import pennylane as qml\n'), ((24396, 24405), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24403, 24405), True, 'import matplotlib.pyplot as plt\n'), ((6270, 6284), 'pennylane.numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (6276, 6284), True, 'from pennylane import numpy as np\n'), ((6299, 6313), 'pennylane.numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (6305, 6313), True, 'from pennylane import numpy as np\n'), ((6721, 6738), 'pennylane.numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (6730, 6738), True, 'from pennylane import numpy as np\n'), ((9275, 9301), 'pennylane.Hadamard', 'qml.Hadamard', ([], {'wires': '[wire]'}), '(wires=[wire])\n', (9287, 9301), True, 'import pennylane as qml\n'), ((9371, 9405), 'pennylane.RY', 'qml.RY', (['params[0, j]'], {'wires': '[wire]'}), '(params[0, j], wires=[wire])\n', (9377, 9405), True, 'import pennylane as qml\n'), ((16130, 16162), 'pennylane.numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'N_gridpoints'], {}), '(-1, 1, N_gridpoints)\n', (16141, 16162), True, 'from pennylane import numpy as np\n'), ((16164, 16196), 'pennylane.numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'N_gridpoints'], {}), '(-1, 1, N_gridpoints)\n', (16175, 16196), True, 'from pennylane import numpy as np\n'), ((22005, 22065), 'pennylane.numpy.array', 'np.array', (['[(y / nplus if y == 1 else y / nminus) for y in Y]'], {}), '([(y / nplus if y == 1 else y / nminus) for y in Y])\n', (22013, 22065), True, 'from pennylane import numpy as np\n'), ((22087, 22098), 'pennylane.numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (22095, 22098), True, 'from pennylane import numpy as np\n'), ((23707, 23740), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': 'trained_kernel_matrix'}), '(kernel=trained_kernel_matrix)\n', (23710, 23740), False, 'from sklearn.svm import SVC\n'), ((16475, 16524), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (["['#FF0000', '#0000FF']"], {}), "(['#FF0000', '#0000FF'])\n", (16500, 16524), True, 'import matplotlib as mpl\n'), ((22178, 22191), 'pennylane.numpy.sum', 'np.sum', (['(K * K)'], {}), '(K * K)\n', (22184, 22191), True, 'from pennylane import numpy as np\n'), ((22194, 22207), 'pennylane.numpy.sum', 'np.sum', (['(T * T)'], {}), '(T * T)\n', (22200, 22207), True, 'from pennylane import numpy as np\n'), ((6344, 6381), 'pennylane.numpy.floor_divide', 'np.floor_divide', (['angles', 'sector_angle'], {}), '(angles, sector_angle)\n', (6359, 6381), True, 'from pennylane import numpy as np\n'), ((7529, 7644), 'matplotlib.patches.Wedge', 'mpl.patches.Wedge', (['(0, 0)', '(1)', '(i * sector_angle)', '((i + 1) * sector_angle)'], {'lw': '(0)', 'color': 'color', 'alpha': '(0.1)', 'width': '(0.5)'}), '((0, 0), 1, i * sector_angle, (i + 1) * sector_angle, lw=0,\n color=color, alpha=0.1, 
width=0.5)\n', (7546, 7644), True, 'import matplotlib as mpl\n'), ((7877, 7990), 'matplotlib.patches.Wedge', 'mpl.patches.Wedge', (['(0, 0)', '(0.5)', '(i * sector_angle)', '((i + 1) * sector_angle)'], {'lw': '(0)', 'color': 'other_color', 'alpha': '(0.1)'}), '((0, 0), 0.5, i * sector_angle, (i + 1) * sector_angle, lw\n =0, color=other_color, alpha=0.1)\n', (7894, 7990), True, 'import matplotlib as mpl\n'), ((16305, 16335), 'pennylane.numpy.array', 'np.array', (['[_xx[idx], _yy[idx]]'], {}), '([_xx[idx], _yy[idx]])\n', (16313, 16335), True, 'from pennylane import numpy as np\n'), ((21942, 21953), 'pennylane.numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (21950, 21953), True, 'from pennylane import numpy as np\n'), ((15218, 15264), 'pennylane.kernels.kernel_matrix', 'qml.kernels.kernel_matrix', (['X1', 'X2', 'init_kernel'], {}), '(X1, X2, init_kernel)\n', (15243, 15264), True, 'import pennylane as qml\n')] |