| content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang |
| stringlengths 7 to 928k | float64 3.5 to 33.8k | int64 6 to 139k | float64 0.08 to 0.96 | sequence | stringlengths 7 to 104 | stringlengths 4 to 230 | int64 7 to 928k | stringclasses 1 value |
---|---|---|---|---|---|---|---|---|
import fiftyone as fo
import fiftyone.zoo as foz
# Load Dataset
dataset = foz.load_zoo_dataset("coco-2017", split="validation")
# Randomly select 20 samples on which to generate predictions
view = dataset.take(20)
# Load zoo model
model = foz.load_zoo_model("keypoint-rcnn-resnet50-fpn-coco-torch")
# Run Inference
view.apply_model(model, label_field="predictions")
# Launch the FiftyOne App to visualize your dataset
session = fo.launch_app(dataset)
session.view = view
| 21.863636 | 67 | 0.77131 | ["BSD-2-Clause"] | bikramA/sample-code | scripts/fiftyone_sample.py | 481 | Python |
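A short follow-on sketch, assuming the same `dataset`, `model`, and `session` objects created in the snippet above: it runs the keypoint model over the full validation split instead of the 20-sample view and points the already-launched App back at the whole dataset. It is an illustration, not part of the original script.

# Sketch only: reuses dataset/model/session from the snippet above
# Run inference over the entire split instead of the sampled view
dataset.apply_model(model, label_field="predictions")
# Show the full dataset again in the App
session.view = None
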
from spaceone.api.inventory.v1 import network_type_pb2, network_type_pb2_grpc
from spaceone.core.pygrpc import BaseAPI
class NetworkType(BaseAPI, network_type_pb2_grpc.NetworkTypeServicer):
pb2 = network_type_pb2
pb2_grpc = network_type_pb2_grpc
def create(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('NetworkTypeInfo', ntype_service.create(params))
def update(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('NetworkTypeInfo', ntype_service.update(params))
def delete(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
ntype_service.delete(params)
return self.locator.get_info('EmptyInfo')
def get(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('NetworkTypeInfo', ntype_service.get(params))
def list(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
ntype_vos, total_count = ntype_service.list(params)
return self.locator.get_info('NetworkTypesInfo', ntype_vos, total_count, minimal=self.get_minimal(params))
def stat(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('NetworkTypeService', metadata) as ntype_service:
return self.locator.get_info('StatisticsInfo', ntype_service.stat(params))
| 43.531915 | 118 | 0.726784 | ["Apache-2.0"] | choonho/inventory | src/spaceone/inventory/api/v1/network_type.py | 2,046 | Python |
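A minimal registration sketch, not taken from the SpaceONE codebase (which wires servicers through its own pygrpc server layer): it shows how the `NetworkType` servicer above could be attached to a plain grpcio server, assuming the standard `add_NetworkTypeServicer_to_server` helper that grpcio-tools generates for a `NetworkTypeServicer`.

# Illustrative sketch only; SpaceONE normally registers servicers via its own
# pygrpc server wrapper rather than a bare grpcio server like this.
from concurrent import futures
import grpc

def serve(port: int = 50051) -> None:
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    # add_NetworkTypeServicer_to_server is the helper grpcio-tools generates
    # for the NetworkTypeServicer base class used above; NetworkType() is
    # instantiated directly here for illustration, although BaseAPI may need
    # additional setup inside SpaceONE.
    network_type_pb2_grpc.add_NetworkTypeServicer_to_server(NetworkType(), server)
    server.add_insecure_port(f"[::]:{port}")
    server.start()
    server.wait_for_termination()
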
#!/usr/bin/env python3
import argparse
from botocore.exceptions import ClientError
import os
from pacu.core.lib import downloads_dir
module_info = {
# Name of the module (should be the same as the filename)
"name": "lightsail__generate_temp_access",
# Name and any other notes about the author
"author": "Alexander Morgenstern alexander.morgenstern@rhinosecuritylabs.com",
# Category of the module. Make sure the name matches an existing category.
"category": "EXPLOIT",
# One liner description of the module functionality. This shows up when a user searches for modules.
"one_liner": "Creates temporary SSH keys for available instances in AWS Lightsail.",
# Full description about what the module does and how it works
"description": "This module creates temporary SSH keys that can be used to connect to Lightsail instances, and downloads them into the session's download directory.",
# A list of AWS services that the module utilizes during its execution
"services": ["Lightsail"],
# For prerequisite modules, try and see if any existing modules return the data that is required for your module before writing that code yourself, that way, session data can stay separated and modular.
"prerequisite_modules": ["lightsail__enum"],
# External resources that the module depends on. Valid options are either a GitHub URL (must end in .git) or single file URL.
"external_dependencies": [],
# Module arguments to autocomplete when the user hits tab
"arguments_to_autocomplete": ["--instances", "--regions"],
}
parser = argparse.ArgumentParser(add_help=False, description=module_info["description"])
parser.add_argument(
"--instances",
required=False,
help="One or more Lightsail instance names, their regions, and their access protocol in the format instanceid@region@protocol. Windows instances will use the RDP protocol, and others use SSH. Defaults to all instances.",
)
parser.add_argument(
"--regions",
required=False,
default=None,
help="One or more (comma separated) AWS regions in the format us-east-1. Defaults to all session regions.",
)
def write_keys_to_file(created_keys, session):
for region in created_keys:
ssh_key_dir = os.path.join(downloads_dir(), module_info["name"], region)
if not os.path.exists(ssh_key_dir):
os.makedirs(ssh_key_dir)
for credential in created_keys[region]:
if credential["protocol"] == "rdp":
windows_file_dir = os.path.join(ssh_key_dir, credential["instanceName"])
try:
with open(windows_file_dir, "w") as windows_file:
# Create header for file.
windows_file.write("instanceName,ipAddress,username,password\n")
windows_file.write(credential["instanceName"] + ",")
windows_file.write(credential["ipAddress"] + ",")
windows_file.write(credential["username"] + ",")
windows_file.write(credential["password"] + "\n")
except IOError:
print(
"Error writing credential file for {}.".format(
credential["instanceName"]
)
)
continue
else:
private_key_file_dir = os.path.join(
ssh_key_dir, credential["instanceName"]
)
cert_key_file_dir = os.path.join(
ssh_key_dir, credential["instanceName"] + "-cert.pub"
)
try:
with open(private_key_file_dir, "w") as private_key_file:
private_key_file.write(credential["privateKey"])
with open(cert_key_file_dir, "w") as cert_key_file:
cert_key_file.write(credential["certKey"])
except IOError:
print(
"Error writing credential file for {}.".format(
credential["instanceName"]
)
)
continue
def main(args, pacu_main):
session = pacu_main.get_active_session()
print = pacu_main.print
get_regions = pacu_main.get_regions
fetch_data = pacu_main.fetch_data
args = parser.parse_args(args)
regions = args.regions.split(",") if args.regions else get_regions("lightsail")
instances = []
if (
args.instances is not None
): # need to update this to include the regions of these IDs
for instance in args.instances.split(","):
instance_name = instance.split("@")[0]
region = instance.split("@")[1]
protocol = instance.split("@")[2]
if region not in regions:
print(" {} is not a valid region".format(region))
continue
else:
instances.append(
{
"name": instance_name,
"protocol": protocol,
"region": region,
}
)
else:
print("Targeting all Lightsail instances...")
if (
fetch_data(
["Lightsail"], module_info["prerequisite_modules"][0], "--instances"
)
is False
):
print("Pre-req module not run successfully. Exiting...")
return
for instance in session.Lightsail["instances"]:
if instance["region"] in regions:
protocol = "rdp" if "Windows" in instance["blueprintName"] else "ssh"
instances.append(
{
"name": instance["name"],
"protocol": protocol,
"region": instance["region"],
}
)
temp_keys = {}
for instance in instances:
temp_keys[instance["region"]] = []
for instance in instances:
client = pacu_main.get_boto3_client("lightsail", instance["region"])
print(" Instance {}".format(instance["name"]))
try:
response = client.get_instance_access_details(
instanceName=instance["name"], protocol=instance["protocol"]
)
temp_keys[instance["region"]].append(response["accessDetails"])
print(
" Successfully created temporary access for {}".format(
instance["name"]
)
)
except ClientError as error:
code = error.response["Error"]["Code"]
if code == "AccessDeniedException":
print(" Unauthorized to generate temporary access.")
return
elif code == "OperationFailureException":
print(" FAILED: Unable to interact with non-running instance.")
continue
else:
print(error)
break
write_keys_to_file(temp_keys, session)
windows_count = 0
ssh_count = 0
for region in temp_keys:
for credential in temp_keys[region]:
if credential["protocol"] == "rdp":
windows_count += 1
else:
ssh_count += 1
if windows_count or ssh_count:
written_file_path = os.path.join(downloads_dir(), module_info["name"])
else:
written_file_path = None
summary_data = {
"windows": windows_count,
"linux": ssh_count,
"written_file_path": written_file_path,
}
return summary_data
def summary(data, pacu_main):
out = " Created temporary access for {} Windows instances.\n".format(
data["windows"]
)
out += " Created temporary access for {} Linux instances.\n".format(data["linux"])
if data["written_file_path"] is not None:
out += "\n Credential files written to:\n {}{}".format(
data["written_file_path"], os.path.sep
)
return out
| 40.7 | 224 | 0.575553 | ["BSD-3-Clause"] | damienjburks/pacu | pacu/modules/lightsail__generate_temp_access/main.py | 8,140 | Python |
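A companion sketch, not part of the module: it parses one of the RDP credential files that `write_keys_to_file()` writes, relying only on the `instanceName,ipAddress,username,password` header shown above. The file path argument is supplied by the caller.

# Sketch only: reads back one RDP credential file produced by write_keys_to_file()
import csv

def read_rdp_credential(credential_file: str) -> dict:
    """Return the single credential row written for a Windows instance."""
    with open(credential_file, newline="") as f:
        # Header written by the module: instanceName,ipAddress,username,password
        return next(csv.DictReader(f))
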
# -*- coding: utf-8 -*-
import sys
import argparse
from cgate.reader import readfile, readschema, get_dtype
from cgate.validation import validate
def main():
parser = argparse.ArgumentParser()
parser.add_argument('target', help='Table name or File path')
parser.add_argument('--schema', '-s', help='Cerberus schema file')
parser.add_argument('--null', '-n', help='Null character', default='NULL,\\N')
args = parser.parse_args()
schema = readschema(args.schema)
    try:
        header = schema['header']
    except KeyError:
        header = None
na_values = args.null.split(',')
dtype, date_cols = get_dtype(schema['schema'])
dfs = readfile(args.target, header=header, dtype=dtype, parse_dates=date_cols, na_values=na_values)
fail_count = validate(dfs, schema['schema'])
if fail_count != 0:
print('Failed {0} error...'.format(fail_count), file=sys.stderr)
return 1
print('Success!', file=sys.stderr)
return 0
| 32.433333 | 103 | 0.664954 | ["MIT"] | buddseye/cerberus-gate | cgate/cgate.py | 973 | Python |
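The integer return codes from `main()` suggest the function backs a console entry point; a typical wiring, which is an assumption and not shown in the file, would be:

# Assumed entry-point wiring (not present in the original file)
if __name__ == '__main__':
    sys.exit(main())
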
# Generated by Django 2.2.6 on 2020-09-03 03:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='target',
field=models.CharField(max_length=100, verbose_name='评论目标'),
),
]
| 20.947368 | 72 | 0.59799 | ["BSD-2-Clause"] | shenjinglei/typeidea | comment/migrations/0002_auto_20200903_0323.py | 406 | Python |
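For context, a hedged sketch of the model change that would generate an `AlterField` like the one above; the other fields of the real `Comment` model are omitted and the exact model body is an assumption.

# Sketch of the corresponding field in comment/models.py (other fields omitted)
from django.db import models

class Comment(models.Model):
    target = models.CharField(max_length=100, verbose_name='评论目标')
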
from django.contrib import admin
# Register your models here.
from .models import Join
class JoinAdmin(admin.ModelAdmin):
list_display = ['email', 'friend', 'timestamp', 'updated']
class Meta:
model = Join
admin.site.register(Join, JoinAdmin)
| 20.230769 | 62 | 0.707224 | ["MIT"] | volkandkaya/trader | trader/joins/admin.py | 263 | Python |
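For comparison, the same registration can be written with Django's `@admin.register` decorator; this is an equivalent form, not a change to the original file (the inner `Meta`, which `ModelAdmin` does not use, is omitted here).

# Equivalent decorator-based registration (alternative to admin.site.register above)
from django.contrib import admin
from .models import Join

@admin.register(Join)
class JoinAdmin(admin.ModelAdmin):
    list_display = ['email', 'friend', 'timestamp', 'updated']
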
import re

def camel_space(string):
    # Insert a space before every uppercase letter that is not at the start
    string = re.sub(r'(?<!^)(?=[A-Z])', ' ', string)
    return string

# The Codewars test runner calls solution(), so alias it to the function above.
# Test.assert_equals is provided by the Codewars test framework.
solution = camel_space

Test.assert_equals(solution("helloWorld"), "hello World")
Test.assert_equals(solution("camelCase"), "camel Case")
Test.assert_equals(solution("breakCamelCase"), "break Camel Case")
| 32 | 66 | 0.697917 | ["BSD-2-Clause"] | brennanbrown/code-challenges | camel_space.py | 288 | Python |
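An equivalent regex-free version, shown only to illustrate what the lookbehind/lookahead pattern above does; it is not part of the kata solution.

# Regex-free equivalent of re.sub(r'(?<!^)(?=[A-Z])', ' ', string)
def solution_no_regex(string):
    return "".join(
        " " + ch if ch.isupper() and i > 0 else ch
        for i, ch in enumerate(string)
    )
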
import logging
import os
import boto3
from lib.cleanup_resource_handler import CleanupResourceHandler
from lib.queue_handler import QueueHandler
logging.getLogger().setLevel(logging.INFO)
def queue_handler(event, context):
"""
Handler for the event queue lambda trigger
"""
ec2_client = boto3.client('ec2')
dynamodb_resource = boto3.resource('dynamodb')
route53_client = boto3.client('route53')
handler = QueueHandler(ec2_client=ec2_client, dynamodb_resource=dynamodb_resource, route53_client=route53_client,
environ=os.environ)
return handler.handle(event, context)
def cleanup_resource_handler(event, context):
"""
Event handler for the custom resource.
"""
route53_client = boto3.client('route53')
handler = CleanupResourceHandler(route53_client=route53_client)
handler.handle_event(event, context)
| 25.6 | 117 | 0.737723 | ["Apache-2.0"] | 14kw/aws-cdk | packages/@aws-cdk-containers/ecs-service-extensions/lib/extensions/assign-public-ip/lambda/index.py | 896 | Python |
import torchvision
from torchvision.transforms import transforms
from tqdm import tqdm
from remote_dataloader.loader import RemoteDataLoader
if __name__ == '__main__':
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
total_trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
loader = RemoteDataLoader(total_trainset, batch_size=32, timeout=5)
for epoch in range(5):
for img, lb in tqdm(loader):
pass
| 33.857143 | 118 | 0.708861 | ["MIT"] | ildoonet/remote-dataloader | example.py | 711 | Python |
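For comparison, and assuming the same `total_trainset` and `tqdm` import as above, the equivalent loop with the standard `torch.utils.data.DataLoader` looks like this; it is shown only to highlight that `RemoteDataLoader` follows the same iteration interface.

# Comparison sketch: same loop shape with the built-in DataLoader
from torch.utils.data import DataLoader

local_loader = DataLoader(total_trainset, batch_size=32, shuffle=True, num_workers=4)
for img, lb in tqdm(local_loader):
    pass
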
# inclass/mongo_queries.py
import pymongo
import os
from dotenv import load_dotenv
import sqlite3
load_dotenv()
DB_USER = os.getenv("MONGO_USER", default="OOPS")
DB_PASSWORD = os.getenv("MONGO_PASSWORD", default="OOPS")
CLUSTER_NAME = os.getenv("MONGO_CLUSTER_NAME", default="OOPS")
connection_uri = f"mongodb+srv://{DB_USER}:{DB_PASSWORD}@{CLUSTER_NAME}.mongodb.net/test?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE"
print("----------------")
print("URI:", connection_uri)
client = pymongo.MongoClient(connection_uri)
print("----------------")
print("CLIENT:", type(client), client)
# print(dir(client))
# print("DB NAMES:", client.list_database_names()) #> ['admin', 'local']
db = client.ds14_db # "ds14_db" or whatever you want to call it
# print("----------------")
# print("DB:", type(db), db)
# collection = db.ds14_pokemon_collection # "ds14_collection" or whatever you want to call it
# print("----------------")
# print("COLLECTION:", type(collection), collection)
# print("----------------")
# # print("COLLECTIONS:")
# # print(db.list_collection_names())
# print("--------------------------------------")
################## ASSIGNMENT III #############################
# INSERT RPG DATA INTO MONGODB INSTANCE
# Create RPG database
db = client.rpg_data_db
# Establish sqlite3 connection to access rpg data
sl_conn = sqlite3.connect("data/rpg_db_original.sqlite3")
sl_curs = sl_conn.cursor()
################# CHARACTERS ###########################
# ## Create new collection for RPG data
# col_characters = db.character_collection
# ## Establish SQL syntax for query
# rpg_characters = 'SELECT * FROM charactercreator_character'
# # Function to loop through characters and return list of dictionaries
# def all_chars():
# query = rpg_characters
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "character_id": row[0],
# "name": row[1],
# "level": row[2],
# "exp": row[3],
# "hp": row[4],
# "strength": row[5],
# "intelligence": row[6],
# "dexterity": row[7],
# "wisdom": row[8]
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# # print(character_dict_list)
# col_characters.insert_many(character_dict_list)
# print("DOCS(Num Characters):", col_characters.count_documents({})) #
# SELECT count(distinct id) from characters
################# MAGES ###########################
# col_mage = db.mage_collection
# mages = 'SELECT * FROM charactercreator_mage'
# def all_chars():
# query = mages
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "character_ptr_id": row[0],
# "has_pet": row[1],
# "mana": row[2],
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_mage.insert_many(character_dict_list)
# print("DOCS:", col_mage.count_documents({}))
################# THIEVES ###########################
# col_thief = db.thief_collection
# thieves = 'SELECT * FROM charactercreator_thief'
# def all_chars():
# query = thieves
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "character_ptr_id": row[0],
# "is_sneaking": row[1],
# "energy": row[2],
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_thief.insert_many(character_dict_list)
# print("DOCS:", col_thief.count_documents({}))
################# CLERICS ###########################
# col_cleric = db.cleric_collection
# clerics = 'SELECT * FROM charactercreator_cleric'
# def all_chars():
# query = clerics
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "character_ptr_id": row[0],
# "using_shield": row[1],
# "mana": row[2],
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_cleric.insert_many(character_dict_list)
# print("DOCS:", col_cleric.count_documents({}))
################# FIGHTERS ###########################
# col_fighter = db.fighter_collection
# fighters = 'SELECT * FROM charactercreator_fighter'
# def all_chars():
# query = fighters
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "character_ptr_id": row[0],
# "using_shield": row[1],
# "rage": row[2],
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_fighter.insert_many(character_dict_list)
# print("DOCS:", col_fighter.count_documents({}))
################# NECROMANCERS ###########################
# col_mancer = db.mancer_collection
# mancers = 'SELECT * FROM charactercreator_necromancer'
# def all_chars():
# query = mancers
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "mage_ptr_id": row[0],
# "talisman_charged": row[1],
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_mancer.insert_many(character_dict_list)
# print("DOCS:", col_mancer.count_documents({}))
################# ITEMS ###########################
# col_items = db.items_collection
# items = 'SELECT * FROM armory_item'
# def all_chars():
# query = items
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "item_id": row[0],
# "name": row[1],
# "value": row[2],
# "weight": row[3]
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_items.insert_many(character_dict_list)
# print("DOCS:", col_items.count_documents({}))
################# WEAPONS ###########################
# col_weapons = db.weapons_collection
# weapons = 'SELECT * FROM armory_weapon'
# def all_chars():
# query = weapons
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "item_ptr_id": row[0],
# "power": row[1]
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_weapons.insert_many(character_dict_list)
# print("DOCS:", col_weapons.count_documents({}))
################# INVENTORY ###########################
# col_inventory = db.inventory_collection
# records = 'SELECT * FROM charactercreator_character_inventory'
# def all_chars():
# query = records
# chars = sl_curs.execute(query)
# char_data = []
# for row in chars:
# character = {
# "id": row[0],
# "character_id": row[1],
# "item_id": row[2]
# }
# char_data.append(character)
# result = char_data
# return result
# character_dict_list = all_chars()
# col_inventory.insert_many(character_dict_list)
# print("DOCS:", col_inventory.count_documents({}))
# print("COLLECTIONS:")
# print(db.list_collection_names())
#################### IN-CLASS POKEMON INSERTS #############################
# collection.insert_one({
# "name": "Pikachu",
# "level": 30,
# "exp": 76000000000,
# "hp": 400,
# "fav_icecream_flavors":["vanila_bean", "choc"],
# "stats":{"a":1,"b":2,"c":[1,2,3]}
# })
# print("DOCS:", collection.count_documents({})) # SELECT count(distinct id) from pokemon
# print(collection.count_documents({"name": "Pikachu"})) # SELECT
# count(distinct id) from pokemon WHERE name = "Pikachu"
# mewtwo = {
# "name": "Mewtwo",
# "level": 100,
# "exp": 76000000000,
# "hp": 450,
# "strength": 550,
# "intelligence": 450,
# "dexterity": 300,
# "wisdom": 575
# }
# blastoise = {
# "name": "Blastoise",
# "lvl": 70, # OOPS we made a mistake with the structure of this dict
# }
# charmander = {
# "nameeeeeee": "Charmander",
# "level": 70,
# "random_stat": {"a":2}
# }
# skarmory = {
# "name": "Skarmory",
# "level": 22,
# "exp": 42000,
# "hp": 85,
# "strength": 750,
# "intelligence": 8,
# "dexterity": 57
# }
# cubone = {
# "name": "Cubone",
# "level": 20,
# "exp": 35000,
# "hp": 80,
# "strength": 600,
# "intelligence": 60,
# "dexterity": 200,
# "wisdom": 200
# }
# scyther = {
# "name": "Scyther",
# "level": 99,
# "exp": 7000,
# "hp": 40,
# "strength": 50,
# "intelligence": 40,
# "dexterity": 30,
# "wisdom": 57
# }
# slowpoke = {
# "name": "Slowpoke",
# "level": 1,
# "exp": 100,
# "hp": 80,
# "strength": 100,
# "intelligence": 10,
# "dexterity": 50,
# "wisdom": 200
# }
# pokemon_team = [mewtwo, blastoise, skarmory, cubone, scyther, slowpoke, charmander]
# collection.insert_many(pokemon_team)
# print("DOCS:", collection.count_documents({})) # SELECT count(distinct id) from pokemon
# #collection.insert_one({"_id": "OURVAL", "name":"TEST"})
# # can overwrite the _id but not insert duplicate _id values
# #breakpoint()
# pikas = list(collection.find({"name": "Pikachu"})) # SELECT * FROM pokemon WHERE name = "Pikachu"
# # print(len(pikas), "PIKAS")
# # print(pikas[0]["_id"]) #> ObjectId('5ebc31c79c171e43bb5ed469')
# # print(pikas[0]["name"])
# # strong = list(collection.find({"level": {"$gte": 60}} $or {"lvl": {"$gte": 60}}))
# # strong = list(collection.find({"level": {"$gte": 60}, "$or" "lvl": {"$gte": 60}}))
# strong = list(collection.find({"$or": [{"level": {"$gte": 60}}, {"lvl": {"$gte": 60}}]}))
# # TODO: also try to account for our mistakes "lvl" vs "level"
# breakpoint()
# print(strong)
| 26.685864 | 150 | 0.566314 | ["MIT"] | ekselan/DS-Unit-3-Sprint-2-SQL-and-Databases | assignment3/a3_mongo_queries_abw.py | 10,194 | Python |
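A compact sketch of the pattern that the commented-out blocks above repeat for each table: run a SQLite query, zip every row with a list of field names, and bulk-insert the resulting documents into a MongoDB collection. The field names in the usage comment mirror the character block and are illustrative only.

# Generic helper capturing the repeated query -> dicts -> insert_many pattern above
def copy_table(cursor, collection, query, fields):
    docs = [dict(zip(fields, row)) for row in cursor.execute(query)]
    if docs:
        collection.insert_many(docs)
    return len(docs)

# Example mirroring the character_collection block (commented out like the originals):
# copy_table(sl_curs, db.character_collection,
#            'SELECT * FROM charactercreator_character',
#            ["character_id", "name", "level", "exp", "hp",
#             "strength", "intelligence", "dexterity", "wisdom"])
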
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['TabWdg', 'TabSaveStateCmd']
from pyasm.common import TacticException, Xml, Common, Environment, Container
from pyasm.web import DivWdg, SpanWdg, WebState, WebContainer, WidgetSettings, HtmlElement
from pyasm.search import Search
from pyasm.widget import WidgetConfigView, WidgetConfig, IconWdg
from tactic.ui.common import BaseRefreshWdg
import types, sys, re, os
import six
basestring = six.string_types
class TabWdg(BaseRefreshWdg):
ARGS_KEYS = {
'show_add': {
'description': 'show the + button',
'values': 'true|false',
'category': 'Display'
},
'show_context_menu': {
'description': 'show the context menu',
'values': 'true|false',
'category': 'Display'
},
'show_remove': {
'description': 'show the close button',
'values': 'true|false',
'category': 'Display'
},
'save_state': {
'description': 'key which is used to save state [ie: "save_state|main_tab" is the default]',
'category': 'Display'
},
}
def __init__(self, **kwargs):
super(TabWdg, self).__init__(**kwargs)
self.unique_id = self.generate_unique_id(is_random=True)
self.header = HtmlElement.ul()
self.header_id = self.header.set_unique_id()
self.use_default_style = kwargs.get("use_default_style")
if self.use_default_style not in [False, 'false']:
self.use_default_style = True
else:
self.use_default_style = False
def get_onload_js(self):
return r'''
if (spt.tab) {
return;
}
spt.Environment.get().add_library("spt_tab");
spt.tab = {};
spt.tab.top = null;
spt.tab.set_main_body_tab = function() {
spt.tab.top = document.id(document.body).getElement(".spt_tab_top");
return spt.tab.top;
}
// this is to be deprecated
spt.tab.set_main_body_top = function() {
spt.tab.top = document.id('main_body').getElement(".spt_tab_top");
}
spt.tab.set_tab_top = function( tab_top ) {
// if this is not really a tab top, then find a child
if (! tab_top.hasClass("spt_tab_top") ) {
tab_top = tab_top.getElement(".spt_tab_top");
}
spt.tab.top = tab_top;
return spt.tab.top;
}
spt.tab.set_tab_top_from_child = function( el ) {
// if this is not really a tab top, then find a parent
if (! el.hasClass("spt_tab_top") ) {
el = el.getParent(".spt_tab_top");
}
spt.tab.top = el;
return spt.tab.top;
}
spt.tab.get_headers = function() {
var top = spt.tab.top;
var tab_id = top.getAttribute("id");
var header_top = top.getElement(".spt_tab_header_top");
var all_headers = header_top.getElements(".spt_tab_header");
//return all_headers;
var headers = [];
for (var i = 0; i < all_headers.length; i++ ) {
var header_tab_id = all_headers[i].getAttribute("spt_tab_id");
if (header_tab_id != tab_id) {
continue;
}
headers.push(all_headers[i]);
}
return headers;
}
spt.tab.get_header = function(name) {
var top = spt.tab.top;
var header_top = top.getElement(".spt_tab_header_top");
var headers = header_top.getElements(".spt_tab_header");
for (var i = 0; i < headers.length; i++) {
if (name == headers[i].getAttribute("spt_element_name") ) {
return headers[i];
}
}
return null;
}
spt.tab.resize_headers = function() {
var top = spt.tab.top;
var header_top = top.getElement(".spt_tab_header_top");
var offset = 120;
var size = top.getSize();
top.spt_last_width = size.x;
var els = header_top.getElements(".spt_tab_header");
var count = els.length;
var width = parseInt((size.x-offset) / (count));
if (width > 120) {
width = 120;
}
if (width < 30) {
width = 30;
}
for (var i = 0; i < els.length; i++) {
els[i].setStyle("width", width + 'px');
//var title_el = els[i].getElement(".spt_tab_header_label");
els[i].setStyle("width", width + 'px');
}
}
spt.tab.get_content = function(name) {
var top = spt.tab.top;
var tab_id = top.getAttribute("spt_tab_id");
var content_top = top.getElement(".spt_tab_content_top");
var all_contents = content_top.getElements(".spt_tab_content");
// FIXME: this breaks when opening new tabs for some reason
//return all_contents;
for (var i = 0; i < all_contents.length; i++ ) {
var content_tab_id = all_contents[i].getAttribute("spt_tab_id");
var content_name = all_contents[i].getAttribute("spt_element_name");
if (content_name == name) {
return all_contents[i];
}
}
return null;
}
spt.tab.get_contents = function() {
var top = spt.tab.top;
var tab_id = top.getAttribute("spt_tab_id");
var content_top = top.getElement(".spt_tab_content_top");
var all_contents = content_top.getElements(".spt_tab_content");
// FIXME: this breaks when opening new tabs for some reason
//return all_contents;
var contents = [];
for (var i = 0; i < all_contents.length; i++ ) {
var content_tab_id = all_contents[i].getAttribute("spt_tab_id");
if (content_tab_id == null) {
alert(all_contents[i].getAttribute("spt_element_name"));
}
if (content_tab_id != tab_id) {
continue;
}
contents.push(all_contents[i]);
}
return contents;
}
spt.tab.set_attribute = function(element_name, name, value) {
var header = spt.tab.get_header(element_name);
var kwargs_str = header.getAttribute("spt_kwargs");
var kwargs;
if (kwargs_str != '') {
kwargs_str = kwargs_str.replace(/\&quot\;/g, '"')
kwargs = JSON.parse(kwargs_str);
}
else {
kwargs = {};
}
kwargs[name] = value;
header.setAttribute("spt_"+name, value);
kwargs_str = JSON.stringify(kwargs);
kwargs_str = kwargs_str.replace(/"/g,"\&quot\;");
header.setAttribute("spt_kwargs", kwargs_str);
}
spt.tab.add_new = function(element_name, title, class_name, kwargs,
values, hash) {
if (typeof(title) == 'undefined') {
title = '(Untitled)';
}
if (typeof(element_name) == 'undefined') {
//alert("No element name provided");
//return;
element_name = "__default__";
}
if (typeof(class_name) == 'undefined') {
class_name = '';
}
if (typeof(kwargs) == 'undefined') {
kwargs = {};
}
if (typeof(values) == 'undefined') {
values = {};
}
var top = spt.tab.top;
if (!top) {
spt.tab.set_main_body_tab();
top = spt.tab.top;
}
if (!hash && hash != false && kwargs.hash) {
hash = kwargs.hash;
}
if (hash == "__link__") {
hash = "link/" + element_name;
}
var orig_element_name = element_name;
var mode = top.getAttribute("spt_tab_mode");
if (mode == "hidden") {
element_name = "__default__";
}
var unique = kwargs.unique;
if (unique == true || unique == "true") {
var header = spt.tab.get_header(element_name);
if (header) {
var num = Math.floor((Math.random()*10000)+1);
element_name = element_name + num;
}
}
var top_id = top.getAttribute("spt_tab_id");
// disable sub tabs for now
full_element_name = element_name;
subelement_name = "";
/*
if (element_name.indexOf("/") != -1) {
var full_element_name = element_name;
var parts = element_name.split("/");
element_name = parts[0];
var subelement_name = parts[1];
}
else {
var full_element_name = element_name;
var subelement_name = "";
}
*/
var subelement_title;
var full_title;
if (title.indexOf("/") != -1) {
full_title = title;
var parts = title.split("/");
title = parts[0];
subelement_title = parts[1];
}
else {
full_title = title;
subelement_title = title;
}
//var headers = header_top.getElements(".spt_tab_header");
var headers = spt.tab.get_headers();
if (headers.length > 20) {
spt.alert("You have too many tabs open. Please close before opening any others.");
return;
}
var header;
var found = false;
for (var k=0; k < headers.length; k++){
var existing_header = headers[k];
if (existing_header.getAttribute('spt_element_name')==element_name){
header = existing_header;
found = true;
break;
}
}
// add a new tab
if (!found) {
var template_top = top.getElement(".spt_tab_template_top");
var header_template = template_top.getElement(".spt_tab_header");
// clone the header template
var header = spt.behavior.clone(header_template);
var header_id = Math.floor(Math.random()*10000000+1);
header.setAttribute("id", header_id);
if (kwargs.hidden == "true") {
header.setStyle("display", "none");
}
// add a subheader template for each header
var subheader_template = template_top.getElement(".spt_tab_subheader");
if (subheader_template) {
var subheader = spt.behavior.clone(subheader_template);
var subheader_id = Math.floor(Math.random()*10000000+1);
header.setAttribute("spt_subheader_id", subheader_id);
subheader.setAttribute("id", subheader_id);
subheader.setStyle("display", "none");
subheader.setAttribute("spt_header_id", header_id);
subheader_top = top.getElement(".spt_tab_subheader_top")
subheader.inject(subheader_top);
}
var last_header = headers[headers.length -1];
// set the new label
var label = header.getElement(".spt_tab_header_label");
var display_title = title;
label.setAttribute("title", title);
label.innerHTML = display_title;
header.setAttribute("spt_class_name", class_name);
var kwargs_str = JSON.stringify(kwargs);
kwargs_str = kwargs_str.replace(/\"/,"\&quot\;");
header.setAttribute("spt_kwargs", kwargs_str);
header.setAttribute("spt_element_name", element_name);
header.setAttribute("spt_title", title);
header.setAttribute("spt_tab_id", top_id);
header.removeClass("spt_content_loaded");
header.inject(last_header, "after");
spt.tab.resize_headers();
var selected_header = spt.tab.get_selected_header();
if (selected_header) {
var opener = selected_header.getAttribute("spt_element_name");
header.setAttribute("spt_tab_opener", opener);
}
// copy the content from template
var content_top = top.getElement(".spt_tab_content_top");
var content_template = template_top.getElement(".spt_tab_content");
var content_box = spt.behavior.clone(content_template);
content_box.setAttribute("spt_element_name", element_name);
content_box.setAttribute("spt_title", title);
content_box.setAttribute("spt_tab_id", top_id);
var content_boxes = spt.tab.get_contents();
var last_content = content_boxes[content_boxes.length -1];
content_box.inject(last_content, "after");
if (kwargs.count) {
var count_div = header.getElement(".spt_tab_header_count");
var expression = kwargs.count;
var search_key = kwargs.search_key;
var server = TacticServerStub.get();
var count = server.eval(expression, {search_keys: search_key});
count_div.innerText = count;
var update_data = {
expression: expression,
expr_key: search_key
};
spt.update.add(count_div, update_data);
}
}
// if a subtab is needed, create that
if (subelement_name) {
// find out if the subheader exists
var subheader_id = header.getAttribute("spt_subheader_id");
var subheader_top = document.id(subheader_id);
var subheaders = subheader_top.getElements(".spt_tab_subheader_item");
var subheader_exists = false;
var subheader = null;
for (var i = 0; i < subheaders.length; i++) {
var box_name = subheaders[i].getAttribute("spt_element_name");
if (full_element_name == box_name) {
subheader_exists = true;
subheader = subheaders[i];
break;
}
}
if (subheader_exists == false) {
// create a new one
var subheader = document.id(document.createElement("div"));
subheader.innerHTML = "<div style='padding: 5px 5px'><div class='spt_tab_header_label'>"+subelement_name+"</div></div>";
subheader_top.appendChild(subheader);
subheader.addClass("spt_tab_subheader_item");
// set the new label
var label = subheader.getElement(".spt_tab_header_label");
var display_title = subelement_title;
if (display_title.length > 20) {
display_title = subelement_title.substr(0,18) + "...";
}
title = subelement_name;
label.setAttribute("title", subelement_title);
label.innerHTML = display_title;
subheader.setAttribute("spt_class_name", class_name);
var kwargs_str = JSON.stringify(kwargs);
kwargs_str = kwargs_str.replace(/\"/,"\&quot\;");
subheader.setAttribute("spt_kwargs", kwargs_str);
subheader.setAttribute("spt_element_name", full_element_name);
subheader.setAttribute("spt_title", full_title);
subheader.setAttribute("spt_tab_id", top_id);
subheader.removeClass("spt_content_loaded");
// copy the content from template
var template_top = top.getElement(".spt_tab_template_top");
var content_top = top.getElement(".spt_tab_content_top");
var content_template = template_top.getElement(".spt_tab_content");
var content_box = spt.behavior.clone(content_template);
content_box.setAttribute("spt_element_name", full_element_name);
content_box.setAttribute("spt_title", full_title);
content_box.setAttribute("spt_tab_id", top_id);
var content_boxes = spt.tab.get_contents();
var last_content = content_boxes[content_boxes.length -1];
content_box.inject(last_content, "after");
}
}
if (! class_name) {
spt.tab.select(element_name);
}
else if (subelement_name) {
var force = true;
spt.tab.load_class(subheader, class_name, kwargs, values, force);
}
else {
var force = true;
spt.tab.load_class(header, class_name, kwargs, values, force);
}
// FIXME: this should only move on the main table
//var top_pos = spt.tab.getY(header_top);
//scroll(0,top_pos-20);
// register the hash
if (hash) {
var state = {
element_name: orig_element_name,
title: title,
class_name: class_name,
kwargs: kwargs,
hash: hash,
mode: 'tab',
}
spt.hash.set_hash(state, title, hash);
}
if (top.hasClass("spt_tab_save_state") ) {
spt.tab.save_state();
}
return header;
}
// TEST
spt.tab.getY = function(oElement)
{
var iReturnValue = 0;
while( oElement != null ) {
iReturnValue += oElement.offsetTop;
oElement = oElement.offsetParent;
}
return iReturnValue;
}
spt.tab.load_selected = function(element_name, title, class_name, kwargs, values) {
var top = spt.tab.top;
var header = spt.tab.get_selected_header();
// if none are selected, use the last one
if (header == null) {
var headers = spt.tab.get_headers();
header = headers[headers.length - 1];
}
var old_element_name = header.getAttribute("spt_element_name");
header.setAttribute("spt_element_name", element_name);
header.setAttribute("spt_title", title);
header.setAttribute("spt_class_name", class_name);
if (typeof(kwargs) == 'undefined') {
kwargs = {};
}
var kwargs_str = JSON.stringify(kwargs)
header.setAttribute("spt_kwargs", kwargs_str);
var label = header.getElement(".spt_tab_header_label");
var display_title = title;
if (display_title.length > 20) {
display_title = title.substr(0,18) + "...";
}
label.innerHTML = display_title;
var content_top = top.getElement(".spt_tab_content_top");
var content_boxes = content_top.getElements(".spt_tab_content");
for (var i=0; i < content_boxes.length; i++) {
var content_box = content_boxes[i];
var box_name = content_box.getAttribute("spt_element_name")
if (box_name == old_element_name) {
content_box.setAttribute("spt_element_name", element_name)
break;
}
}
var force = true;
spt.tab.load_class(header, class_name, kwargs, values, force);
}
// add a DOM node to the named content
spt.tab.load_node = function(element_name, node) {
var top = spt.tab.top;
var content_top = top.getElement(".spt_tab_content_top");
var content_boxes = spt.tab.get_contents();
for (var i=0; i < content_boxes.length; i++) {
var content_box = content_boxes[i];
var box_name = content_box.getAttribute("spt_element_name")
if (box_name == element_name) {
if(content_box.hasChildNodes()) {
while(content_box.childNodes.length >= 1 ) {
content_box.removeChild(content_box.firstChild);
}
}
content_box.appendChild(node);
break;
}
}
}
// add raw HTML to the named content
spt.tab.load_html = function(element_name, html) {
var top = spt.tab.top;
var content_top = top.getElement(".spt_tab_content_top");
var content_boxes = spt.tab.get_contents();
for (var i=0; i < content_boxes.length; i++) {
var content_box = content_boxes[i];
var box_name = content_box.getAttribute("spt_element_name")
if (box_name == element_name) {
spt.behavior.replace_inner_html(content_box, html);
}
}
}
spt.tab.select = function(element_name) {
var header = spt.tab.get_header(element_name);
var top = spt.tab.top;
var header_top = top.getElement(".spt_tab_header_top");
var headers = spt.tab.get_headers();
for (var i=0; i < headers.length; i++) {
headers[i].setStyle("opacity", "0.4");
headers[i].setStyle("font-weight", "normal");
headers[i].removeClass("spt_is_selected");
headers[i].removeClass("spt_tab_selected");
headers[i].addClass("spt_tab_unselected");
}
if (header) {
header.setStyle("opacity", "1.0");
header.addClass("spt_is_selected");
header.addClass("spt_tab_selected");
header.removeClass("spt_tab_unselected");
}
var content_top = top.getElement(".spt_tab_content_top");
var content_boxes = spt.tab.get_contents();
for (var i=0; i < content_boxes.length; i++) {
var content_box = content_boxes[i];
content_box.setStyle("display", "none");
}
for (var i=0; i < content_boxes.length; i++) {
var content_box = content_boxes[i];
var box_name = content_box.getAttribute("spt_element_name")
if (box_name == element_name) {
content_box.setStyle("display", "");
if (!content_box.hasClass("spt_content_loaded")) {
spt.tab.load_class(header);
}
break;
}
}
var kwargs_str = header ? header.getAttribute("spt_kwargs") : '';
if (!kwargs_str) {
kwargs = {};
}
else {
kwargs_str = kwargs_str.replace(/\&quot\;/g, '"');
kwargs = JSON.parse(kwargs_str);
}
bvr.options = {
element_name: element_name,
alias: kwargs.help_alias
}
spt.named_events.fire_event("tab|select", bvr);
// usually a tab contains a table and layout. it's better to set to that.
var tab_content = top.getElement('.spt_tab_content[spt_element_name=' + element_name + ']');
if (tab_content) {
var table = tab_content.getElement('.spt_table_table');
if (table) {
var layout = table.getParent(".spt_layout");
spt.table.set_layout(layout);
}
var last_element_name = spt.tab.get_selected_element_name();
if (last_element_name) {
top.setAttribute("spt_last_element_name", last_element_name);
}
return true;
} else {
return false;
}
}
spt.tab.load_class = function(header, class_name, kwargs, values, force) {
var title = header.getAttribute("spt_title");
var tab_element_name = header.getAttribute("spt_element_name");
if (typeof(force) == 'undefined') {
force = false;
}
if (typeof(class_name) == 'undefined') {
var class_name = header.getAttribute("SPT_WIDGET_KEY");
if (! class_name) {
class_name = header.getAttribute("spt_class_name");
}
}
if (typeof(kwargs) == 'undefined') {
kwargs = {};
var kwargs_str = header.getAttribute("spt_kwargs");
if (kwargs_str) {
kwargs_str = kwargs_str.replace(/\&quot\;/g, '"');
kwargs = JSON.parse(kwargs_str);
}
}
var top = spt.tab.top;
var header_top = top.getElement(".spt_tab_header_top");
var top_id = top.getAttribute("id");
//spt.api.app_busy_show("Loading " + title, '');
setTimeout( function() {
var header_top = header.getParent(".spt_tab_header_top");
var headers = spt.tab.get_headers();
for (var i=0; i < headers.length; i++) {
headers[i].setStyle("opacity", "0.4");
headers[i].setStyle("font-weight", "normal");
headers[i].removeClass("spt_is_selected");
headers[i].removeClass("spt_tab_selected");
headers[i].addClass("spt_tab_unselected");
}
// select the header
if (header.hasClass("spt_tab_subheader_item")) {
var subheader_top = header.getParent(".spt_tab_subheader");
header_id = subheader_top.getAttribute("spt_header_id");
select_header = document.id(header_id);
}
else {
select_header = header;
}
// select the header
select_header.setStyle("opacity", "1.0");
select_header.addClass("spt_is_selected");
select_header.addClass("spt_tab_selected");
select_header.removeClass("spt_tab_unselected");
var content_top = top.getElement(".spt_tab_content_top");
var content_boxes = spt.tab.get_contents();
// make all of the content boxes disappear
for (var i=0; i < content_boxes.length; i++) {
var content_box = content_boxes[i];
content_box.setStyle("display", "none");
}
for (var i=0; i < content_boxes.length; i++) {
var content_box = content_boxes[i];
var box_name = content_box.getAttribute("spt_element_name")
if (box_name == tab_element_name) {
content_box.setStyle("display", "");
// if no class name is defined, then break out
if (typeof(class_name) == 'undefined' || class_name == '') {
break;
}
if (force || ! content_box.hasClass("spt_content_loaded")) {
var resize_offset = content_box.getAttribute("spt_window_resize_offset");
spt.panel.load(content_box, class_name, kwargs, values);
// update info on header
header.setAttribute("spt_class_name", class_name);
var kwargs_str = JSON.stringify(kwargs);
kwargs_str = kwargs_str.replace(/\"/,"\&quot\;");
header.setAttribute("spt_kwargs", kwargs_str);
header.setAttribute("spt_element_name", tab_element_name);
header.setAttribute("spt_title", title);
content_box.addClass("spt_content_loaded");
// have to set this again because load removes it
content_box.setAttribute("spt_element_name", tab_element_name);
content_box.setAttribute("spt_tab_id", top_id);
content_box.setAttribute("spt_title", title);
if (resize_offset) {
content_box.setAttribute("spt_window_resize_offset", resize_offset);
}
}
break;
}
}
var bvr = {};
var parts = tab_element_name.split("/");
var element_name = parts[parts.length-1];
var alias = kwargs.help_alias;
bvr.options = {
element_name: element_name,
alias: alias
}
spt.named_events.fire_event("tab|select", bvr);
spt.api.app_busy_hide();
}, 10 );
}
spt.tab.reload_selected = function() {
var header = spt.tab.get_selected_header();
var class_name = header.getAttribute("spt_class_name");
var kwargs = header.getAttribute("spt_kwargs");
var kwargs_str = header.getAttribute("spt_kwargs");
var kwargs;
if (kwargs_str != '') {
kwargs_str = kwargs_str.replace(/\&quot\;/g, '"');
kwargs = JSON.parse(kwargs_str);
}
else {
kwargs = {};
}
var values = null;
var force = true;
spt.tab.load_class(header, class_name, kwargs, values, force);
}
spt.tab.get_selected_header = function() {
var top = spt.tab.top;
var header_top = top.getElement(".spt_tab_header_top");
var headers = header_top.getElements(".spt_tab_header");
for (var i = 0; i < headers.length; i++) {
var header = headers[i];
if ( header.hasClass("spt_is_selected") ) {
return header;
}
}
return null;
}
spt.tab.get_selected_element_name = function() {
var header = spt.tab.get_selected_header();
if (header) {
var element_name = header.getAttribute("spt_element_name");
return element_name;
}
return "";
}
spt.tab.get_last_selected_element_name = function() {
var top = spt.tab.top;
return top.getAttribute("spt_last_element_name");
}
spt.tab.save_state = function() {
var top = spt.tab.top;
var save_state = top.getAttribute("spt_tab_save_state");
var header_top = top.getElement(".spt_tab_header_top");
var headers = header_top.getElements(".spt_tab_header");
var class_names = [];
var attrs_list = [];
var kwargs_list = [];
for (var i = 0; i < headers.length; i++) {
var header = headers[i];
var element_name = header.getAttribute("spt_element_name")
var title = header.getAttribute("spt_title")
var attrs = {
name: element_name,
title: title
};
var class_name = header.getAttribute("spt_class_name");
class_names.push(class_name);
var kwargs_str = header.getAttribute("spt_kwargs");
var kwargs;
if (kwargs_str) {
kwargs_str = kwargs_str.replace(/\&quot\;/g, '"');
kwargs = JSON.parse(kwargs_str);
}
else {
kwargs = {};
}
kwargs_list.push(kwargs);
if (kwargs.count)
attrs.count = kwargs.count;
attrs_list.push(attrs)
}
var server = TacticServerStub.get();
var command = 'tactic.ui.container.TabSaveStateCmd';
var kwargs = {
class_names: class_names,
attrs_list: attrs_list,
kwargs_list: kwargs_list,
save_state: save_state
};
server.execute_cmd(command, kwargs, {}, {
on_complete: function(ret_val) {console.log(ret_val)},
on_error: function(err) {console.log(err)}
});
}
spt.tab.header_pos = null;
spt.tab.mouse_pos = null;
spt.tab.dragging = false;
spt.tab.header_drag_setup = function( evt, bvr, mouse_411) {
spt.tab.top = bvr.src_el.getParent(".spt_tab_top");
spt.tab.header_pos = bvr.src_el.getPosition(spt.tab.top);
spt.tab.mouse_pos = {x: mouse_411.curr_x, y: mouse_411.curr_y};
var header = bvr.src_el;
var element_name = header.getAttribute("spt_element_name");
spt.tab.select(element_name);
}
spt.tab.header_drag_motion = function( evt, bvr, mouse_411) {
//var header = bvr.drag_el;
var header = bvr.src_el;
var dx = mouse_411.curr_x - spt.tab.mouse_pos.x;
var dy = mouse_411.curr_y - spt.tab.mouse_pos.y;
if (Math.abs(dx) < 20) {
spt.tab.dragging = false;
return;
}
spt.tab.dragging = true;
header.setStyle("position", "absolute");
header.setStyle("z-index", "100");
header.setStyle("opacity", "1.0");
header.setStyle("left", spt.tab.header_pos.x + dx - 10 );
//header.setStyle("top", spt.tab.header_pos.y + dy );
}
spt.tab.header_drag_action = function( evt, bvr, mouse_411) {
var header = bvr.src_el;
var drag_pos = header.getPosition();
if (spt.tab.dragging == false)
return;
var headers = spt.tab.get_headers();
for ( var i = headers.length-1; i >= 0; i-- ) {
if (headers[i] == header) {
continue;
}
if (headers[i].getStyle("display") == "none") {
continue;
}
var pos = headers[i].getPosition();
var size = headers[i].getSize();
// the y ensures 2nd row tabs don't jump to first row on click
if (drag_pos.x > pos.x + size.x/2 && drag_pos.y >= pos.y) {
header.inject(headers[i], "after");
break;
}
if (drag_pos.x > pos.x && drag_pos.y >= pos.y ) {
header.inject(headers[i], "before");
break;
}
}
bvr.drag_el.setStyle("position", "relative");
bvr.drag_el.setStyle("z-index", "");
bvr.drag_el.setStyle("top", "");
bvr.drag_el.setStyle("left", "");
bvr.drag_el.setStyle("background", bvr.gradient);
var top = spt.tab.top;
if (top.hasClass("spt_tab_save_state") ) {
spt.tab.save_state();
}
}
spt.tab.close = function(src_el) {
// src_el should be a child of spt_tab_content or spt_tab_header
if (!src_el) {
spt.error('src_el passed in to spt.tab.close() does not exist.');
return;
}
spt.tab.top = src_el.getParent(".spt_tab_top");
var top = spt.tab.top;
var headers = spt.tab.get_headers();
if (headers.length == 1) {
return;
}
var content = src_el.getParent(".spt_tab_content");
var element_name;
// check if it's a header child
if (src_el.hasClass("spt_tab_header")) {
var header = src_el;
}
else {
var header = src_el.getParent(".spt_tab_header");
}
var subheader = src_el.getParent(".spt_tab_subheader");
if (header) {
element_name = header.getAttribute("spt_element_name");
content = spt.tab.get_content(element_name);
} else if (subheader) {
element_name = header.getAttribute("spt_element_name");
content = spt.tab.get_content(element_name);
} else if (content) {
element_name = content.getAttribute("spt_element_name");
header = spt.tab.get_selected_header(element_name);
}
if (!header || !content) {
spt.error('Tab close cannot find the header or content. Abort');
return;
}
/* If there are changed elements in the current tab, changedParameters
* is a list with index 0 containing changed element, and index 1 containing
* change type class. Otherwise, changedParameters is false.
*/
function ok(changedParameters) {
//Remove unsaved changes flags
if (changedParameters) {
var changed_element = changedParameters[0];
var changed_type = changedParameters[1];
changed_element.removeClass(changed_type);
}
var opener = header.getAttribute("spt_tab_opener");
var element_name = header.getAttribute("spt_element_name");
if (header) {
var subheader = document.id(header.getAttribute("spt_subheader_id"));
if (subheader) {
var items = subheader.getElements(".spt_tab_subheader_item");
for (var i = 0; i < items.length; i++) {
var subheader_element_name = items[i].getAttribute("spt_element_name");
var subheader_content = spt.tab.get_content(subheader_element_name);
spt.behavior.destroy_element(subheader_content);
}
spt.behavior.destroy_element(subheader);
}
}
//header.destroy();
//content.destroy();
spt.behavior.destroy_element(header);
spt.behavior.destroy_element(content);
//spt.tab.resize_headers();
var last_element_name = spt.tab.get_last_selected_element_name();
last_element_name = null;
// make the opener active
if (opener) {
spt.tab.select(opener);
}
else if (last_element_name) {
spt.tab.select(last_element_name);
}
else {
// select last one from the remaining
headers = spt.tab.get_headers();
var last = headers[headers.length - 1].getAttribute("spt_element_name");
spt.tab.select(last);
}
if (top.hasClass("spt_tab_save_state") ) {
spt.tab.save_state();
}
}
var changed_el = content.getElement(".spt_has_changes");
var changed_row = content.getElement(".spt_row_changed");
if (changed_el) {
spt.confirm("There are unsaved changes in the current tab. Continue without saving?", ok, null, {ok_args : [changed_el, "spt_has_changed"]});
}
else if (changed_row) {
spt.confirm("There are unsaved changes in the current tab. Continue without saving?", ok, null, {ok_args: [changed_row, "spt_row_changed"]});
}
else {
ok(false);
}
}
spt.tab.view_definition = function(bvr) {
var activator = spt.smenu.get_activator(bvr);
var header = activator;
var kwargs_str = header.getAttribute("spt_kwargs");
var kwargs;
if (kwargs_str != '') {
kwargs_str = kwargs_str.replace(/\&quot\;/g, '"');
kwargs = JSON.parse(kwargs_str);
}
else {
kwargs = {};
}
var class_name = header.getAttribute("spt_class_name_decoded");
/* TEST: show widget editor
var class_name2 = 'tactic.ui.tools.WidgetEditorWdg';
var kwargs2 = {
'editor_id': bvr.editor_id,
'display_handler': class_name,
'display_options': kwargs,
}
spt.panel.load_popup("Widget Editor", class_name2, kwargs2);
*/
var br = '\n';
var xml = '';
var placeholder = "element";
xml += '<' + placeholder + '>' + br;
xml += ' <display class="'+class_name+'">' + br;
for (var name in kwargs) {
if (name == 'class_name') {
continue;
}
xml += ' <'+name+'>'+kwargs[name]+'</'+name+'>' + br;
}
xml += ' </display>' + br;
xml += '</' + placeholder + '>';
var html = spt.convert_to_html_display(xml);
spt.alert(html, {type:'html'});
}
'''
def get_config_xml(self):
return '''
<config>
<tab>
<element name="untitled" title="(Untitled)"/>
</tab>
</config>
'''
def add_styles(self):
if self.use_default_style:
palette = self.top.get_palette()
border = palette.color("border")
color = palette.color("color")
background = palette.color("background")
data = {
'border': border,
'color': color,
'background': background,
'header_id': self.header_id,
}
from pyasm.web import HtmlElement
style = HtmlElement.style()
self.top.add(style)
style.add('''
#%(header_id)s .spt_tab_header {
border-style: solid;
position: relative;
border-color: %(border)s;
border-width: 1px 1px 0px 1px;
padding: 7px 5px;
color: %(color)s;
background: %(background)s;
height: 30px;
box-sizing: border-box;
}
#%(header_id)s .spt_tab_selected {
opacity: 1.0;
#border-bottom: none;
}
#%(header_id)s .spt_tab_unselected {
opacity: 0.4 ;
#border-bottom: solid 1px %(border)s;
}
#%(header_id)s .spt_tab_hover {
}
''' % data)
def get_bootstrap_styles(self):
styles = HtmlElement.style()
color = styles.get_color("color")
styles.add('''
.nav-pills .nav-link, .nav-tabs .nav-link {
padding: .5em .8575em;
font-size: 12px;
height: 31px;
}
.spt_tab_header_top {
height: 31px;
}
.spt_tab_header_top {
height: 31px;
}
.spt_tab_header {
display: flex;
align-items: center;
justify-content: space-between;
user-select: none;
-webkit-touch-callout: none; /* iOS Safari */
-webkit-user-select: none; /* Safari */
-khtml-user-select: none; /* Konqueror HTML */
-moz-user-select: none; /* Old versions of Firefox */
-ms-user-select: none; /* Internet Explorer/Edge */
}
.spt_tab_header_top .spt_tab_selected {
height: 31px;
background: var(--spt_palette_background);
color: #000;
}
.spt_tab_content_top .spt_tab_header {
border-bottom: solid .214rem transparent;
}
.spt_popup_content .spt_tab_header {
border-bottom: solid .214rem transparent;
background: inherit;
color: inherit;
}
.spt_tab_content_top .spt_tab_header_top .spt_tab_selected {
border-bottom: solid .214rem var(--spt_palette_md_secondary);
background: inherit;
color: inherit;
}
.spt_popup_content .spt_tab_header_top .spt_tab_selected {
border-bottom: solid .214rem var(--spt_palette_md_secondary);
background: inherit;
color: inherit;
}
.spt_tab_header_top .spt_tab_selected .nav-link {
color: #fff;
border-bottom: 0px solid transparent;
}
.spt_tab_header_top .spt_tab_selected .nav-link:hover {
color: #fff;
border-bottom: 0px solid transparent;
}
.nav-tabs .nav-link {
border-bottom: .214rem solid transparent;
}
.nav-tabs .nav-link:hover {
border-bottom: .214rem solid transparent;
}
.spt_tab_remove {
display: none;
position: absolute;
right: 3px;
}
.spt_tab_selected .spt_tab_remove {
display: block;
}
.spt_tab_selected .spt_tab_remove {
display: block;
}
.spt_tab_header:hover .spt_tab_remove {
display: block;
}
.spt_tab_header_label_container {
display: flex;
align-items: center;
}
.spt_tab_header_label {
color: %s;
text-overflow: ellipsis;
overflow-x: hidden;
white-space: nowrap;
}
.spt_tab_selected .spt_tab_header_label {
color: %s !important;
}
.spt_tab_header_count {
margin-left: 10px;
}
''' % (color, color))
return styles
def get_styles(self):
styles = HtmlElement.style('''
/* NEW */
.spt_tab_header_top {
white-space: nowrap;
height: auto;
float: left;
position: relative;
z-index: 1;
margin-bottom: -1px;
width: 100%;
overflow: hidden;
text-align: left;
box-sizing: border-box;
}
.spt_tab_header {
vertical-align: top;
margin-right: 1px;
box-sizing: border-box;
overflow: hidden;
display: inline-block;
}
.spt_tab_header.rounded-top-corners {
border-top-left-radius: 5px;
border-top-right-radius: 5px;
}
.spt_tab_header.drag-header {
position: relative;
}
.spt_tab_header_label {
text-align: left;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
z-index: 1;
}
.spt_add_tab {
margin-left: -2px;
display: inline-block;
}
.spt_add_tab_inner {
padding: 0px 2px 0px 2px;
border-top-right-radius: 12px;
opacity: 0.5;
background: linear-gradient(180deg, #f2f2f2, #FFFFFF);
float: left;
margin-top: 2px;
padding-top: 4px;
height: 21px;
width: 22px;
margin-left: 4px;
border-style: solid;
border-width: 1px 1px 0px 1px;
border-color: #BBB;
text-align: center;
}
.spt_tab_remove {
position: absolute;
right: 2px;
top: 8px;
z-index: 2;
width: 16px;
height: 16px;
padding-left: 2px;
border-radius: 10px;
box-sizing: border-box;
color: #000;
background: #FFF;
border: solid 1px transparent;
}
.spt_icon_active {
margin: auto;
position: absolute;
top: 0;
bottom: 0;
max-height: 100%;
opacity: 0.3;
}
.spt_tab_content_top {
border-bottom-left-radius: 5px;
color: #000;
border-bottom-right-radius: 5px;
margin-top: -1px;
min-height: 500px;
width: 100%;
z-index: 1;
background: var(--spt_palette_background);
border-top-right-radius: 5px;
border: 1px solid #BBB;
}
''')
return styles
def get_tab_id(self):
return self.unique_id
def get_header_id(self):
return self.header_id
def get_display(self):
top = self.top
top.add_class("spt_tab_top")
if self._use_bootstrap():
top.add(self.get_bootstrap_styles())
else:
top.add(self.get_styles())
self.search_type = None
self.view = self.kwargs.get("view")
config_xml = self.kwargs.get("config_xml")
config = self.kwargs.get("config")
# save state overrides
saved_config_xml = None
self.save_state = self.kwargs.get("save_state")
if self.save_state in [True, 'true']:
self.save_state = "save_state|main_tab"
if self.save_state:
saved_config_xml = WidgetSettings.get_value_by_key(self.save_state)
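            # Descriptive note: a previously saved layout either replaces the incoming
            # config outright (when no config_xml was passed in) or, when both exist,
            # any saved tabs missing from the current config are appended to it by the
            # merge below before the saved copy is discarded.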
if saved_config_xml:
if not config_xml:
config_xml = saved_config_xml
else:
saved_xml_data = Xml()
saved_xml_data.read_string(saved_config_xml)
saved_xml_root = saved_xml_data.get_root_node()
saved_xml_tab = Xml.get_first_child(saved_xml_root)
nodes = Xml.get_children(saved_xml_tab)
xml_data = Xml()
xml_data.read_string(config_xml)
xml_root = xml_data.get_root_node()
xml_tab = Xml.get_first_child(xml_root)
curr_nodes = Xml.get_children(xml_tab)
curr_node_names = [Xml.get_attribute(node, "name") for node in curr_nodes]
for node in nodes:
node_name = Xml.get_attribute(node, "name")
if node_name not in curr_node_names:
Xml.append_child(xml_tab, node)
config_xml = xml_data.get_xml()
saved_config_xml = None
top.add_class("spt_tab_save_state")
top.add_attr("spt_tab_save_state", self.save_state)
self.mode = self.kwargs.get('mode')
if not self.mode:
self.mode = "default"
if self.view and self.view != 'tab' and not config_xml:
config = None
# if it is not defined in the database, look at a config file
includes = self.kwargs.get("include")
if includes:
includes = includes.split("|")
for include in includes:
tmp_path = __file__
dir_name = os.path.dirname(tmp_path)
file_path="%s/../config/%s" % (dir_name, include)
config = WidgetConfig.get(file_path=file_path, view=self.view)
if config and config.has_view(self.view):
pass
else:
config = None
if not config:
search = Search("config/widget_config")
search.add_filter("category", "TabWdg")
search.add_filter("view", self.view)
config_sobj = search.get_sobject()
if not config_sobj:
config_xml = "<config><%s></%s></config>" % (self.view, self.view)
else:
config_xml = config_sobj.get_value("config")
config = WidgetConfig.get(view=self.view, xml=config_xml)
else:
if saved_config_xml:
# this is for custom config_xml with a matching custom view
if not self.view:
self.view = 'tab'
config = WidgetConfig.get(view=self.view, xml=saved_config_xml)
elif config:
pass
elif config_xml:
# this is for custom config_xml with a matching custom view
if not self.view:
self.view = 'tab'
config = WidgetConfig.get(view=self.view, xml=config_xml)
elif self.widgets:
config_xml = '''
<config>
<tab></tab>
</config>
'''
else:
config_xml = '''
<config>
<tab>
<element name="untitled" title="(Untitled)"/>
</tab>
</config>
'''
self.view = 'tab'
config = WidgetConfig.get(view=self.view, xml=config_xml)
element_names = None
if self.save_state and config:
element_names = config.get_element_names()
if not element_names:
element_names = self.kwargs.get("element_names")
if element_names and isinstance(element_names, basestring):
element_names = element_names.split(",")
if not element_names and config:
element_names = config.get_element_names()
if not element_names:
element_names = []
#top.add_style("padding: 10px")
top.set_id(self.unique_id)
top.set_attr("spt_tab_id", self.unique_id)
top.set_attr("spt_tab_mode", self.mode)
gradient = top.get_gradient("background", -5, 5)
inner = DivWdg()
top.add(inner)
inner.add_style("position: relative")
inner.add_style("width: auto")
if not Container.get_dict("JSLibraries", "spt_tab"):
inner.add_behavior( {
'type': 'load',
'gradient': gradient,
'cbjs_action': self.get_onload_js()
} )
header_div = self.header
header_id = self.get_header_id()
inner.add(header_div)
header_div.add_class("spt_tab_header_top")
header_div.add_class("nav nav-tabs")
subheader_div = DivWdg()
subheader_div.add_class("spt_tab_subheader_top")
subheader_div.add_class("SPT_TEMPLATE")
inner.add(subheader_div)
self.add_subheader_behaviors(subheader_div)
if not self._use_bootstrap():
self.add_styles()
# if a search_key has been passed in, add it to the state.
state = self.kwargs.get("state")
if not state:
state = self.kwargs
search_key = self.kwargs.get("search_key")
if search_key:
state['search_key'] = search_key
selected = self.kwargs.get("selected")
if not selected:
if element_names:
selected = element_names[0]
else:
selected = ''
offset = self.kwargs.get("tab_offset")
if offset:
header_div.add_style("padding-left: %s" % offset)
if self.mode == "hidden":
# TODO: The display is problematic because bmd is using !important
header_div.add_style("display: none")
header_div.add_style("visibility: hidden")
header_div.add_style("height: 0px")
header_defs = {}
title_dict = {}
self.add_context_menu( header_div )
min_width = self.kwargs.get("min_width")
if min_width:
try:
min_width = int(min_width)
min_width = str(min_width) + "px"
except ValueError:
pass
header_div.add_style("min-width", min_width)
resize_headers = self.kwargs.get("resize_headers")
if resize_headers:
offset = 120
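            # Descriptive note: the load behavior below polls every 250ms and, whenever
            # the header bar width changes, redistributes the available width (minus the
            # offset) across the tab headers, clamping each header to 30-120px.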
header_div.add_behavior( {
'type': 'load',
'offset': offset,
'cbjs_action': '''
var top = bvr.src_el;
top.spt_last_width = 0;
var offset = bvr.offset;
setInterval( function() {
if (!top.isVisible() ) {
return;
}
var size = top.getSize();
if (size.x == top.spt_last_width) {
return;
}
top.spt_last_width = size.x;
var els = bvr.src_el.getElements(".spt_tab_header");
var count = els.length;
var width = parseInt((size.x-offset) / (count));
if (width > 120) {
width = 120;
}
if (width < 30) {
width = 30;
}
for (var i = 0; i < els.length; i++) {
els[i].setStyle("width", width + "px");
}
}, 250);
'''
} )
show_remove = self.kwargs.get("show_remove")
# resize headers on leave
resize_headers = self.kwargs.get("resize_headers")
resize_headers = True
if resize_headers:
header_div.add_behavior( {
'type': 'mouseleave',
'cbjs_action': '''
spt.tab.resize_headers();
'''
} )
if show_remove == "hover":
header_div.add_relay_behavior( {
'type': 'mouseenter',
'bvr_match_class': 'spt_tab_header',
'cbjs_action': '''
var el = bvr.src_el.getElement(".spt_tab_remove");
el.setStyle("display", "");
'''
} )
header_div.add_relay_behavior( {
'type': 'mouseleave',
'bvr_match_class': 'spt_tab_header',
'cbjs_action': '''
var el = bvr.src_el.getElement(".spt_tab_remove");
el.setStyle("display", "none");
'''
} )
header_div.add_relay_behavior( {
'type': 'mouseenter',
'bvr_match_class': 'spt_tab_remove',
'cbjs_action': '''
var active = bvr.src_el.getElement(".spt_icon_active");
active.setStyle("opacity", 1.0);
'''
} )
header_div.add_relay_behavior( {
'type': 'mouseleave',
'bvr_match_class': 'spt_tab_remove',
'cbjs_action': '''
var active = bvr.src_el.getElement(".spt_icon_active");
active.setStyle("opacity", 0.3);
'''
} )
loaded_dict = {}
for element_name in element_names:
attrs = config.get_element_attributes(element_name)
title = attrs.get("title")
if not title:
title = Common.get_display_title(element_name)
title = _(title)
if attrs.get("display") == "false":
continue
load_now = attrs.get('load')
is_loaded = load_now =='true'
loaded_dict[element_name] = is_loaded
display_class = config.get_display_handler(element_name)
display_options = config.get_display_options(element_name)
header_defs[element_name] = {
'display_class': display_class,
'display_options': display_options
}
# FIXME: this is already defined in get_display_options
# process the display options
for name, value in display_options.items():
# so it allows JSON string to pass thru without eval as expression
if re.search("^{[@$]", value) and re.search("}$", value):
value = Search.eval(value, state=state)
display_options[name] = value
# DEPRECATED: this should not really be used. It is likely
# better to use expressions set above to explicitly set
# the values
if display_options.get("use_state") in [True, 'true']:
# add the state items to the display options
for state_name, state_value in state.items():
display_options[state_name] = state_value
if element_name == selected:
is_selected = True
else:
is_selected = False
header = self.get_tab_header(element_name, title, display_class, display_options, is_selected=is_selected, is_loaded=is_loaded, is_template=False, attrs=attrs)
header_div.add(header)
# add widgets that have been manually added
for i, widget in enumerate(self.widgets):
name = widget.get_name()
if not name:
num = Common.randint(0, 10000)
name = "noname%s" % num
widget.set_name(name)
title = "(Untitled)"
else:
title = Common.get_display_title(name)
if not title:
title = "(Untitled)"
title_dict[name] = title
if name == selected:
is_selected = True
else:
is_selected = False
class_name = Common.get_full_class_name(widget)
if isinstance(widget, BaseRefreshWdg):
kwargs = widget.get_kwargs()
else:
kwargs = {}
header = self.get_tab_header(name, title, class_name, kwargs, is_selected=is_selected, is_loaded=True, is_template=False)
header_div.add(header)
show_add = self.kwargs.get("show_add") not in [False, "false"]
if show_add:
header_div.add( self.get_add_wdg() )
content_top = DivWdg()
content_top.add_class("spt_tab_content_top")
content_top.add_attr("spt_tab_id", self.get_tab_id())
inner.add(content_top)
resize_offset = self.kwargs.get("resize_offset")
resize_attr = self.kwargs.get("resize_attr") or "height"
if resize_offset != None:
content_top.add_class("spt_window_resize")
content_top.add_attr("spt_window_resize_offset", resize_offset)
content_top.add_attr("spt_window_resize_attr", resize_attr)
#content_top.add_style("overflow: auto")
content_top.add_style("overflow: auto")
else:
height = self.kwargs.get("height")
if height:
try:
height = int(height)
height = str(height) + "px"
except ValueError:
pass
content_top.add_style("height: %s" % height)
content_top.add_style("overflow-y: auto")
content_top.add_style("min-height: %s" % height)
width = self.kwargs.get("width")
if not width:
width = self.kwargs.get("min_width")
if width:
try:
width = int(width)
width = str(width) + "px"
except ValueError:
pass
content_top.add_style("min-width: %s" % width)
content_top.add_class("tab_content_top")
color_mode = self.kwargs.get("color_mode")
if color_mode == "transparent":
pass
else:
content_top.add_color("color", "color")
# content_top.add_color("background", "background")
# put in a content box for each element
for element_name in element_names:
content_div = DivWdg()
content_top.add(content_div)
content_div.add_class("spt_tab_content")
content_div.add_attr("spt_tab_id", self.unique_id)
content_div.add_attr("spt_element_name", element_name)
resize_offset = self.kwargs.get("resize_offset")
if resize_offset != None:
content_div.add_class("spt_window_resize")
content_div.add_attr("spt_window_resize_offset", resize_offset)
content_div.add_style("overflow: auto")
content_div.add_style("width: 100%")
content_div.add_style("text-align: left")
is_loaded = loaded_dict.get(element_name)
if element_name == selected or is_loaded:
header_def = header_defs[element_name]
display_class = header_def.get("display_class")
if not display_class:
widget = DivWdg()
widget.add_color("background", "background")
widget.add_style("height: 300px")
widget.add_style("padding-top: 50px")
inner = DivWdg()
widget.add(inner)
inner.add_style("margin-left: auto")
inner.add_style("margin-right: auto")
inner.add_style("width: 500px")
inner.add_style("height: 100px")
inner.add_border()
inner.add_style("text-align: center")
inner.add_style("padding-top: 50px")
inner.add_color("color", "color3")
inner.add_color("background", "background3")
inner.add( IconWdg("WARNING", IconWdg.WARNING) )
inner.add(" <b>Nothing to display</b>")
else:
display_options = header_def.get("display_options")
widget = Common.create_from_class_path(display_class, kwargs=display_options)
content_div.add(widget)
content_div.add_class("spt_content_loaded")
if is_loaded and element_name != selected:
# hide preloaded tabs or non-selected tabs
content_div.add_style("display: none")
else:
content_div.add(" ")
content_div.add_style("display: none")
for widget in self.widgets:
name = widget.get_name()
content_div = DivWdg()
content_div.add_class("spt_tab_content")
content_div.add_attr("spt_tab_id", self.unique_id)
content_div.add_class("spt_content_loaded")
content_div.add_attr("spt_element_name", name)
resize_offset = self.kwargs.get("resize_offset")
if resize_offset != None:
content_div.add_class("spt_window_resize")
content_div.add_attr("spt_window_resize_offset", resize_offset)
content_div.add_style("overflow: auto")
title = title_dict.get(name)
content_div.add_attr("spt_title", title)
if name != selected:
content_div.add_style("display: none")
content_div.add(widget)
content_div.add_style("width: 100%")
# content_div.add_style("height: 100%")
content_div.add_style("text-align: left")
content_top.add(content_div)
# Add in a template
template_div = DivWdg()
template_div.add_class("spt_tab_template_top")
template_div.add_style("display: none")
template_div.add_class("SPT_TEMPLATE")
name = ""
title = ""
is_selected = False
header = self.get_tab_header(name, title, None, None, is_selected=is_selected, is_template=True)
template_div.add(header)
# subheader test
subheader = self.get_tab_subheader(name, title, None, None, is_selected=is_selected, is_template=True, config=config)
template_div.add(subheader)
subheader.add_style("z-index: 3")
#header.add_behavior( {
header.add_relay_behavior( {
'type': 'click',
'bvr_match_class': 'spt_tab_header',
'cbjs_action': '''
var header_top = bvr.src_el.getParent(".spt_tab_header_top");
var top = bvr.src_el.getParent(".spt_tab_top");
var subheader_id = bvr.src_el.getAttribute("spt_subheader_id")
var subheaders = top.getElements(".spt_tab_subheader");
for ( var i = 0; i < subheaders.length; i++) {
subheaders[i].setStyle("display", "none");
}
var el = document.id(subheader_id);
var items = el.getElements(".spt_tab_subheader_item");
if (items.length == 0) {
return;
}
var size = bvr.src_el.getSize();
var pos = bvr.src_el.getPosition(header_top);
if (el) {
el.setStyle("display", "");
spt.body.add_focus_element(el);
el.position({x: pos.x, y: pos.y+size.y-1}, el);
}
'''
} )
top.add(template_div)
content_div = DivWdg()
content_div.add_class("spt_tab_content")
content_div.add_attr("spt_element_name", "NEW")
content_div.add_attr("spt_tab_id", self.unique_id)
content_div.add("")
content_div.add_style("width: 100%")
# content_div.add_style("height: 100%")
content_div.add_style("text-align: left")
template_div.add(content_div)
return top
def get_add_wdg(self):
style = HtmlElement.style('''
.spt_add_tab {
display: flex;
align-items: center;
padding-left: 10px;
}
''')
div = DivWdg()
div.add(style)
div.add_class("spt_add_tab")
icon_div = DivWdg()
icon_div.add_class("spt_add_tab_inner")
from tactic.ui.widget import IconButtonWdg, ButtonNewWdg
icon = ButtonNewWdg(title="New Tab", icon="FA_PLUS", width=20)
#icon = IconWdg("New Tab", IconWdg.PLUS)
#icon.add_style("top: -1px")
#icon.add_style("left: 0px")
#icon.add_style("position: absolute")
icon.add_style("display: block")
icon_div.add_class("hand")
icon_div.add(icon)
add_bvr = self.kwargs.get("add_bvr")
if not add_bvr:
add_bvr = """
spt.tab.top = bvr.src_el.getParent(".spt_tab_top");
spt.tab.add_new();
"""
icon_div.add_behavior( {
'type': 'click_up',
'cbjs_action': add_bvr
} )
div.add(icon_div)
self.extra_menu = self.kwargs.get("extra_menu")
if self.extra_menu:
icon_div = DivWdg()
icon_div.set_round_corners(3, corners=['TR'])
from tactic.ui.widget import IconButtonWdg
icon = IconWdg("More Options", IconWdg.ARROWHEAD_DARK_DOWN)
icon.add_style("margin-left: -2px")
icon_div.add(icon)
from smart_menu_wdg import SmartMenu
smenu_set = SmartMenu.add_smart_menu_set( icon_div, { 'BUTTON_MENU': self.extra_menu } )
SmartMenu.assign_as_local_activator( icon_div, "BUTTON_MENU", True )
icon_div.add_style("padding-top: 4px")
icon_div.add_style("margin-top: 10px")
icon_div.add_style("float: left")
icon_div.add_style("height: 16px")
icon_div.add_style("width: 10px")
icon_div.add_style("margin-left: -1px")
icon_div.add_gradient("background", "background", -5, 5)
icon_div.add_border()
icon_div.add_style("text-align: center")
icon_div.add_style("opacity: 0.5")
div.add(icon_div)
return div
def get_edit_wdg(self):
div = DivWdg()
div.add_style("margin-left: -2px")
icon_div = DivWdg()
icon_div.add_style("padding: 0px 2px 0px 2px")
icon_div.set_round_corners(3, corners=['TR','TL'])
from tactic.ui.widget import IconButtonWdg
icon = IconButtonWdg(title="New Tab", icon=IconWdg.EDIT)
icon = IconWdg("Edit Tab Definition", IconWdg.EDIT)
icon.add_style("margin-top: -1px")
icon.add_style("margin-left: 1px")
icon_div.add_class("hand")
icon_div.add(icon)
icon.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var class_name = 'tactic.ui.tools.tab_edit_wdg.TabEditWdg';
var kwargs = {}
spt.panel.load_popup("Tab Edit", class_name, kwargs)
'''
} )
icon_div.add_style("padding-top: 4px")
icon_div.add_style("float: left")
icon_div.add_style("height: 20px")
icon_div.add_style("width: 18px")
icon_div.add_style("margin-left: 2px")
icon_div.add_gradient("background", "background", -5, 5)
icon_div.add_border()
icon_div.add_style("text-align: center")
div.add(icon_div)
return div
def add_context_menu(self, header_div):
from .menu_wdg import Menu, MenuItem
menu = Menu(width=180)
#menu.set_allow_icons(False)
#menu.set_setup_cbfn( 'spt.tab.smenu_ctx.setup_cbk' )
menu_item = MenuItem(type='title', label='Actions')
menu.add(menu_item)
menu_item = MenuItem(type='action', label='Reload Tab')
menu_item.add_behavior( {
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var top = activator.getParent(".spt_tab_top");
spt.tab.top = top;
var header = activator;
var class_name = header.getAttribute("spt_class_name");
var kwargs_str = header.getAttribute("spt_kwargs");
var kwargs;
if (kwargs_str != '') {
kwargs_str = kwargs_str.replace(/\&quot\;/g, '"');
kwargs = JSON.parse(kwargs_str);
}
else {
kwargs = {};
}
var values = null;
var force = true;
spt.tab.load_class(header, class_name, kwargs, values, force);
'''
} )
menu.add(menu_item)
menu_item = MenuItem(type='separator')
menu.add(menu_item)
menu_item = MenuItem(type='action', label='Rename Tab')
menu_item.add_behavior( {
'cbjs_action': '''
var class_name = 'tactic.ui.container.TabRenameWdg';
var kwargs = {};
var activator = spt.smenu.get_activator(bvr);
var label = activator.getElement(".spt_tab_header_label");
name = label.innerHTML;
title = "Raname Tab ["+name+"]";
var popup = spt.panel.load_popup(title, class_name, kwargs);
popup.activator = activator;
'''
} )
menu.add(menu_item)
"""
menu_item = MenuItem(type='action', label='New Tab')
menu_item.add_behavior( {
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var top = activator.getParent(".spt_tab_top");
spt.tab.top = top;
spt.tab.add_new();
'''
} )
menu.add(menu_item)
"""
menu_item = MenuItem(type='action', label='Tear Off')
menu_item.add_behavior( {
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var top = activator.getParent(".spt_tab_top");
            // add new if this is the last one
var headers = spt.tab.get_headers();
if (headers.length == 1) {
spt.tab.add_new();
}
spt.tab.top = top;
var header = activator;
var element_name = header.getAttribute("spt_element_name");
spt.behavior.destroy_element(header);
var contents = spt.tab.get_contents();
for (var i=0; i<contents.length; i++) {
var content = contents[i];
if (content.getAttribute("spt_element_name") == element_name) {
spt.panel.load_popup_with_html( element_name, content.innerHTML );
spt.behavior.destroy_element(content);
}
}
'''
} )
menu.add(menu_item)
menu_item = MenuItem(type='action', label='Copy To Main Tab')
menu_item.add_behavior( {
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var top = activator.getParent(".spt_tab_top");
spt.tab.top = top;
var html;
var header = activator;
var element_name = header.getAttribute("spt_element_name");
var title = header.getAttribute("spt_title");
var class_name = header.getAttribute("spt_class_name");
var kwargs_str = header.getAttribute("spt_kwargs");
var kwargs = {};
if (kwargs_str) {
kwargs_str = kwargs_str.replace(/\&quot\;/g, '"');
kwargs = JSON.parse(kwargs_str);
}
var contents = spt.tab.get_contents();
for (var i=0; i<contents.length; i++) {
var content = contents[i];
if (content.getAttribute("spt_element_name") == element_name) {
html = content.innerHTML;
break;
}
}
spt.tab.set_main_body_tab();
spt.tab.add_new(element_name, title, class_name, kwargs);
'''
} )
menu.add(menu_item)
if self.kwargs.get("show_remove") not in ['false', False]:
menu_item = MenuItem(type='separator')
menu.add(menu_item)
menu_item = MenuItem(type='action', label='Close Tab')
menu_item.add_behavior( {
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var top = activator.getParent(".spt_tab_top");
spt.tab.top = top;
var header = activator;
var element_name = header.getAttribute("spt_element_name");
spt.behavior.destroy_element(header);
var contents = top.getElements(".spt_tab_content");
for (var i=0; i<contents.length; i++) {
var content = contents[i];
if (content.getAttribute("element_name") == element_name) {
spt.behavior.destroy_element(content);
}
}
'''
} )
menu.add(menu_item)
menu_item = MenuItem(type='action', label='Close All Except This Tab')
menu_item.add_behavior( {
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var top = activator.getParent(".spt_tab_top");
spt.tab.top = top;
var headers = spt.tab.get_headers();
for (var i=0; i < headers.length; i++) {
var element_name = headers[i].getAttribute("spt_element_name");
if (activator.getAttribute('spt_element_name') != element_name) {
spt.tab.close(headers[i]);
}
}
var element_name = activator.getAttribute("spt_element_name");
spt.tab.select(element_name);
'''
} )
menu.add(menu_item)
security = Environment.get_security()
if security.check_access("builtin", "view_site_admin", "allow"):
menu_item = MenuItem(type='separator')
menu.add(menu_item)
menu_item = MenuItem(type='action', label='View Definition')
menu_item.add_behavior( {
'cbjs_action': '''spt.tab.view_definition(bvr);'''
} )
menu.add(menu_item)
menu_item = MenuItem(type='action', label='Add to Side Bar')
menu_item.add_behavior( {
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var top = activator.getParent(".spt_tab_top");
spt.tab.top = top;
var header = activator;
var element_name = header.getAttribute("spt_element_name");
var title = header.getAttribute("spt_title");
var kwargs = header.getAttribute("spt_kwargs");
kwargs = kwargs.replace(/\&quot\;/g, '"');
kwargs = JSON.parse(kwargs);
var view = element_name;
var element_name = element_name.replace(/ /g, "_");
element_name = element_name.replace(/\//g, "_");
var kwargs = {
class_name: 'LinkWdg',
display_options: kwargs,
element_attrs: {
title: title
}
}
try {
var server = TacticServerStub.get();
var info = server.add_config_element("SideBarWdg", "definition", element_name, kwargs);
var info = server.add_config_element("SideBarWdg", "project_view", element_name, kwargs);
spt.panel.refresh("side_bar");
}
catch(e) {
alert(e);
throw(e);
}
'''
} )
menu.add(menu_item)
has_my_views = True
if has_my_views:
menu_item = MenuItem(type='action', label='Add to My Views')
menu_item.add_behavior( {
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var top = activator.getParent(".spt_tab_top");
spt.tab.top = top;
var header = activator;
var element_name = header.getAttribute("spt_element_name");
var title = header.getAttribute("spt_title");
var kwargs = header.getAttribute("spt_kwargs");
kwargs = kwargs.replace(/\&quot\;/g, '"');
kwargs = JSON.parse(kwargs);
var login = 'admin';
var class_name = kwargs.class_name;
if (!class_name) {
class_name = "tactic.ui.panel.CustomLayoutWdg";
}
var view = element_name;
var element_name = element_name.replace(/ /g, "_");
element_name = element_name.replace(/\//g, "_");
element_name = login + "." + element_name;
var kwargs = {
class_name: class_name,
display_options: kwargs,
element_attrs: {
title: title
},
login: login,
unique: false,
}
var view = "self_view_" + login;
try {
var server = TacticServerStub.get();
var info = server.add_config_element("SideBarWdg", "definition", element_name, kwargs);
var info = server.add_config_element("SideBarWdg", view, element_name, kwargs);
spt.panel.refresh("side_bar");
}
catch(e) {
alert(e);
throw(e);
}
'''
} )
menu.add(menu_item)
if self.kwargs.get("show_context_menu") not in ['false', False]:
menus = [menu.get_data()]
menus_in = {
'DG_HEADER_CTX': menus,
}
from .smart_menu_wdg import SmartMenu
SmartMenu.attach_smart_context_menu( header_div, menus_in, False )
def get_tab_header(self, element_name, title, class_name=None, kwargs=None, is_selected=False, is_loaded=False, is_template=False, attrs={}):
web = WebContainer.get_web()
#header = DivWdg()
header = HtmlElement.li()
header.add_class("spt_tab_header")
header.add_class("nav-item")
header.add_attr("spt_tab_id", self.unique_id)
header.add_class("hand")
header.add_style("width: 120px")
header.add_behavior({
'type': 'load',
'cbjs_action': '''$(bvr.src_el).bmdRipples();'''
})
if self.use_default_style:
header.add_class("rounded-top-corners")
if is_selected:
header.add_class("spt_tab_selected")
header.add_class("spt_is_selected")
else:
header.add_class("spt_tab_unselected")
count = attrs.get("count")
header.add_attr("spt_element_name", element_name)
header.add_attr("spt_title", title)
if not is_template:
if kwargs:
if count:
kwargs['count'] = count
kwargs_str = Common.convert_to_json(kwargs)
header.add_attr("spt_kwargs", kwargs_str)
# Disable widget key for now
#widget_key = header.generate_widget_key(class_name, inputs=kwargs)
#header.add_attr("spt_class_name", widget_key)
header.add_attr("spt_class_name", class_name)
# For viewing definition with widget_key
header.add_attr("spt_class_name_decoded", class_name)
else:
header.add_attr("spt_kwargs", '')
widget_key = None
header.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var header = bvr.src_el;
spt.tab.top = header.getParent(".spt_tab_top");
var this_name = header.getAttribute('spt_element_name');
spt.tab.select(this_name);
'''
} )
from .smart_menu_wdg import SmartMenu
SmartMenu.assign_as_local_activator( header, 'DG_HEADER_CTX' )
title_container = DivWdg(css="spt_tab_header_label_container")
title_container.add_class("nav-link")
show_remove = self.kwargs.get("show_remove")
if show_remove not in [False, "false"]:
title_container.add_style("width: 95%")
else:
title_container.add_style("width: 100%")
header.add(title_container)
title_div = DivWdg()
title_div.add_class("spt_tab_header_label")
display_title = title
title_div.add(display_title)
title_container.add(title_div)
title_div.add_attr("title", "%s" % (title))
count_wdg = SpanWdg()
count_wdg.add_class("badge badge-secondary spt_tab_header_count")
title_container.add(count_wdg)
icon = None
if icon:
icon = IconWdg(name="whatever", icon=icon)
title_div.add(icon)
if count:
count_color = attrs.get("count_color")
state = self.kwargs.get("state") or {}
search_key = state.get("search_key")
if not search_key:
search_key = self.kwargs.get("search_key")
if search_key:
sobject = Search.get_by_search_key(search_key)
else:
sobject = None
if sobject:
value = Search.eval(count, sobject)
count_wdg.add(value)
if count_color:
count_wdg.add_style("background", count_color)
count_wdg.add_update( {
'expression': count,
'expr_key': search_key,
} )
remove_wdg = DivWdg()
remove_wdg.add_class("spt_tab_remove")
remove_wdg.add_behavior( {
'type': 'click',
'cbjs_action': '''
spt.tab.close(bvr.src_el);
'''
} )
show_remove = self.kwargs.get("show_remove")
show_remove_tabs = self.kwargs.get("show_remove_tabs")
if show_remove_tabs:
show_remove_tabs_list = show_remove_tabs.split("|")
if element_name in show_remove_tabs_list:
show_remove = False
if is_template or show_remove not in [False, 'false']:
header.add(remove_wdg)
if show_remove == "hover":
remove_wdg.add_style("display: none")
remove_icon_path = self.kwargs.get("remove_icon_path")
if (remove_icon_path):
icon = HtmlElement.img(remove_icon_path)
icon.add_styles("padding: 2px; width: 11px")
remove_wdg.add_style("right: 6px;")
else:
from tactic.ui.widget import ButtonNewWdg
icon = ButtonNewWdg(icon="FA_TIMES", size=12, width=20)
icon.add_class("spt_icon_active")
remove_wdg.add(icon)
divider_div = DivWdg()
divider_div.add_style("width: 1px")
divider_div.add_style("background: #fff")
divider_div.add_style("height: 20px")
header.add(divider_div)
# add a drag behavior
allow_drag = self.kwargs.get("allow_drag")
if allow_drag not in [False, 'false']:
header.add_class("drag-header")
header.add_behavior( {
'type': 'drag',
"drag_el": '@',
"cb_set_prefix": 'spt.tab.header_drag'
} )
return header
def get_tab_subheader(self, element_name, title, class_name=None, kwargs=None, is_selected=False, is_loaded=False, is_template=False, config=None):
subheader_div = DivWdg()
subheader_div.add_class("spt_tab_subheader")
subheader_div.add_style("width: 200px")
subheader_div.add_style("height: auto")
subheader_div.add_border()
subheader_div.add_style("position: absolute")
subheader_div.add_style("left: 5px")
subheader_div.add_color("background", "background")
subheader_div.add_style("top: 28px")
subheader_div.add_style("padding: 10px 5px")
#element_names = ['my_tasks','all_orders','all_deliverables']
element_names = []
for element_name in element_names:
attrs = config.get_element_attributes(element_name)
title = attrs.get("title")
if not title:
title = Common.get_display_title(element_name)
subheader = DivWdg()
subheader.add_style("position: relative")
subheader.add_attr("spt_element_name", element_name)
subheader.add_class("spt_tab_subheader_item")
icon = IconWdg("Remove Tab", "FA_TIMES", opacity=0.3)
subheader.add(icon)
icon.add_class("spt_icon_inactive")
icon.add_styles("position: absolute; right: 0; top: 3px;")
subheader_div.add( subheader )
subheader.add_style("padding: 5px")
subheader.add(title)
display_class = config.get_display_handler(element_name)
display_options = config.get_display_options(element_name)
"""
subheader.add_behavior( {
'type': 'click',
'title': title,
'display_class': display_class,
'display_options': display_options,
'cbjs_action': '''
spt.panel.load_popup(bvr.title, bvr.display_class, bvr.display_options);
'''
} )
subheader.add_behavior( {
'type': 'mouseenter',
'cbjs_action': '''
bvr.src_el.setStyle("background", "#DDD");
'''
} )
subheader.add_behavior( {
'type': 'mouseleave',
'cbjs_action': '''
bvr.src_el.setStyle("background", "");
'''
} )
"""
return subheader_div
def add_subheader_behaviors(self, subheader_top):
subheader_top.set_unique_id()
subheader_top.add_smart_style("spt_tab_subheader_item", "pointer", "cursor")
subheader_top.add_relay_behavior( {
'type': 'click',
'bvr_match_class': 'spt_tab_subheader_item',
'cbjs_action': '''
var element_name = bvr.src_el.getAttribute("spt_element_name");
var title = bvr.src_el.getAttribute("spt_title");
var display_class = bvr.src_el.getAttribute("spt_class_name");
var kwargs_str = bvr.src_el.getAttribute("spt_kwargs");
if (!kwargs_str) {
kwargs = {}
}
else {
kwargs_str = kwargs_str.replace(/\&quot\;/g, '"');
kwargs = JSON.parse(kwargs_str);
}
spt.tab.load_selected(element_name, title, display_class, kwargs);
'''
} )
subheader_top.add_relay_behavior( {
'type': 'mouseenter',
'bvr_match_class': 'spt_tab_subheader_item',
'cbjs_action': '''
bvr.src_el.setStyle("background", "#DDD");
'''
} )
subheader_top.add_relay_behavior( {
'type': 'mouseleave',
'bvr_match_class': 'spt_tab_subheader_item',
'cbjs_action': '''
bvr.src_el.setStyle("background", "");
'''
} )
subheader_top.add_relay_behavior( {
'type': 'mouseleave',
'bvr_match_class': 'spt_tab_subheader',
'cbjs_action': '''
bvr.src_el.setStyle("display", "none");
'''
} )
__all__.append("TabRenameWdg")
class TabRenameWdg(BaseRefreshWdg):
def get_display(self):
top = self.top
top.add_style("margin: 20px")
top.add_style("min-width: 250px")
top.add_class("spt_tab_rename_top")
top.add("<div>New Name:</div>")
from tactic.ui.input import TextInputWdg
from tactic.ui.widget import ActionButtonWdg
text = TextInputWdg(name="new_name")
text.add_class("spt_tab_new_name")
top.add(text)
text.add_behavior( {
'type': 'load',
'cbjs_action': 'bvr.src_el.focus()'
} )
top.add("<br/>")
button = ActionButtonWdg(title="Rename", color="primary")
top.add(button)
button.add_style("float: right")
button.add_behavior( {
'type': 'click',
'cbjs_action': '''
var popup = bvr.src_el.getParent(".spt_popup");
var activator = popup.activator
var rename_top = bvr.src_el.getParent(".spt_tab_rename_top");
var input = rename_top.getElement(".spt_tab_new_name");
new_name = input.value
spt.popup.close(popup);
var label = activator.getElement(".spt_tab_header_label");
label.innerHTML = new_name;
label.setAttribute("title", new_name);
activator.setAttribute("spt_title", new_name);
var top = spt.tab.top;
if (!top) {
spt.tab.set_main_body_tab();
top = spt.tab.top;
}
if (top.hasClass("spt_tab_save_state") ) {
spt.tab.save_state();
}
'''
} )
top.add("<br clear='all'/>")
return top
from pyasm.command import Command
class TabSaveStateCmd(Command):
def __init__(self, **kwargs):
super(TabSaveStateCmd, self).__init__(**kwargs)
self.update = True
def execute(self):
class_names = self.kwargs.get("class_names")
attrs_list = self.kwargs.get("attrs_list")
kwargs_list = self.kwargs.get("kwargs_list")
save_state = self.kwargs.get("save_state")
xml = Xml()
xml.create_doc("config")
root = xml.get_root_node()
view = xml.create_element("tab")
xml.append_child(root, view)
for class_name, attrs, kwargs in zip(class_names, attrs_list, kwargs_list):
element = xml.create_element("element")
xml.append_child(view, element)
for key, value in attrs.items():
xml.set_attribute(element, key, value)
display = xml.create_element("display")
xml.append_child(element, display)
if class_name.startswith("$"):
from pyasm.common import jsonloads
key = class_name
key = key.lstrip("$")
tmp_dir = Environment.get_tmp_dir(include_ticket=True)
path = "%s/%s_key_%s.txt" % (tmp_dir, "widget", key)
if not os.path.exists(path):
print("ERROR: %s path [%s] not found" % ("widget", path))
raise Exception("widget key not valid")
f = open(path, 'r')
data = f.read()
f.close()
data = jsonloads(data)
class_name = data.get("method")
xml.set_attribute(display, "class", class_name)
for key, value in kwargs.items():
if (key == 'count'):
xml.set_attribute(element, key, value)
else:
attr = xml.create_text_element(key, value)
xml.append_child(display, attr)
xml_string = xml.to_string()
WidgetSettings.set_value_by_key(save_state, xml_string)
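# Illustrative sketch (not part of the original module): the command above stores a
# widget setting whose value is a config XML document shaped roughly as follows.
# Element attributes come from attrs_list, the display class from class_names
# (widget keys starting with "$" are resolved to their real class name first), and
# each kwargs entry becomes a child text element of <display>, except "count",
# which is written as an attribute of <element>. The element/display names below
# are made-up example values.
#
#   <config>
#     <tab>
#       <element name="my_tasks" title="My Tasks">
#         <display class="tactic.ui.panel.ViewPanelWdg">
#           <search_type>sthpw/task</search_type>
#           <view>my_view</view>
#         </display>
#       </element>
#     </tab>
#   </config>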
| 30.215484 | 171 | 0.547882 | [
"EPL-1.0"
] | CeltonMcGrath/TACTIC | src/tactic/ui/container/tab_wdg.py | 93,668 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'WorkloadCrrAccessTokenResponse',
]
@pulumi.output_type
class WorkloadCrrAccessTokenResponse(dict):
def __init__(__self__, *,
object_type: str,
access_token_string: Optional[str] = None,
b_ms_active_region: Optional[str] = None,
backup_management_type: Optional[str] = None,
container_id: Optional[str] = None,
container_name: Optional[str] = None,
container_type: Optional[str] = None,
coordinator_service_stamp_id: Optional[str] = None,
coordinator_service_stamp_uri: Optional[str] = None,
datasource_container_name: Optional[str] = None,
datasource_id: Optional[str] = None,
datasource_name: Optional[str] = None,
datasource_type: Optional[str] = None,
policy_id: Optional[str] = None,
policy_name: Optional[str] = None,
protectable_object_container_host_os_name: Optional[str] = None,
protectable_object_friendly_name: Optional[str] = None,
protectable_object_parent_logical_container_name: Optional[str] = None,
protectable_object_protection_state: Optional[str] = None,
protectable_object_unique_name: Optional[str] = None,
protectable_object_workload_type: Optional[str] = None,
protection_container_id: Optional[float] = None,
protection_service_stamp_id: Optional[str] = None,
protection_service_stamp_uri: Optional[str] = None,
recovery_point_id: Optional[str] = None,
recovery_point_time: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_id: Optional[str] = None,
resource_name: Optional[str] = None,
rp_is_managed_virtual_machine: Optional[bool] = None,
rp_original_sa_option: Optional[bool] = None,
rp_tier_information: Optional[Mapping[str, str]] = None,
rp_vm_size_description: Optional[str] = None,
subscription_id: Optional[str] = None,
token_extended_information: Optional[str] = None):
"""
:param str object_type: Type of the specific object - used for deserializing
Expected value is 'WorkloadCrrAccessToken'.
:param str access_token_string: Access token used for authentication
:param str b_ms_active_region: Active region name of BMS Stamp
:param str backup_management_type: Backup Management Type
:param str container_id: Container Id
:param str container_name: Container Unique name
:param str container_type: Container Type
:param str coordinator_service_stamp_id: CoordinatorServiceStampId to be used by BCM in restore call
:param str coordinator_service_stamp_uri: CoordinatorServiceStampUri to be used by BCM in restore call
:param str datasource_container_name: Datasource Container Unique Name
:param str datasource_id: Datasource Id
:param str datasource_name: Datasource Friendly Name
:param str datasource_type: Datasource Type
:param str policy_id: Policy Id
:param str policy_name: Policy Name
:param float protection_container_id: Protected item container id
:param str protection_service_stamp_id: ProtectionServiceStampId to be used by BCM in restore call
:param str protection_service_stamp_uri: ProtectionServiceStampUri to be used by BCM in restore call
:param str recovery_point_id: Recovery Point Id
:param str recovery_point_time: Recovery Point Time
:param str resource_group_name: Resource Group name of the source vault
:param str resource_id: Resource Id of the source vault
:param str resource_name: Resource Name of the source vault
:param bool rp_is_managed_virtual_machine: Recovery point information: Managed virtual machine
:param bool rp_original_sa_option: Recovery point information: Original SA option
:param Mapping[str, str] rp_tier_information: Recovery point Tier Information
:param str rp_vm_size_description: Recovery point information: VM size description
:param str subscription_id: Subscription Id of the source vault
:param str token_extended_information: Extended Information about the token like FileSpec etc.
"""
pulumi.set(__self__, "object_type", 'WorkloadCrrAccessToken')
if access_token_string is not None:
pulumi.set(__self__, "access_token_string", access_token_string)
if b_ms_active_region is not None:
pulumi.set(__self__, "b_ms_active_region", b_ms_active_region)
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if container_id is not None:
pulumi.set(__self__, "container_id", container_id)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if container_type is not None:
pulumi.set(__self__, "container_type", container_type)
if coordinator_service_stamp_id is not None:
pulumi.set(__self__, "coordinator_service_stamp_id", coordinator_service_stamp_id)
if coordinator_service_stamp_uri is not None:
pulumi.set(__self__, "coordinator_service_stamp_uri", coordinator_service_stamp_uri)
if datasource_container_name is not None:
pulumi.set(__self__, "datasource_container_name", datasource_container_name)
if datasource_id is not None:
pulumi.set(__self__, "datasource_id", datasource_id)
if datasource_name is not None:
pulumi.set(__self__, "datasource_name", datasource_name)
if datasource_type is not None:
pulumi.set(__self__, "datasource_type", datasource_type)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if policy_name is not None:
pulumi.set(__self__, "policy_name", policy_name)
if protectable_object_container_host_os_name is not None:
pulumi.set(__self__, "protectable_object_container_host_os_name", protectable_object_container_host_os_name)
if protectable_object_friendly_name is not None:
pulumi.set(__self__, "protectable_object_friendly_name", protectable_object_friendly_name)
if protectable_object_parent_logical_container_name is not None:
pulumi.set(__self__, "protectable_object_parent_logical_container_name", protectable_object_parent_logical_container_name)
if protectable_object_protection_state is not None:
pulumi.set(__self__, "protectable_object_protection_state", protectable_object_protection_state)
if protectable_object_unique_name is not None:
pulumi.set(__self__, "protectable_object_unique_name", protectable_object_unique_name)
if protectable_object_workload_type is not None:
pulumi.set(__self__, "protectable_object_workload_type", protectable_object_workload_type)
if protection_container_id is not None:
pulumi.set(__self__, "protection_container_id", protection_container_id)
if protection_service_stamp_id is not None:
pulumi.set(__self__, "protection_service_stamp_id", protection_service_stamp_id)
if protection_service_stamp_uri is not None:
pulumi.set(__self__, "protection_service_stamp_uri", protection_service_stamp_uri)
if recovery_point_id is not None:
pulumi.set(__self__, "recovery_point_id", recovery_point_id)
if recovery_point_time is not None:
pulumi.set(__self__, "recovery_point_time", recovery_point_time)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if resource_id is not None:
pulumi.set(__self__, "resource_id", resource_id)
if resource_name is not None:
pulumi.set(__self__, "resource_name", resource_name)
if rp_is_managed_virtual_machine is not None:
pulumi.set(__self__, "rp_is_managed_virtual_machine", rp_is_managed_virtual_machine)
if rp_original_sa_option is not None:
pulumi.set(__self__, "rp_original_sa_option", rp_original_sa_option)
if rp_tier_information is not None:
pulumi.set(__self__, "rp_tier_information", rp_tier_information)
if rp_vm_size_description is not None:
pulumi.set(__self__, "rp_vm_size_description", rp_vm_size_description)
if subscription_id is not None:
pulumi.set(__self__, "subscription_id", subscription_id)
if token_extended_information is not None:
pulumi.set(__self__, "token_extended_information", token_extended_information)
@property
@pulumi.getter(name="objectType")
def object_type(self) -> str:
"""
Type of the specific object - used for deserializing
Expected value is 'WorkloadCrrAccessToken'.
"""
return pulumi.get(self, "object_type")
@property
@pulumi.getter(name="accessTokenString")
def access_token_string(self) -> Optional[str]:
"""
Access token used for authentication
"""
return pulumi.get(self, "access_token_string")
@property
@pulumi.getter(name="bMSActiveRegion")
def b_ms_active_region(self) -> Optional[str]:
"""
Active region name of BMS Stamp
"""
return pulumi.get(self, "b_ms_active_region")
@property
@pulumi.getter(name="backupManagementType")
def backup_management_type(self) -> Optional[str]:
"""
Backup Management Type
"""
return pulumi.get(self, "backup_management_type")
@property
@pulumi.getter(name="containerId")
def container_id(self) -> Optional[str]:
"""
Container Id
"""
return pulumi.get(self, "container_id")
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[str]:
"""
Container Unique name
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="containerType")
def container_type(self) -> Optional[str]:
"""
Container Type
"""
return pulumi.get(self, "container_type")
@property
@pulumi.getter(name="coordinatorServiceStampId")
def coordinator_service_stamp_id(self) -> Optional[str]:
"""
CoordinatorServiceStampId to be used by BCM in restore call
"""
return pulumi.get(self, "coordinator_service_stamp_id")
@property
@pulumi.getter(name="coordinatorServiceStampUri")
def coordinator_service_stamp_uri(self) -> Optional[str]:
"""
CoordinatorServiceStampUri to be used by BCM in restore call
"""
return pulumi.get(self, "coordinator_service_stamp_uri")
@property
@pulumi.getter(name="datasourceContainerName")
def datasource_container_name(self) -> Optional[str]:
"""
Datasource Container Unique Name
"""
return pulumi.get(self, "datasource_container_name")
@property
@pulumi.getter(name="datasourceId")
def datasource_id(self) -> Optional[str]:
"""
Datasource Id
"""
return pulumi.get(self, "datasource_id")
@property
@pulumi.getter(name="datasourceName")
def datasource_name(self) -> Optional[str]:
"""
Datasource Friendly Name
"""
return pulumi.get(self, "datasource_name")
@property
@pulumi.getter(name="datasourceType")
def datasource_type(self) -> Optional[str]:
"""
Datasource Type
"""
return pulumi.get(self, "datasource_type")
@property
@pulumi.getter(name="policyId")
def policy_id(self) -> Optional[str]:
"""
Policy Id
"""
return pulumi.get(self, "policy_id")
@property
@pulumi.getter(name="policyName")
def policy_name(self) -> Optional[str]:
"""
Policy Name
"""
return pulumi.get(self, "policy_name")
@property
@pulumi.getter(name="protectableObjectContainerHostOsName")
def protectable_object_container_host_os_name(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_container_host_os_name")
@property
@pulumi.getter(name="protectableObjectFriendlyName")
def protectable_object_friendly_name(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_friendly_name")
@property
@pulumi.getter(name="protectableObjectParentLogicalContainerName")
def protectable_object_parent_logical_container_name(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_parent_logical_container_name")
@property
@pulumi.getter(name="protectableObjectProtectionState")
def protectable_object_protection_state(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_protection_state")
@property
@pulumi.getter(name="protectableObjectUniqueName")
def protectable_object_unique_name(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_unique_name")
@property
@pulumi.getter(name="protectableObjectWorkloadType")
def protectable_object_workload_type(self) -> Optional[str]:
return pulumi.get(self, "protectable_object_workload_type")
@property
@pulumi.getter(name="protectionContainerId")
def protection_container_id(self) -> Optional[float]:
"""
Protected item container id
"""
return pulumi.get(self, "protection_container_id")
@property
@pulumi.getter(name="protectionServiceStampId")
def protection_service_stamp_id(self) -> Optional[str]:
"""
ProtectionServiceStampId to be used by BCM in restore call
"""
return pulumi.get(self, "protection_service_stamp_id")
@property
@pulumi.getter(name="protectionServiceStampUri")
def protection_service_stamp_uri(self) -> Optional[str]:
"""
ProtectionServiceStampUri to be used by BCM in restore call
"""
return pulumi.get(self, "protection_service_stamp_uri")
@property
@pulumi.getter(name="recoveryPointId")
def recovery_point_id(self) -> Optional[str]:
"""
Recovery Point Id
"""
return pulumi.get(self, "recovery_point_id")
@property
@pulumi.getter(name="recoveryPointTime")
def recovery_point_time(self) -> Optional[str]:
"""
Recovery Point Time
"""
return pulumi.get(self, "recovery_point_time")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[str]:
"""
Resource Group name of the source vault
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[str]:
"""
Resource Id of the source vault
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> Optional[str]:
"""
Resource Name of the source vault
"""
return pulumi.get(self, "resource_name")
@property
@pulumi.getter(name="rpIsManagedVirtualMachine")
def rp_is_managed_virtual_machine(self) -> Optional[bool]:
"""
Recovery point information: Managed virtual machine
"""
return pulumi.get(self, "rp_is_managed_virtual_machine")
@property
@pulumi.getter(name="rpOriginalSAOption")
def rp_original_sa_option(self) -> Optional[bool]:
"""
Recovery point information: Original SA option
"""
return pulumi.get(self, "rp_original_sa_option")
@property
@pulumi.getter(name="rpTierInformation")
def rp_tier_information(self) -> Optional[Mapping[str, str]]:
"""
Recovery point Tier Information
"""
return pulumi.get(self, "rp_tier_information")
@property
@pulumi.getter(name="rpVMSizeDescription")
def rp_vm_size_description(self) -> Optional[str]:
"""
Recovery point information: VM size description
"""
return pulumi.get(self, "rp_vm_size_description")
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> Optional[str]:
"""
Subscription Id of the source vault
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter(name="tokenExtendedInformation")
def token_extended_information(self) -> Optional[str]:
"""
Extended Information about the token like FileSpec etc.
"""
return pulumi.get(self, "token_extended_information")
| 41.794749 | 134 | 0.672624 | [
"Apache-2.0"
] | polivbr/pulumi-azure-native | sdk/python/pulumi_azure_native/recoveryservices/v20181220/outputs.py | 17,512 | Python |
from .ric import RIC
import pysecm.ric.commodity
import pysecm.ric.equity
import pysecm.ric.fixed_income
import pysecm.ric.fx
import pysecm.ric.index
| 21.428571 | 30 | 0.833333 | [
"MIT"
] | bostonrwalker/pysecm | pysecm/ric/__init__.py | 150 | Python |
import os
import numpy as np
from netCDF4 import Dataset
def load_region(region_id, local=False, return_regions=False):
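    # Returns a mask for the requested forecast region together with the bounding-box
    # indices (y_min, y_max, x_min, x_max) of that region in the source grid. The mask
    # is 1.0 inside the region and NaN outside, so it can be multiplied onto clipped
    # model fields. With local=True the 2017/2018 local-area file is read, otherwise
    # the 2019 forecast-region file; return_regions=True also returns the full region
    # grid.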
if local:
_vr = Dataset(
os.path.join(os.path.dirname(os.path.abspath(__file__)), r"data/terrain_parameters/VarslingsOmr_2017.nc"),
"r")
# flip up-down because Meps data is upside down
#_regions = np.flipud(_vr.variables["LokalOmr_2018"][:])
_regions = _vr.variables["LokalOmr_2018"][:]
else:
_vr = Dataset(
os.path.join(os.path.dirname(os.path.abspath(__file__)), r"data/terrain_parameters/VarslingsOmr_2019.nc"),
"r")
# flip up-down because Meps data is upside down
#_regions = np.flipud(_vr.variables["skredomr19_km"][:])
_regions = _vr.variables["skredomr19_km"][:]
print("Missing value: {mv}".format(mv=_vr.variables["skredomr19_km"].missing_value))
_region_bounds = np.where(_regions == region_id) # just to get the bounding box
# get the lower left and upper right corner of a rectangle around the region
y_min, y_max, x_min, x_max = min(_region_bounds[0].flatten()), max(_region_bounds[0].flatten()), \
min(_region_bounds[1].flatten()), max(_region_bounds[1].flatten())
#reg_mask = np.ma.masked_where(_regions[y_min:y_max, x_min:x_max] == region_id, _regions[y_min:y_max, x_min:x_max]).mask
#reg_mask = np.where(_regions[y_min:y_max, x_min:x_max] == region_id, _regions[y_min:y_max, x_min:x_max], np.nan)
reg_mask = np.where(_regions[y_min:y_max, x_min:x_max] == region_id, 1., np.nan)
#reg_mask = np.ma.masked_where(_reg_mask == region_id).mask
_vr.close()
if return_regions:
return _regions, reg_mask, y_min, y_max, x_min, x_max
else:
return reg_mask, y_min, y_max, x_min, x_max
def clip_region(nc_variable, region_mask, t_index, y_min, y_max, x_min, x_max):
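    # Clips a 2- to 4-dimensional netCDF variable to the bounding box computed by
    # load_region and applies the region mask; t_index selects the time step for
    # 3D/4D variables (the extra level dimension of 4D variables is taken at index 0).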
s = len(nc_variable.shape)
if s == 2:
#return np.flipud(region_mask * nc_variable[y_min:y_max, x_min:x_max])
return (region_mask * nc_variable[y_min:y_max, x_min:x_max])
elif s == 3:
#return np.flipud(region_mask * nc_variable[t_index, y_min:y_max, x_min:x_max])
return (region_mask * nc_variable[t_index, y_min:y_max, x_min:x_max])
elif s == 4:
#return np.flipud(region_mask * nc_variable[t_index, 0, y_min:y_max, x_min:x_max])
return (region_mask * nc_variable[t_index, 0, y_min:y_max, x_min:x_max])
else:
        print('Input array needs to have 2 to 4 dimensions: {0} were given.'.format(s))
if __name__ == "__main__":
import matplotlib.pyplot as plt
regions, region_mask, y_min, y_max, x_min, x_max = load_region(3013, return_regions=True)
print(region_mask, type(region_mask), np.unique(region_mask))
clp = clip_region(regions, region_mask, 0, y_min, y_max, x_min, x_max)
plt.imshow(clp)
plt.show()
    k = 'm'
| 43.731343 | 124 | 0.669283 | [
"MIT"
] | kmunve/APS | aps/load_region.py | 2,930 | Python |
# Generated by Django 2.1.11 on 2020-06-24 06:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0008_auto_20190919_1521'),
]
operations = [
migrations.AddField(
model_name='product',
name='is_deleted',
field=models.BooleanField(default=False),
),
]
| 20.736842 | 53 | 0.606599 | [
"MIT"
] | Baktybek0312/ecommerce_api | products/migrations/0009_product_is_deleted.py | 394 | Python |
import time
import torch
from hpc_rll.origin.td import iqn_nstep_td_error, iqn_nstep_td_data
from hpc_rll.rl_utils.td import IQNNStepTDError
from testbase import mean_relative_error, times
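# Note: testbase is a local helper module of this repo and is not shown here. A
# minimal sketch of what it is assumed to provide (the implementation and the value
# of `times` below are assumptions, not the actual testbase code):
#
#   import numpy as np
#   def mean_relative_error(y_true, y_pred):
#       return float(np.mean(np.abs(y_true - y_pred) / (np.abs(y_true) + 1e-8)))
#   times = 10  # number of timed repetitions used by the perf loops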
assert torch.cuda.is_available()
use_cuda = True
tau = 33
tauPrime = 34
T = 10
B = 64
N = 8
gamma = 0.95
kappa = 0.9
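# Shape conventions used in both tests below: q is (tau, B, N) online quantile values,
# next_n_q is (tauPrime, B, N) target quantile values, action / next_n_action are (B,),
# reward is the (T, B) n-step reward stack, done is (B,), r_q is the (tau, B) tensor
# passed in the replay-quantile slot of iqn_nstep_td_data, and weight / value_gamma are
# per-sample (B,) importance weights and discount factors.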
def iqn_val():
ori_q = torch.randn(tau, B, N)
ori_next_n_q = torch.randn(tauPrime, B, N)
ori_action = torch.randint(0, N, size=(B, ))
ori_next_n_action = torch.randint(0, N, size=(B, ))
ori_reward = torch.randn(T, B)
ori_done = torch.randn(B)
ori_r_q = torch.randn(tau, B)
ori_weight = torch.randn(B)
ori_value_gamma = torch.randn(B)
hpc_q = ori_q.clone().detach()
hpc_next_n_q = ori_next_n_q.clone().detach()
hpc_action = ori_action.clone().detach()
hpc_next_n_action = ori_next_n_action.clone().detach()
hpc_reward = ori_reward.clone().detach()
hpc_done = ori_done.clone().detach()
hpc_r_q = ori_r_q.clone().detach()
hpc_weight = ori_weight.clone().detach()
hpc_value_gamma = ori_value_gamma.clone().detach()
hpc_iqn = IQNNStepTDError(tau, tauPrime, T, B, N)
if use_cuda:
ori_q = ori_q.cuda()
ori_next_n_q = ori_next_n_q.cuda()
ori_action = ori_action.cuda()
ori_next_n_action = ori_next_n_action.cuda()
ori_reward = ori_reward.cuda()
ori_done = ori_done.cuda()
ori_r_q = ori_r_q.cuda()
ori_weight = ori_weight.cuda()
ori_value_gamma = ori_value_gamma.cuda()
hpc_q = hpc_q.cuda()
hpc_next_n_q = hpc_next_n_q.cuda()
hpc_action = hpc_action.cuda()
hpc_next_n_action = hpc_next_n_action.cuda()
hpc_reward = hpc_reward.cuda()
hpc_done = hpc_done.cuda()
hpc_r_q = hpc_r_q.cuda()
hpc_weight = hpc_weight.cuda()
hpc_value_gamma = hpc_value_gamma.cuda()
hpc_iqn = hpc_iqn.cuda()
ori_q.requires_grad_(True)
ori_loss, ori_ = iqn_nstep_td_error(iqn_nstep_td_data(ori_q, ori_next_n_q, ori_action, ori_next_n_action, ori_reward, ori_done, ori_r_q, ori_weight), gamma, T, kappa, ori_value_gamma)
ori_loss = ori_loss.mean()
ori_loss.backward()
if use_cuda:
torch.cuda.synchronize()
torch.cuda.cudart().cudaProfilerStart()
hpc_q.requires_grad_(True)
hpc_loss, hpc_ = hpc_iqn(hpc_q, hpc_next_n_q, hpc_action, hpc_next_n_action, hpc_reward, hpc_done, hpc_r_q, gamma, kappa, hpc_weight, hpc_value_gamma)
hpc_loss = hpc_loss.mean()
hpc_loss.backward()
if use_cuda:
torch.cuda.synchronize()
torch.cuda.cudart().cudaProfilerStop()
mre = mean_relative_error(torch.flatten(ori_loss).cpu().detach().numpy(), torch.flatten(hpc_loss).cpu().detach().numpy())
print("iqn fp mean_relative_error: " + str(mre))
mre = mean_relative_error(torch.flatten(ori_q.grad).cpu().detach().numpy(), torch.flatten(hpc_q.grad).cpu().detach().numpy())
print("iqn bp mean_relative_error: " + str(mre))
def iqn_perf():
ori_q = torch.randn(tau, B, N)
ori_next_n_q = torch.randn(tauPrime, B, N)
ori_action = torch.randint(0, N, size=(B, ))
ori_next_n_action = torch.randint(0, N, size=(B, ))
ori_reward = torch.randn(T, B)
ori_done = torch.randn(B)
ori_r_q = torch.randn(tau, B)
ori_weight = torch.randn(B)
ori_value_gamma = torch.randn(B)
hpc_q = ori_q.clone().detach()
hpc_next_n_q = ori_next_n_q.clone().detach()
hpc_action = ori_action.clone().detach()
hpc_next_n_action = ori_next_n_action.clone().detach()
hpc_reward = ori_reward.clone().detach()
hpc_done = ori_done.clone().detach()
hpc_r_q = ori_r_q.clone().detach()
hpc_weight = ori_weight.clone().detach()
hpc_value_gamma = ori_value_gamma.clone().detach()
hpc_iqn = IQNNStepTDError(tau, tauPrime, T, B, N)
if use_cuda:
ori_q = ori_q.cuda()
ori_next_n_q = ori_next_n_q.cuda()
ori_action = ori_action.cuda()
ori_next_n_action = ori_next_n_action.cuda()
ori_reward = ori_reward.cuda()
ori_done = ori_done.cuda()
ori_r_q = ori_r_q.cuda()
ori_weight = ori_weight.cuda()
ori_value_gamma = ori_value_gamma.cuda()
hpc_q = hpc_q.cuda()
hpc_next_n_q = hpc_next_n_q.cuda()
hpc_action = hpc_action.cuda()
hpc_next_n_action = hpc_next_n_action.cuda()
hpc_reward = hpc_reward.cuda()
hpc_done = hpc_done.cuda()
hpc_r_q = hpc_r_q.cuda()
hpc_weight = hpc_weight.cuda()
hpc_iqn = hpc_iqn.cuda()
hpc_value_gamma = hpc_value_gamma.cuda()
ori_q.requires_grad_(True)
for i in range(times):
t = time.time()
ori_loss, ori_ = iqn_nstep_td_error(iqn_nstep_td_data(ori_q, ori_next_n_q, ori_action, ori_next_n_action, ori_reward, ori_done, ori_r_q, ori_weight), gamma, T, kappa, ori_value_gamma)
ori_loss = ori_loss.mean()
ori_loss.backward()
if use_cuda:
torch.cuda.synchronize()
print('epoch: {}, original iqn cost time: {}'.format(i, time.time() - t))
#torch.cuda.cudart().cudaProfilerStart()
hpc_q.requires_grad_(True)
for i in range(times):
t = time.time()
hpc_loss, hpc_ = hpc_iqn(hpc_q, hpc_next_n_q, hpc_action, hpc_next_n_action, hpc_reward, hpc_done, hpc_r_q, gamma, kappa, hpc_weight, hpc_value_gamma)
hpc_loss = hpc_loss.mean()
hpc_loss.backward()
if use_cuda:
torch.cuda.synchronize()
print('epoch: {}, hpc iqn cost time: {}'.format(i, time.time() - t))
#torch.cuda.cudart().cudaProfilerStop()
mre = mean_relative_error(torch.flatten(ori_loss).cpu().detach().numpy(), torch.flatten(hpc_loss).cpu().detach().numpy())
print("iqn fp mean_relative_error: " + str(mre))
mre = mean_relative_error(torch.flatten(ori_q.grad).cpu().detach().numpy(), torch.flatten(hpc_q.grad).cpu().detach().numpy())
print("iqn bp mean_relative_error: " + str(mre))
if __name__ == '__main__':
print("target problem: tau = {}, tauPrime = {}, T = {}, B = {}, N = {}, gamma = {}, kappa = {}".format(tau, tauPrime, T, B, N, gamma, kappa))
print("================run iqn validation test================")
iqn_val()
print("================run iqn performance test================")
iqn_perf()
| 39.425 | 191 | 0.656309 | [
"Apache-2.0"
] | opendilab/DI-hpc | tests/test_iqn_nstep_td_error.py | 6,308 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
from test_framework.mininode import *
from test_framework.script import CScript, OP_TRUE, OP_CHECKSIG
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x1e0ffff0 # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(int(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
def cbase_scriptsig(height):
return ser_string(serialize_script_num(height))
def cbase_value(height):
#return ((50 * COIN) >> int(height/150))
return (250 * COIN)
# Create a coinbase transaction, assuming no miner fees.
# If pubkey is passed in, the coinbase output will be a P2PK output;
# otherwise an anyone-can-spend output.
def create_coinbase(height, pubkey = None):
coinbase = CTransaction()
coinbase.vin = [CTxIn(NullOutPoint, cbase_scriptsig(height), 0xffffffff)]
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = cbase_value(height)
if (pubkey != None):
coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
coinbase.calc_sha256()
return coinbase
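# Illustrative use of the helpers above (a sketch only; the height and previous
# hash are hypothetical, and block.solve() is assumed to be the proof-of-work
# helper on mininode's CBlock):
#
#   coinbase = create_coinbase(height=201)
#   block = create_block(hashprev=0, coinbase=coinbase)
#   block.solve()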
# Create a transaction.
# If the scriptPubKey is not specified, make it anyone-can-spend.
def create_transaction(prevtx, n, sig, value, scriptPubKey=CScript()):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
return tx
def create_transaction_from_outpoint(outPoint, sig, value, scriptPubKey=CScript()):
tx = CTransaction()
tx.vin.append(CTxIn(outPoint, sig, 0xffffffff))
tx.vout.append(CTxOut(value, scriptPubKey))
tx.calc_sha256()
return tx
def get_legacy_sigopcount_block(block, fAccurate=True):
count = 0
for tx in block.vtx:
count += get_legacy_sigopcount_tx(tx, fAccurate)
return count
def get_legacy_sigopcount_tx(tx, fAccurate=True):
count = 0
for i in tx.vout:
count += i.scriptPubKey.GetSigOpCount(fAccurate)
for j in tx.vin:
# scriptSig might be of type bytes, so convert to CScript for the moment
count += CScript(j.scriptSig).GetSigOpCount(fAccurate)
return count
### SupernodeCoin specific blocktools ###
def create_coinbase_pos(height):
coinbase = CTransaction()
coinbase.vin = [CTxIn(NullOutPoint, cbase_scriptsig(height), 0xffffffff)]
coinbase.vout = [CTxOut(0, b"")]
coinbase.calc_sha256()
return coinbase
def is_zerocoin(uniqueness):
ulen = len(uniqueness)
if ulen == 32: return True
if ulen == 36: return False
raise Exception("Wrong uniqueness len: %d" % ulen) | 33 | 83 | 0.695567 | [
"MIT"
] | Supernode-SUNO/SUNO | test/functional/test_framework/blocktools.py | 3,564 | Python |
# -*- coding: utf-8 -*-
import datetime
from unittest.mock import Mock
import pytest
from h.activity import bucketing
from tests.common import factories
UTCNOW = datetime.datetime(year=1970, month=2, day=21, hour=19, minute=30)
FIVE_MINS_AGO = UTCNOW - datetime.timedelta(minutes=5)
YESTERDAY = UTCNOW - datetime.timedelta(days=1)
THIRD_MARCH_1968 = datetime.datetime(year=1968, month=3, day=3)
FIFTH_NOVEMBER_1969 = datetime.datetime(year=1969, month=11, day=5)
class timeframe_with: # noqa: N801
def __init__(self, label, document_buckets):
self.label = label
self.document_buckets = document_buckets
def __eq__(self, timeframe):
return (
self.label == timeframe.label
and self.document_buckets == timeframe.document_buckets
)
def __repr__(self):
return '{class_} "{label}" with {n} document buckets'.format(
class_=self.__class__, label=self.label, n=len(self.document_buckets)
)
@pytest.mark.usefixtures("factories")
class TestDocumentBucket:
def test_init_sets_the_document_title(self, db_session, document):
title_meta = factories.DocumentMeta(
type="title", value=["The Document Title"], document=document
)
document.title = "The Document Title"
db_session.add(title_meta)
db_session.flush()
bucket = bucketing.DocumentBucket(document)
assert bucket.title == "The Document Title"
def test_init_uses_the_document_web_uri(self, db_session, document):
document.web_uri = "http://example.com"
bucket = bucketing.DocumentBucket(document)
assert bucket.uri == "http://example.com"
def test_init_sets_None_uri_when_no_http_or_https_can_be_found(
self, db_session, document
):
document.web_uri = None
bucket = bucketing.DocumentBucket(document)
assert bucket.uri is None
def test_init_sets_the_domain_from_the_extracted_uri(self, db_session, document):
document.web_uri = "https://www.example.com/foobar.html"
bucket = bucketing.DocumentBucket(document)
assert bucket.domain == "www.example.com"
def test_init_sets_domain_to_local_file_when_no_uri_is_set(
self, db_session, document
):
docuri_pdf = factories.DocumentURI(
uri="urn:x-pdf:fingerprint", document=document
)
db_session.add(docuri_pdf)
db_session.flush()
bucket = bucketing.DocumentBucket(document)
assert bucket.domain == "Local file"
def test_annotations_count_returns_count_of_annotations(self, db_session, document):
bucket = bucketing.DocumentBucket(document)
for _ in range(7):
annotation = factories.Annotation()
bucket.append(annotation)
assert bucket.annotations_count == 7
def test_append_appends_the_annotation(self, document):
bucket = bucketing.DocumentBucket(document)
annotations = []
for _ in range(7):
annotation = factories.Annotation()
annotations.append(annotation)
bucket.append(annotation)
assert bucket.annotations == annotations
def test_append_adds_unique_annotation_tag_to_bucket(self, document):
ann_1 = factories.Annotation(tags=["foo", "bar"])
ann_2 = factories.Annotation(tags=["foo", "baz"])
bucket = bucketing.DocumentBucket(document)
bucket.append(ann_1)
bucket.append(ann_2)
assert bucket.tags == {"foo", "bar", "baz"}
def test_append_adds_unique_annotation_user_to_bucket(self, document):
ann_1 = factories.Annotation(userid="luke")
ann_2 = factories.Annotation(userid="alice")
ann_3 = factories.Annotation(userid="luke")
bucket = bucketing.DocumentBucket(document)
bucket.append(ann_1)
bucket.append(ann_2)
bucket.append(ann_3)
assert bucket.users == {"luke", "alice"}
def test_eq(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
for _ in range(5):
annotation = factories.Annotation()
bucket_1.append(annotation)
bucket_2.append(annotation)
assert bucket_1 == bucket_2
def test_eq_annotations_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.annotations = [1, 2, 3]
bucket_2.annotations = [2, 3, 4]
assert not bucket_1 == bucket_2
def test_eq_tags_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.tags.update(["foo", "bar"])
bucket_2.tags.update(["foo", "baz"])
assert not bucket_1 == bucket_2
def test_eq_users_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.users.update(["alice", "luke"])
bucket_2.users.update(["luke", "paula"])
assert not bucket_1 == bucket_2
def test_eq_uri_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.uri = "http://example.com"
bucket_2.uri = "http://example.org"
assert not bucket_1 == bucket_2
def test_eq_domain_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.domain = "example.com"
bucket_2.domain = "example.org"
assert not bucket_1 == bucket_2
def test_eq_title_mismatch(self, document):
bucket_1 = bucketing.DocumentBucket(document)
bucket_2 = bucketing.DocumentBucket(document)
bucket_1.title = "First Title"
bucket_2.title = "Second Title"
assert not bucket_1 == bucket_2
def test_incontext_link_returns_link_to_first_annotation(self, document, patch):
incontext_link = patch("h.links.incontext_link")
bucket = bucketing.DocumentBucket(document)
ann = factories.Annotation()
bucket.append(ann)
request = Mock()
assert bucket.incontext_link(request) == incontext_link.return_value
def test_incontext_link_returns_none_if_bucket_empty(self, document, patch):
patch("h.links.incontext_link")
bucket = bucketing.DocumentBucket(document)
request = Mock()
assert bucket.incontext_link(request) is None
@pytest.fixture
def document(self, db_session):
document = factories.Document()
db_session.add(document)
db_session.flush()
return document
@pytest.mark.usefixtures("factories", "utcnow")
class TestBucket:
def test_no_annotations(self):
assert bucketing.bucket([]) == []
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(FIVE_MINS_AGO, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_one_annotation(self, annotation_datetime, timeframe_label):
annotation = factories.Annotation(
document=factories.Document(), updated=annotation_datetime
)
timeframes = bucketing.bucket([annotation])
assert timeframes == [
timeframe_with(
timeframe_label,
{
annotation.document: bucketing.DocumentBucket(
annotation.document, [annotation]
)
},
)
]
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(FIVE_MINS_AGO, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_multiple_annotations_of_one_document_in_one_timeframe(
self, annotation_datetime, timeframe_label
):
results = [
factories.Annotation(
target_uri="https://example.com", updated=annotation_datetime
)
for _ in range(3)
]
timeframes = bucketing.bucket(results)
document = results[0].document
assert timeframes == [
timeframe_with(
timeframe_label, {document: bucketing.DocumentBucket(document, results)}
)
]
@pytest.mark.parametrize(
"annotation_datetime,timeframe_label",
[(YESTERDAY, "Last 7 days"), (THIRD_MARCH_1968, "Mar 1968")],
)
def test_annotations_of_multiple_documents_in_one_timeframe(
self, annotation_datetime, timeframe_label
):
annotation_1 = factories.Annotation(
target_uri="http://example1.com", updated=annotation_datetime
)
annotation_2 = factories.Annotation(
target_uri="http://example2.com", updated=annotation_datetime
)
annotation_3 = factories.Annotation(
target_uri="http://example3.com", updated=annotation_datetime
)
timeframes = bucketing.bucket([annotation_1, annotation_2, annotation_3])
assert timeframes == [
timeframe_with(
timeframe_label,
{
annotation_1.document: bucketing.DocumentBucket(
annotation_1.document, [annotation_1]
),
annotation_2.document: bucketing.DocumentBucket(
annotation_2.document, [annotation_2]
),
annotation_3.document: bucketing.DocumentBucket(
annotation_3.document, [annotation_3]
),
},
)
]
def test_annotations_of_the_same_document_in_different_timeframes(self):
results = [
factories.Annotation(),
factories.Annotation(updated=FIFTH_NOVEMBER_1969),
factories.Annotation(updated=THIRD_MARCH_1968),
]
document = factories.Document()
for annotation in results:
annotation.document = document
timeframes = bucketing.bucket(results)
expected_bucket_1 = bucketing.DocumentBucket(document, [results[0]])
expected_bucket_2 = bucketing.DocumentBucket(document, [results[1]])
expected_bucket_3 = bucketing.DocumentBucket(document, [results[2]])
assert timeframes == [
timeframe_with("Last 7 days", {document: expected_bucket_1}),
timeframe_with("Nov 1969", {document: expected_bucket_2}),
timeframe_with("Mar 1968", {document: expected_bucket_3}),
]
def test_recent_and_older_annotations_together(self):
results = [
factories.Annotation(target_uri="http://example1.com"),
factories.Annotation(target_uri="http://example2.com"),
factories.Annotation(target_uri="http://example3.com"),
factories.Annotation(
target_uri="http://example4.com", updated=THIRD_MARCH_1968
),
factories.Annotation(
target_uri="http://example5.com", updated=THIRD_MARCH_1968
),
factories.Annotation(
target_uri="http://example6.com", updated=THIRD_MARCH_1968
),
]
timeframes = bucketing.bucket(results)
expected_bucket_1 = bucketing.DocumentBucket(results[0].document, [results[0]])
expected_bucket_2 = bucketing.DocumentBucket(results[1].document, [results[1]])
expected_bucket_3 = bucketing.DocumentBucket(results[2].document, [results[2]])
expected_bucket_4 = bucketing.DocumentBucket(results[3].document, [results[3]])
expected_bucket_5 = bucketing.DocumentBucket(results[4].document, [results[4]])
expected_bucket_6 = bucketing.DocumentBucket(results[5].document, [results[5]])
assert timeframes == [
timeframe_with(
"Last 7 days",
{
results[0].document: expected_bucket_1,
results[1].document: expected_bucket_2,
results[2].document: expected_bucket_3,
},
),
timeframe_with(
"Mar 1968",
{
results[3].document: expected_bucket_4,
results[4].document: expected_bucket_5,
results[5].document: expected_bucket_6,
},
),
]
def test_annotations_from_different_days_in_same_month(self):
"""
        Test bucketing multiple annotations from different days of the same month.
Annotations from different days of the same month should go into one
bucket.
"""
one_month_ago = UTCNOW - datetime.timedelta(days=30)
annotations = [
factories.Annotation(
target_uri="http://example.com", updated=one_month_ago
),
factories.Annotation(
target_uri="http://example.com",
updated=one_month_ago - datetime.timedelta(days=1),
),
factories.Annotation(
target_uri="http://example.com",
updated=one_month_ago - datetime.timedelta(days=2),
),
]
timeframes = bucketing.bucket(annotations)
expected_bucket = bucketing.DocumentBucket(annotations[0].document)
expected_bucket.update(annotations)
assert timeframes == [
timeframe_with("Jan 1970", {annotations[0].document: expected_bucket})
]
@pytest.fixture
def utcnow(self, patch):
utcnow = patch("h.activity.bucketing.utcnow")
utcnow.return_value = UTCNOW
return utcnow
| 34.671679 | 88 | 0.630331 | [
"BSD-2-Clause"
] | y3g0r/h | tests/h/activity/bucketing_test.py | 13,834 | Python |
# models.py
from flask_login import UserMixin
from . import db
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True) # primary keys are required by SQLAlchemy
email = db.Column(db.String(100), unique=True)
password = db.Column(db.String(100))
name = db.Column(db.String(1000))
home = db.Column(db.String(1000)) | 32.363636 | 90 | 0.707865 | [
"MIT"
] | devsunny/flask-secure-upload | webapp/models.py | 356 | Python |
# -*- coding: utf-8 -*-
from . import fields
from . import integrators
from . import points
from . import system
from . import utils
from . import visualizer
from .system import NBodySystem
from .visualizer import Visualizer, run | 19.333333 | 39 | 0.75 | [
"BSD-3-Clause"
] | DFNaiff/FieldBillard | fieldbillard/__init__.py | 232 | Python |
import json
import logging
import requests
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, OpenPortEvent, Service
from kube_hunter.core.types import Discovery
class EtcdAccessEvent(Service, Event):
"""Etcd is a DB that stores cluster's data, it contains configuration and current
state information, and might contain secrets"""
def __init__(self):
Service.__init__(self, name="Etcd")
@handler.subscribe(OpenPortEvent, predicate=lambda p: p.port == 2379)
class EtcdRemoteAccess(Discovery):
"""Etcd service
check for the existence of etcd service
"""
def __init__(self, event):
self.event = event
def execute(self):
self.publish_event(EtcdAccessEvent())
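# Flow sketch (descriptive only): the port scanner publishes an OpenPortEvent for
# every open port it finds; the subscribe predicate above routes only port 2379
# to EtcdRemoteAccess.execute(), which in turn publishes EtcdAccessEvent so that
# hunter modules subscribed to it can probe the etcd service.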
| 28.148148 | 85 | 0.735526 | [
"Apache-2.0"
] | LozanoMatheus/kube-hunter | kube_hunter/modules/discovery/etcd.py | 760 | Python |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2021-present Kaleidos Ventures SL
from __future__ import unicode_literals
from django.db import connection, migrations, models
def set_finished_date_for_tasks(apps, schema_editor):
# Updates the finished date from tasks according to the history_entries associated
    # It takes the last history change updating the status of a task and, if it's a closed
    # one, updates the finished_date attribute
sql="""
WITH status_update AS(
WITH status_update AS(
WITH history_entries AS (
SELECT
diff #>>'{status, 1}' new_status_id,
regexp_split_to_array(key, ':') as split_key,
created_at as date
FROM history_historyentry
WHERE diff #>>'{status, 1}' != ''
)
SELECT
split_key[2] as object_id,
new_status_id::int,
MAX(date) as status_change_datetime
FROM history_entries
WHERE split_key[1] = 'tasks.task'
GROUP BY object_id, new_status_id, date
)
SELECT status_update.*
FROM status_update
INNER JOIN projects_taskstatus
ON projects_taskstatus.id = new_status_id AND projects_taskstatus.is_closed = True
)
UPDATE tasks_task
SET finished_date = status_update.status_change_datetime
FROM status_update
WHERE tasks_task.id = status_update.object_id::int
"""
cursor = connection.cursor()
cursor.execute(sql)
class Migration(migrations.Migration):
dependencies = [
('tasks', '0008_remove_task_watchers'),
]
operations = [
migrations.RunPython(set_finished_date_for_tasks),
]
| 30.017544 | 90 | 0.735243 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | CarlosLVar/taiga-back-1 | taiga/projects/tasks/migrations/0009_auto_20151104_1131.py | 1,711 | Python |
from gpiozero import CPUTemperature
from tabulate import tabulate
from math import floor
import numpy as np
import termplotlib as tpl
import time
import shutil
def roundNum(num, digits):
return floor(num * 10 ** digits) / (10 ** digits)
def CtoF(temp):
    fahrenheit = (temp * 1.8) + 32
rounded = roundNum(fahrenheit, 3)
return str(rounded)
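# Quick sanity check for the helpers above (illustrative values):
#   CtoF(0.0) -> '32.0', CtoF(40.0) -> '104.0', roundNum(3.14159, 3) -> 3.141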
cpu = CPUTemperature()
colors = {
'HEADER': '\033[95m',
'OKBLUE': '\033[94m',
'OKCYAN': '\033[96m',
'OKGREEN': '\033[92m',
'WARNING': '\033[93m',
'FAIL': '\033[91m',
'ENDC': '\033[0m',
'BOLD': '\033[1m',
'UNDERLINE': '\033[4m',
}
times = [0]
temps = [cpu.temperature]
while True:
tickRate = 2 #takes data every {tickRate} seconds
minutes = 5
numPoints = int(60 / tickRate * minutes)
width, height = shutil.get_terminal_size()
if len(temps) > numPoints:
temps = temps[-numPoints:]
times = times[-numPoints:]
temps.append(cpu.temperature)
times.append(times[-1] + tickRate)
averageTemp = roundNum(np.average(temps), 3)
cpuTempColor = ''
if cpu.temperature < 50:
cpuTempColor = colors['OKBLUE']
elif cpu.temperature < 65:
cpuTempColor = colors['OKCYAN']
elif cpu.temperature < 80:
cpuTempColor = colors['OKGREEN']
else:
cpuTempColor = colors['FAIL'] + colors['BOLD']
table = [[
f"{cpuTempColor}{str(cpu.temperature)}\N{DEGREE SIGN}C / {CtoF(cpu.temperature)}\N{DEGREE SIGN}F\n",
f"{colors['OKGREEN']}{averageTemp} / {CtoF(averageTemp)}\N{DEGREE SIGN}F\n",
f"{colors['OKGREEN']}{np.amax(temps)} / {CtoF(np.amax(temps))}\N{DEGREE SIGN}F\n",
f"{colors['OKGREEN']}{np.amin(temps)} / {CtoF(np.amin(temps))}\N{DEGREE SIGN}F"
]]
headers = [
f"{colors['OKGREEN']}CPU TEMPERATURE",
f"{colors['OKGREEN']}Average Temperature (last {minutes} minutes)",
f"{colors['FAIL']}Peak Temperature (last {minutes} minutes)",
f"{colors['OKCYAN']}Lowest Temperature (last {minutes} minutes){colors['OKGREEN']}", #OKGREEN at end is to make sure table lines are green, not cyan
]
print('\n')
fig = tpl.figure()
plotConfig = {
'width': width-2,
'height': height-5,
'label': 'CPU Temperature',
'xlabel': 'Time (s)',
        'xlim': [times[0], times[-1]],
'ylim': [np.amin(temps)-2, np.amax(temps)+2],
'title': f"CPU Temperature over last {minutes} minutes",
}
fig.plot(times, temps, **plotConfig)
fig.show()
# width=width-2, height=height-5, label='CPU Temperature', xlabel='Time (s)', , ylim=[np.amin(temps)-2, np.amax(temps)+2], title='CPU Temperature over last 5 minutes'
print('\n')
print(tabulate(table, headers=headers))
time.sleep(tickRate) | 30.988889 | 170 | 0.608103 | [
"MIT"
] | KevinLee3627/pi-temp-monitor | monitor_temp.py | 2,789 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Nexmark launcher.
The Nexmark suite is a series of queries (streaming pipelines) performed
on a simulation of auction events. The launcher orchestrates the generation
and parsing of streaming events and the running of queries.
Model
- Person: Author of an auction or a bid.
- Auction: Item under auction.
- Bid: A bid for an item under auction.
Events
- Create Person
- Create Auction
- Create Bid
Queries
- Query0: Pass through (send and receive auction events).
Usage
- DirectRunner
python nexmark_launcher.py \
--query/q <query number> \
--project <project id> \
--loglevel=DEBUG (optional) \
--wait_until_finish_duration <time_in_ms> \
--streaming
- DataflowRunner
python nexmark_launcher.py \
--query/q <query number> \
--project <project id> \
--region <GCE region> \
--loglevel=DEBUG (optional) \
--wait_until_finish_duration <time_in_ms> \
--streaming \
--sdk_location <apache_beam tar.gz> \
--staging_location=gs://... \
--temp_location=gs://
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import argparse
import logging
import sys
import uuid
from google.cloud import pubsub
import apache_beam as beam
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import TestOptions
from apache_beam.testing.benchmarks.nexmark.nexmark_util import Command
from apache_beam.testing.benchmarks.nexmark.queries import query0
from apache_beam.testing.benchmarks.nexmark.queries import query1
from apache_beam.testing.benchmarks.nexmark.queries import query2
class NexmarkLauncher(object):
def __init__(self):
self.parse_args()
self.uuid = str(uuid.uuid4())
self.topic_name = self.args.topic_name + self.uuid
self.subscription_name = self.args.subscription_name + self.uuid
publish_client = pubsub.Client(project=self.project)
topic = publish_client.topic(self.topic_name)
if topic.exists():
logging.info('deleting topic %s', self.topic_name)
topic.delete()
logging.info('creating topic %s', self.topic_name)
topic.create()
sub = topic.subscription(self.subscription_name)
if sub.exists():
logging.info('deleting sub %s', self.topic_name)
sub.delete()
logging.info('creating sub %s', self.topic_name)
sub.create()
def parse_args(self):
parser = argparse.ArgumentParser()
parser.add_argument(
'--query',
'-q',
type=int,
action='append',
required=True,
choices=[0, 1, 2],
help='Query to run')
parser.add_argument(
'--subscription_name',
type=str,
help='Pub/Sub subscription to read from')
parser.add_argument(
'--topic_name', type=str, help='Pub/Sub topic to read from')
parser.add_argument(
'--loglevel',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
help='Set logging level to debug')
parser.add_argument(
'--input',
type=str,
required=True,
help='Path to the data file containing nexmark events.')
self.args, self.pipeline_args = parser.parse_known_args()
logging.basicConfig(
level=getattr(logging, self.args.loglevel, None),
format='(%(threadName)-10s) %(message)s')
self.pipeline_options = PipelineOptions(self.pipeline_args)
logging.debug('args, pipeline_args: %s, %s', self.args, self.pipeline_args)
# Usage with Dataflow requires a project to be supplied.
self.project = self.pipeline_options.view_as(GoogleCloudOptions).project
if self.project is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --project is required')
sys.exit(1)
# Pub/Sub is currently available for use only in streaming pipelines.
self.streaming = self.pipeline_options.view_as(StandardOptions).streaming
if self.streaming is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --streaming is required')
sys.exit(1)
# wait_until_finish ensures that the streaming job is canceled.
self.wait_until_finish_duration = (
self.pipeline_options.view_as(TestOptions).wait_until_finish_duration)
if self.wait_until_finish_duration is None:
parser.print_usage()
print(sys.argv[0] + ': error: argument --wait_until_finish_duration is required') # pylint: disable=line-too-long
sys.exit(1)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
self.pipeline_options.view_as(SetupOptions).save_main_session = True
def generate_events(self):
publish_client = pubsub.Client(project=self.project)
topic = publish_client.topic(self.topic_name)
sub = topic.subscription(self.subscription_name)
logging.info('Generating auction events to topic %s', topic.name)
if self.args.input.startswith('gs://'):
from apache_beam.io.gcp.gcsfilesystem import GCSFileSystem
fs = GCSFileSystem(self.pipeline_options)
with fs.open(self.args.input) as infile:
for line in infile:
topic.publish(line)
else:
with open(self.args.input) as infile:
for line in infile:
topic.publish(line)
logging.info('Finished event generation.')
# Read from PubSub into a PCollection.
if self.args.subscription_name:
raw_events = self.pipeline | 'ReadPubSub' >> beam.io.ReadFromPubSub(
subscription=sub.full_name)
else:
raw_events = self.pipeline | 'ReadPubSub' >> beam.io.ReadFromPubSub(
topic=topic.full_name)
return raw_events
def run_query(self, query, query_args, query_errors):
try:
self.parse_args()
self.pipeline = beam.Pipeline(options=self.pipeline_options)
raw_events = self.generate_events()
query.load(raw_events, query_args)
result = self.pipeline.run()
job_duration = (
self.pipeline_options.view_as(TestOptions).wait_until_finish_duration)
if self.pipeline_options.view_as(StandardOptions).runner == 'DataflowRunner': # pylint: disable=line-too-long
result.wait_until_finish(duration=job_duration)
result.cancel()
else:
result.wait_until_finish()
except Exception as exc:
query_errors.append(str(exc))
raise
def cleanup(self):
publish_client = pubsub.Client(project=self.project)
topic = publish_client.topic(self.topic_name)
if topic.exists():
topic.delete()
sub = topic.subscription(self.subscription_name)
if sub.exists():
sub.delete()
def run(self):
queries = {
0: query0,
1: query1,
2: query2, # TODO(mariagh): Add more queries.
}
# TODO(mariagh): Move to a config file.
query_args = {2: {'auction_id': 'a1003'}}
query_errors = []
for i in self.args.query:
self.parse_args()
logging.info('Running query %d', i)
# The DirectRunner is the default runner, and it needs
# special handling to cancel streaming jobs.
launch_from_direct_runner = self.pipeline_options.view_as(
StandardOptions).runner in [None, 'DirectRunner']
query_duration = self.pipeline_options.view_as(TestOptions).wait_until_finish_duration # pylint: disable=line-too-long
if launch_from_direct_runner:
command = Command(
self.run_query, args=[queries[i], query_args.get(i), query_errors])
command.run(timeout=query_duration // 1000)
else:
try:
self.run_query(queries[i], query_args.get(i), query_errors=None)
except Exception as exc:
query_errors.append(exc)
if query_errors:
logging.error('Query failed with %s', ', '.join(query_errors))
else:
logging.info('Queries run: %s', self.args.query)
if __name__ == '__main__':
launcher = NexmarkLauncher()
launcher.run()
launcher.cleanup()
| 34.164794 | 125 | 0.691515 | [
"Apache-2.0"
] | AldairCoronel/beam | sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py | 9,122 | Python |
from rlbot.utils.structures.game_data_struct import Physics, GameTickPacket, PlayerInfo
from rlbot.agents.base_agent import SimpleControllerState, BaseAgent
from states.state import State
from util.packet import ParsedPacket
class GroundSave(State):
def score(self, parsed_packet: ParsedPacket, packet: GameTickPacket, agent: BaseAgent) -> float:
return None
def get_output(self, parsed_packet: ParsedPacket, packet: GameTickPacket, agent: BaseAgent) -> SimpleControllerState:
return None | 39.692308 | 121 | 0.792636 | [
"MIT"
] | KrystopherWeeton/RLBot | FirstChild/src/states/ground_save.py | 516 | Python |
__author__ = 'Alexis.Koalla@orange.com'
| 20.5 | 40 | 0.756098 | [
"Apache-2.0"
] | chenhui0228/sfcsm | sfcsmCtrl/model/__init__.py | 41 | Python |
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
from rlstructures.logger import Logger, TFLogger
from rlstructures import DictTensor, TemporalDictTensor
from rlstructures import logging
from rlstructures.tools import weight_init
from rlstructures.batchers import EpisodeBatcher, Batcher
import torch.nn as nn
import copy
import torch
import time
import numpy as np
import torch.nn.functional as F
from tutorial.tutorial_reinforce.agent import *
class A2C:
def __init__(self, config, create_env, create_train_env, create_agent):
self.config = config
# Creation of the Logger (that saves in tensorboard and CSV)
self.logger = TFLogger(log_dir=self.config["logdir"], hps=self.config)
self._create_env = create_env
self._create_train_env = create_train_env
self._create_agent = create_agent
        # Creation of one env instance to get the dimensionality of observations and number of actions
env = self._create_env(
self.config["n_envs"], seed=0, env_name=self.config["env_name"]
)
self.n_actions = env.action_space.n
self.obs_dim = env.reset()[0]["frame"].size()[1]
del env
def run(self):
        # Instantiate the learning model and the baseline model
self.learning_model = AgentModel(self.obs_dim, self.n_actions, 32)
self.critic_model = BaselineModel(self.obs_dim, 32)
# We create a batcher dedicated to evaluation
model = copy.deepcopy(self.learning_model)
self.evaluation_batcher = EpisodeBatcher(
n_timesteps=self.config["max_episode_steps"],
n_slots=self.config["n_evaluation_episodes"],
create_agent=self._create_agent,
create_env=self._create_env,
env_args={
"n_envs": self.config["n_envs"],
"max_episode_steps": self.config["max_episode_steps"],
"env_name": self.config["env_name"],
},
agent_args={"n_actions": self.n_actions, "model": model},
n_threads=self.config["n_evaluation_threads"],
seeds=[
self.config["env_seed"] + k * 10
for k in range(self.config["n_evaluation_threads"])
],
)
# Creation of the batcher for sampling complete pieces of trajectories (i.e Batcher)
# The batcher will sample n_threads*n_envs trajectories at each call
# To have a fast batcher, we have to configure it with n_timesteps=self.config["max_episode_steps"]
model = copy.deepcopy(self.learning_model)
self.train_batcher = Batcher(
n_timesteps=self.config["a2c_timesteps"],
n_slots=self.config["n_envs"] * self.config["n_threads"],
create_agent=self._create_agent,
create_env=self._create_train_env,
env_args={
"n_envs": self.config["n_envs"],
"max_episode_steps": self.config["max_episode_steps"],
"env_name": self.config["env_name"],
},
agent_args={"n_actions": self.n_actions, "model": model},
n_threads=self.config["n_threads"],
seeds=[
self.config["env_seed"] + k * 10
for k in range(self.config["n_threads"])
],
)
# Creation of the optimizer
optimizer = torch.optim.Adam(
nn.Sequential(self.learning_model, self.critic_model).parameters(),
lr=self.config["lr"],
)
# Training Loop:
_start_time = time.time()
self.iteration = 0
        # We launch the evaluation batcher (in deterministic mode)
n_episodes = self.config["n_evaluation_episodes"]
agent_info = DictTensor(
{"stochastic": torch.tensor([False]).repeat(n_episodes)}
)
self.evaluation_batcher.execute(n_episodes=n_episodes, agent_info=agent_info)
self.evaluation_iteration = self.iteration
        # Initialize the training batcher such that agents will start to acquire pieces of episodes
self.train_batcher.update(self.learning_model.state_dict())
n_episodes = self.config["n_envs"] * self.config["n_threads"]
agent_info = DictTensor({"stochastic": torch.tensor([True]).repeat(n_episodes)})
self.train_batcher.reset(agent_info=agent_info)
while time.time() - _start_time < self.config["time_limit"]:
# Call the batcher to get a sample of trajectories
# 2) We get the pieces of episodes. Since the env is an infinite env, we will always receive a new piece of episode
self.train_batcher.execute()
trajectories = self.train_batcher.get(blocking=True)
# 3) Now, we compute the loss
dt = self.get_loss(trajectories)
[self.logger.add_scalar(k, dt[k].item(), self.iteration) for k in dt.keys()]
# Computation of final loss
ld = self.config["critic_coef"] * dt["critic_loss"]
lr = self.config["a2c_coef"] * dt["a2c_loss"]
le = self.config["entropy_coef"] * dt["entropy_loss"]
floss = ld - le - lr
floss = floss / n_episodes * trajectories.n_elems()
optimizer.zero_grad()
floss.backward()
optimizer.step()
# Update the train batcher with the updated model
self.train_batcher.update(self.learning_model.state_dict())
self.iteration += 1
# We check the evaluation batcher
evaluation_trajectories = self.evaluation_batcher.get(blocking=False)
if not evaluation_trajectories is None: # trajectories are available
# Compute the cumulated reward
cumulated_reward = (
(
evaluation_trajectories["_reward"]
* evaluation_trajectories.mask()
)
.sum(1)
.mean()
)
self.logger.add_scalar(
"evaluation_reward",
cumulated_reward.item(),
self.evaluation_iteration,
)
print(
"At iteration %d, reward is %f"
% (self.evaluation_iteration, cumulated_reward.item())
)
# We reexecute the evaluation batcher (with same value of agent_info and same number of episodes)
self.evaluation_batcher.update(self.learning_model.state_dict())
self.evaluation_iteration = self.iteration
self.evaluation_batcher.reexecute()
self.train_batcher.close()
self.evaluation_batcher.get() # To wait for the last trajectories
self.evaluation_batcher.close()
self.logger.update_csv() # To save as a CSV file in logdir
self.logger.close()
def get_loss(self, trajectories):
# First, we want to compute the cumulated reward per trajectory
        # The reward is at t+1 in each iteration (since it is obtained after the action), so we use the '_reward' field in the trajectory
        # The 'reward' field corresponds to the reward at time t
reward = trajectories["_reward"]
# We get the mask that tells which transition is in a trajectory (1) or not (0)
mask = trajectories.mask()
# We remove the reward values that are not in the trajectories
reward = reward * mask
max_length = trajectories.lengths.max().item()
# Now, we want to compute the action probabilities over the trajectories such that we will be able to do 'backward'
action_probabilities = []
for t in range(max_length):
proba = self.learning_model(trajectories["frame"][:, t])
action_probabilities.append(
proba.unsqueeze(1)
            )  # We append the probability and introduce the temporal dimension (the 2nd dimension)
action_probabilities = torch.cat(
action_probabilities, dim=1
) # Now, we have a B x T x n_actions tensor
# We compute the critic value for t=0 to T (i.e including the very last observation)
critic = []
for t in range(max_length):
b = self.critic_model(trajectories["frame"][:, t])
critic.append(b.unsqueeze(1))
critic = torch.cat(critic + [b.unsqueeze(1)], dim=1).squeeze(
-1
) # Now, we have a B x (T+1) tensor
        # We also need to compute the critic value for the last observation of the trajectories (to compute the TD)
        # It may be the last element of the trajectories (if the episode is not finished), or the last frame of the episode
idx = torch.arange(trajectories.n_elems())
last_critic = self.critic_model(
trajectories["_frame"][idx, trajectories.lengths - 1]
).squeeze(-1)
critic[idx, trajectories.lengths] = last_critic
# We compute the temporal difference
target = (
reward
+ self.config["discount_factor"]
* (1 - trajectories["_done"].float())
* critic[:, 1:].detach()
)
td = critic[:, :-1] - target
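        # In formulas, the two lines above compute (a sketch for clarity):
        #   target_t = r_{t+1} + gamma * (1 - done_{t+1}) * V(s_{t+1})
        #   td_t     = V(s_t) - target_t
        # critic_loss below is the squared TD error, while td.detach() is reused in
        # the policy-gradient term so the critic is not updated through that path.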
critic_loss = td ** 2
# We sum the loss for each episode (considering the mask)
critic_loss = (critic_loss * mask).sum(1) / mask.sum(1)
# We average the loss over all the trajectories
avg_critic_loss = critic_loss.mean()
# We do the same on the reinforce loss
action_distribution = torch.distributions.Categorical(action_probabilities)
log_proba = action_distribution.log_prob(trajectories["action"])
a2c_loss = -log_proba * td.detach()
a2c_loss = (a2c_loss * mask).sum(1) / mask.sum(1)
avg_a2c_loss = a2c_loss.mean()
# We compute the entropy loss
entropy = action_distribution.entropy()
entropy = (entropy * mask).sum(1) / mask.sum(1)
avg_entropy = entropy.mean()
return DictTensor(
{
"critic_loss": avg_critic_loss,
"a2c_loss": avg_a2c_loss,
"entropy_loss": avg_entropy,
}
)
| 42.461224 | 136 | 0.612323 | [
"MIT"
] | Purple-PI/rlstructures | tutorial/deprecated/tutorial_a2c_with_infinite_env/a2c.py | 10,403 | Python |
"""Utility functions for parcinging Freesurfer output files."""
from os.path import join
import nibabel as nb
import numpy as np
def _vectorize_fs_surf(file_path):
"""
Read surface information from a file and turn it into a vector.
Parameters
----------
file_path : str
The path to a file with surface data.
Returns
-------
vectorized_data : numpy.ndarray
Extracted data.
"""
img = nb.load(file_path)
in_data = img.get_fdata().squeeze()
return in_data
def get_area(subject_dir, n_points):
"""
Read area information for the given subject and turn it into a vector.
Data for left and right hemispheres are concatenated.
Parameters
----------
subject_dir : str
The directory to files with surface data.
n_points : int
Defines how many points to take from cortex surface.
Returns
-------
: numpy.ndarray
Extracted data.
"""
AREA_FILES = ('lh.area.mgh', 'rh.area.mgh')
lh_data = _vectorize_fs_surf(join(subject_dir, AREA_FILES[0]))
rh_data = _vectorize_fs_surf(join(subject_dir, AREA_FILES[1]))
n_points = n_points // 2
return np.concatenate((lh_data[:n_points], rh_data[:n_points]), 0)
def get_thickness(subject_dir, n_points):
"""
Read thickness information for the given subject and turn it into a vector.
Data for left and right hemispheres are concatenated.
Parameters
----------
subject_dir : str
The directory to files with surface data.
n_points : int
Defines how many points to take from cortex surface.
Returns
-------
: numpy.ndarray
Extracted data.
"""
    THICKNESS_FILES = ('lh.thickness.mgh', 'rh.thickness.mgh')
lh_data = _vectorize_fs_surf(join(subject_dir, THICKNESS_FILES[0]))
rh_data = _vectorize_fs_surf(join(subject_dir, THICKNESS_FILES[1]))
n_points = n_points // 2
return np.concatenate((lh_data[:n_points], rh_data[:n_points]), 0)
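if __name__ == "__main__":
    # Minimal usage sketch: the subject directory below is hypothetical and must
    # contain the FreeSurfer surface files read above (lh/rh .area.mgh and
    # .thickness.mgh); n_points is split evenly between the two hemispheres.
    subject_dir = "/data/freesurfer/sub-01/surf"
    area = get_area(subject_dir, n_points=2 * 10242)
    thickness = get_thickness(subject_dir, n_points=2 * 10242)
    print(area.shape, thickness.shape)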
| 23.682353 | 79 | 0.655241 | [
"BSD-3-Clause"
] | McIntosh-Lab-RRI/meg-mri-surrogate-biomarkers-aging-2020 | camcan/utils/file_parsing.py | 2,013 | Python |
import builtins
import os
import sys
from array import array
from collections import Counter, defaultdict, deque
from dataclasses import dataclass, fields, is_dataclass
from itertools import islice
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Union,
Tuple,
)
from . import get_console
from ._loop import loop_last
from ._pick import pick_bool
from .abc import RichRenderable
from .cells import cell_len
from .highlighter import ReprHighlighter
from .jupyter import JupyterMixin, JupyterRenderable
from .measure import Measurement
from .text import Text
if TYPE_CHECKING:
from .console import (
Console,
ConsoleOptions,
HighlighterType,
JustifyMethod,
OverflowMethod,
RenderResult,
)
def install(
console: "Console" = None,
overflow: "OverflowMethod" = "ignore",
crop: bool = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> None:
"""Install automatic pretty printing in the Python REPL.
Args:
console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False
"""
from rich import get_console
from .console import ConsoleRenderable # needed here to prevent circular import
console = console or get_console()
assert console is not None
def display_hook(value: Any) -> None:
"""Replacement sys.displayhook which prettifies objects with Rich."""
if value is not None:
assert console is not None
builtins._ = None # type: ignore
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
),
crop=crop,
)
builtins._ = value # type: ignore
def ipy_display_hook(value: Any) -> None: # pragma: no cover
assert console is not None
# always skip rich generated jupyter renderables or None values
if isinstance(value, JupyterRenderable) or value is None:
return
        # on jupyter rich display, if using one of the special representations don't use rich
if console.is_jupyter and any(attr.startswith("_repr_") for attr in dir(value)):
return
if hasattr(value, "_repr_mimebundle_"):
return
# certain renderables should start on a new line
if isinstance(value, ConsoleRenderable):
console.line()
console.print(
value
if isinstance(value, RichRenderable)
else Pretty(
value,
overflow=overflow,
indent_guides=indent_guides,
max_length=max_length,
max_string=max_string,
expand_all=expand_all,
margin=12,
),
crop=crop,
)
try: # pragma: no cover
ip = get_ipython() # type: ignore
from IPython.core.formatters import BaseFormatter
# replace plain text formatter with rich formatter
rich_formatter = BaseFormatter()
rich_formatter.for_type(object, func=ipy_display_hook)
ip.display_formatter.formatters["text/plain"] = rich_formatter
except Exception:
sys.displayhook = display_hook
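# Usage sketch for install() (illustrative values): call it once at the start of
# an interactive session and every evaluated expression is pretty printed.
#
#   >>> from rich.pretty import install
#   >>> install(max_length=10, indent_guides=True)
#   >>> {"words": ["x"] * 100, "numbers": list(range(50))}   # now rendered by Rich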
class Pretty(JupyterMixin):
"""A rich renderable that pretty prints an object.
Args:
_object (Any): An object to pretty print.
highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
indent_size (int, optional): Number of spaces in indent. Defaults to 4.
justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
indent_guides (bool, optional): Enable indentation guides. Defaults to False.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
expand_all (bool, optional): Expand all containers. Defaults to False.
        margin (int, optional): Subtract a margin from width to force containers to expand earlier. Defaults to 0.
insert_line (bool, optional): Insert a new line if the output has multiple new lines. Defaults to False.
"""
def __init__(
self,
_object: Any,
highlighter: "HighlighterType" = None,
*,
indent_size: int = 4,
justify: "JustifyMethod" = None,
overflow: Optional["OverflowMethod"] = None,
no_wrap: Optional[bool] = False,
indent_guides: bool = False,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
margin: int = 0,
insert_line: bool = False,
) -> None:
self._object = _object
self.highlighter = highlighter or ReprHighlighter()
self.indent_size = indent_size
self.justify = justify
self.overflow = overflow
self.no_wrap = no_wrap
self.indent_guides = indent_guides
self.max_length = max_length
self.max_string = max_string
self.expand_all = expand_all
self.margin = margin
self.insert_line = insert_line
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width - self.margin,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
expand_all=self.expand_all,
)
pretty_text = Text(
pretty_str,
justify=self.justify or options.justify,
overflow=self.overflow or options.overflow,
no_wrap=pick_bool(self.no_wrap, options.no_wrap),
style="pretty",
)
pretty_text = (
self.highlighter(pretty_text)
if pretty_text
else Text(
f"{type(self._object)}.__repr__ returned empty string",
style="dim italic",
)
)
if self.indent_guides and not options.ascii_only:
pretty_text = pretty_text.with_indent_guides(
self.indent_size, style="repr.indent"
)
if self.insert_line and "\n" in pretty_text:
yield ""
yield pretty_text
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
pretty_str = pretty_repr(
self._object,
max_width=options.max_width,
indent_size=self.indent_size,
max_length=self.max_length,
max_string=self.max_string,
)
text_width = (
max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
)
return Measurement(text_width, text_width)
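# Pretty can also be rendered explicitly rather than via install() (illustrative):
#
#   >>> from rich.console import Console
#   >>> Console().print(Pretty({"values": list(range(30))}, max_length=5))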
def _get_braces_for_defaultdict(_object: defaultdict) -> Tuple[str, str, str]:
return (
f"defaultdict({_object.default_factory!r}, {{",
"})",
f"defaultdict({_object.default_factory!r}, {{}})",
)
def _get_braces_for_array(_object: array) -> Tuple[str, str, str]:
return (f"array({_object.typecode!r}, [", "])", "array({_object.typecode!r})")
_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
os._Environ: lambda _object: ("environ({", "})", "environ({})"),
array: _get_braces_for_array,
defaultdict: _get_braces_for_defaultdict,
Counter: lambda _object: ("Counter({", "})", "Counter()"),
deque: lambda _object: ("deque([", "])", "deque()"),
dict: lambda _object: ("{", "}", "{}"),
frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
list: lambda _object: ("[", "]", "[]"),
set: lambda _object: ("{", "}", "set()"),
tuple: lambda _object: ("(", ")", "()"),
}
_CONTAINERS = tuple(_BRACES.keys())
_MAPPING_CONTAINERS = (dict, os._Environ)
def is_expandable(obj: Any) -> bool:
"""Check if an object may be expanded by pretty print."""
return (
isinstance(obj, _CONTAINERS)
or (is_dataclass(obj) and not isinstance(obj, type))
or hasattr(obj, "__rich_repr__")
)
@dataclass
class Node:
"""A node in a repr tree. May be atomic or a container."""
key_repr: str = ""
value_repr: str = ""
open_brace: str = ""
close_brace: str = ""
empty: str = ""
last: bool = False
is_tuple: bool = False
children: Optional[List["Node"]] = None
key_separator = ": "
@property
def separator(self) -> str:
"""Get separator between items."""
return "" if self.last else ","
def iter_tokens(self) -> Iterable[str]:
"""Generate tokens for this node."""
if self.key_repr:
yield self.key_repr
yield self.key_separator
if self.value_repr:
yield self.value_repr
elif self.children is not None:
if self.children:
yield self.open_brace
if self.is_tuple and len(self.children) == 1:
yield from self.children[0].iter_tokens()
yield ","
else:
for child in self.children:
yield from child.iter_tokens()
if not child.last:
yield ", "
yield self.close_brace
else:
yield self.empty
def check_length(self, start_length: int, max_length: int) -> bool:
"""Check the length fits within a limit.
Args:
start_length (int): Starting length of the line (indent, prefix, suffix).
max_length (int): Maximum length.
Returns:
bool: True if the node can be rendered within max length, otherwise False.
"""
total_length = start_length
for token in self.iter_tokens():
total_length += cell_len(token)
if total_length > max_length:
return False
return True
def __str__(self) -> str:
repr_text = "".join(self.iter_tokens())
return repr_text
def render(
self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
) -> str:
"""Render the node to a pretty repr.
Args:
max_width (int, optional): Maximum width of the repr. Defaults to 80.
indent_size (int, optional): Size of indents. Defaults to 4.
expand_all (bool, optional): Expand all levels. Defaults to False.
Returns:
str: A repr string of the original object.
"""
lines = [_Line(node=self, is_root=True)]
line_no = 0
while line_no < len(lines):
line = lines[line_no]
if line.expandable and not line.expanded:
if expand_all or not line.check_length(max_width):
lines[line_no : line_no + 1] = line.expand(indent_size)
line_no += 1
repr_str = "\n".join(str(line) for line in lines)
return repr_str
@dataclass
class _Line:
"""A line in repr output."""
is_root: bool = False
node: Optional[Node] = None
text: str = ""
suffix: str = ""
whitespace: str = ""
expanded: bool = False
@property
def expandable(self) -> bool:
"""Check if the line may be expanded."""
return bool(self.node is not None and self.node.children)
def check_length(self, max_length: int) -> bool:
"""Check this line fits within a given number of cells."""
start_length = (
len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
)
assert self.node is not None
return self.node.check_length(start_length, max_length)
def expand(self, indent_size: int) -> Iterable["_Line"]:
"""Expand this line by adding children on their own line."""
node = self.node
assert node is not None
whitespace = self.whitespace
assert node.children
if node.key_repr:
yield _Line(
text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
whitespace=whitespace,
)
else:
yield _Line(text=node.open_brace, whitespace=whitespace)
child_whitespace = self.whitespace + " " * indent_size
tuple_of_one = node.is_tuple and len(node.children) == 1
for child in node.children:
separator = "," if tuple_of_one else child.separator
line = _Line(
node=child,
whitespace=child_whitespace,
suffix=separator,
)
yield line
yield _Line(
text=node.close_brace,
whitespace=whitespace,
suffix="," if (tuple_of_one and not self.is_root) else node.separator,
)
def __str__(self) -> str:
return f"{self.whitespace}{self.text}{self.node or ''}{self.suffix}"
def traverse(_object: Any, max_length: int = None, max_string: int = None) -> Node:
"""Traverse object and generate a tree.
Args:
_object (Any): Object to be traversed.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
Returns:
Node: The root of a tree structure which can be used to render a pretty repr.
"""
def to_repr(obj: Any) -> str:
"""Get repr string for an object, but catch errors."""
if (
max_string is not None
and isinstance(obj, (bytes, str))
and len(obj) > max_string
):
truncated = len(obj) - max_string
obj_repr = f"{obj[:max_string]!r}+{truncated}"
else:
try:
obj_repr = repr(obj)
except Exception as error:
obj_repr = f"<repr-error '{error}'>"
return obj_repr
visited_ids: Set[int] = set()
push_visited = visited_ids.add
pop_visited = visited_ids.remove
def _traverse(obj: Any, root: bool = False) -> Node:
"""Walk the object depth first."""
obj_type = type(obj)
py_version = (sys.version_info.major, sys.version_info.minor)
children: List[Node]
def iter_rich_args(rich_args) -> Iterable[Union[Any, Tuple[str, Any]]]:
for arg in rich_args:
if isinstance(arg, tuple):
if len(arg) == 3:
key, child, default = arg
if default == child:
continue
yield key, child
elif len(arg) == 2:
key, child = arg
yield key, child
elif len(arg) == 1:
yield arg[0]
else:
yield arg
if hasattr(obj, "__rich_repr__"):
args = list(iter_rich_args(obj.__rich_repr__()))
if args:
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, arg in loop_last(args):
if isinstance(arg, tuple):
key, child = arg
child_node = _traverse(child)
child_node.last = last
child_node.key_repr = key
child_node.last = last
child_node.key_separator = "="
append(child_node)
else:
child_node = _traverse(arg)
child_node.last = last
append(child_node)
else:
node = Node(
value_repr=f"{obj.__class__.__name__}()", children=[], last=root
)
elif (
is_dataclass(obj)
and not isinstance(obj, type)
and (
"__create_fn__" in obj.__repr__.__qualname__ or py_version == (3, 6)
            )  # Check if __repr__ wasn't overridden
):
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
children = []
append = children.append
node = Node(
open_brace=f"{obj.__class__.__name__}(",
close_brace=")",
children=children,
last=root,
)
for last, field in loop_last(fields(obj)):
if field.repr:
child_node = _traverse(getattr(obj, field.name))
child_node.key_repr = field.name
child_node.last = last
child_node.key_separator = "="
append(child_node)
pop_visited(obj_id)
elif obj_type in _CONTAINERS:
obj_id = id(obj)
if obj_id in visited_ids:
# Recursion detected
return Node(value_repr="...")
push_visited(obj_id)
open_brace, close_brace, empty = _BRACES[obj_type](obj)
if obj:
children = []
node = Node(
open_brace=open_brace,
close_brace=close_brace,
children=children,
last=root,
)
append = children.append
num_items = len(obj)
last_item_index = num_items - 1
if isinstance(obj, _MAPPING_CONTAINERS):
iter_items = iter(obj.items())
if max_length is not None:
iter_items = islice(iter_items, max_length)
for index, (key, child) in enumerate(iter_items):
child_node = _traverse(child)
child_node.key_repr = to_repr(key)
child_node.last = index == last_item_index
append(child_node)
else:
iter_values = iter(obj)
if max_length is not None:
iter_values = islice(iter_values, max_length)
for index, child in enumerate(iter_values):
child_node = _traverse(child)
child_node.last = index == last_item_index
append(child_node)
if max_length is not None and num_items > max_length:
append(Node(value_repr=f"... +{num_items-max_length}", last=True))
else:
node = Node(empty=empty, children=[], last=root)
pop_visited(obj_id)
else:
node = Node(value_repr=to_repr(obj), last=root)
node.is_tuple = isinstance(obj, tuple)
return node
node = _traverse(_object, root=True)
return node
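# Illustrative use of traverse(): build the Node tree once, then render it at
# different widths without re-walking the original object.
#
#   >>> tree = traverse({"numbers": list(range(10)), "flag": True})
#   >>> print(tree.render(max_width=30))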
def pretty_repr(
_object: Any,
*,
max_width: int = 80,
indent_size: int = 4,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
) -> str:
"""Prettify repr string by expanding on to new lines to fit within a given width.
Args:
_object (Any): Object to repr.
max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
indent_size (int, optional): Number of spaces to indent. Defaults to 4.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
Defaults to None.
expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
Returns:
str: A possibly multi-line representation of the object.
"""
if isinstance(_object, Node):
node = _object
else:
node = traverse(_object, max_length=max_length, max_string=max_string)
repr_str = node.render(
max_width=max_width, indent_size=indent_size, expand_all=expand_all
)
return repr_str
def pprint(
_object: Any,
*,
console: "Console" = None,
indent_guides: bool = True,
max_length: int = None,
max_string: int = None,
expand_all: bool = False,
):
"""A convenience function for pretty printing.
Args:
_object (Any): Object to pretty print.
console (Console, optional): Console instance, or None to use default. Defaults to None.
max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
Defaults to None.
max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
indent_guides (bool, optional): Enable indentation guides. Defaults to True.
expand_all (bool, optional): Expand all containers. Defaults to False.
"""
_console = get_console() if console is None else console
_console.print(
Pretty(
_object,
max_length=max_length,
max_string=max_string,
indent_guides=indent_guides,
expand_all=expand_all,
overflow="ignore",
),
soft_wrap=True,
)
if __name__ == "__main__": # pragma: no cover
class BrokenRepr:
def __repr__(self):
1 / 0
d = defaultdict(int)
d["foo"] = 5
data = {
"foo": [
1,
"Hello World!",
100.123,
323.232,
432324.0,
{5, 6, 7, (1, 2, 3, 4), 8},
],
"bar": frozenset({1, 2, 3}),
"defaultdict": defaultdict(
list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
),
"counter": Counter(
[
"apple",
"orange",
"pear",
"kumquat",
"kumquat",
"durian" * 100,
]
),
"atomic": (False, True, None),
"Broken": BrokenRepr(),
}
data["foo"].append(data) # type: ignore
from rich import print
print(Pretty(data, indent_guides=True, max_string=20))
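    # A minimal usage sketch of the module's convenience helper (hypothetical
    # sample data): max_length abbreviates the long list with a "... +N" marker.
    pprint({"values": list(range(50))}, max_length=8)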
| 34.741007 | 134 | 0.566163 | [
"MIT"
] | AwesomeGitHubRepos/rich | rich/pretty.py | 24,145 | Python |
import json
import os
import sys
import time
import torch
from training.training import Trainer
from data.conversion import GridDataConverter, PointCloudDataConverter, ERA5Converter
from data.dataloaders import mnist, celebahq
from data.dataloaders_era5 import era5
from data.dataloaders3d import shapenet_voxels, shapenet_point_clouds
from models.discriminator import PointConvDiscriminator
from models.function_distribution import HyperNetwork, FunctionDistribution
from models.function_representation import FunctionRepresentation, FourierFeatures
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get config file from command line arguments
if len(sys.argv) != 2:
raise(RuntimeError("Wrong arguments, use python main.py <config_path>"))
config_path = sys.argv[1]
# Open config file
with open(config_path) as f:
config = json.load(f)
if config["path_to_data"] == "":
raise(RuntimeError("Path to data not specified. Modify path_to_data attribute in config to point to data."))
# Create a folder to store experiment results
timestamp = time.strftime("%Y-%m-%d_%H-%M")
directory = "{}_{}".format(timestamp, config["id"])
if not os.path.exists(directory):
os.makedirs(directory)
# Save config file in experiment directory
with open(directory + '/config.json', 'w') as f:
json.dump(config, f)
# Setup dataloader
is_voxel = False
is_point_cloud = False
is_era5 = False
if config["dataset"] == 'mnist':
dataloader = mnist(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"],
train=True)
input_dim = 2
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"])
elif config["dataset"] == 'celebahq':
dataloader = celebahq(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"])
input_dim = 2
output_dim = 3
data_shape = (3, config["resolution"], config["resolution"])
elif config["dataset"] == 'shapenet_voxels':
dataloader = shapenet_voxels(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"],
size=config["resolution"])
input_dim = 3
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"], config["resolution"])
is_voxel = True
elif config["dataset"] == 'shapenet_point_clouds':
dataloader = shapenet_point_clouds(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"])
input_dim = 3
output_dim = 1
data_shape = (1, config["resolution"], config["resolution"], config["resolution"])
is_point_cloud = True
elif config["dataset"] == 'era5':
dataloader = era5(path_to_data=config["path_to_data"],
batch_size=config["training"]["batch_size"])
input_dim = 3
output_dim = 1
data_shape = (46, 90)
is_era5 = True
# Setup data converter
if is_point_cloud:
data_converter = PointCloudDataConverter(device, data_shape, normalize_features=True)
elif is_era5:
data_converter = ERA5Converter(device, data_shape, normalize_features=True)
else:
data_converter = GridDataConverter(device, data_shape, normalize_features=True)
# Setup encoding for function distribution
num_frequencies = config["generator"]["encoding"]["num_frequencies"]
std_dev = config["generator"]["encoding"]["std_dev"]
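# Random Fourier features: input coordinates are projected through a fixed
# Gaussian random matrix (num_frequencies x input_dim, std std_dev) before the
# MLP, which helps the generator represent high-frequency detail. Setting
# num_frequencies to 0 skips the encoding and feeds raw coordinates.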
if num_frequencies:
frequency_matrix = torch.normal(mean=torch.zeros(num_frequencies, input_dim),
std=std_dev).to(device)
encoding = FourierFeatures(frequency_matrix)
else:
encoding = torch.nn.Identity()
# Setup generator models
final_non_linearity = torch.nn.Tanh()
non_linearity = torch.nn.LeakyReLU(0.1)
function_representation = FunctionRepresentation(input_dim, output_dim,
config["generator"]["layer_sizes"],
encoding, non_linearity,
final_non_linearity).to(device)
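# The hypernetwork maps a latent code to the weights of the coordinate-based
# function representation above; sampling latents therefore samples functions,
# which is what FunctionDistribution wraps.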
hypernetwork = HyperNetwork(function_representation, config["generator"]["latent_dim"],
config["generator"]["hypernet_layer_sizes"], non_linearity).to(device)
function_distribution = FunctionDistribution(hypernetwork).to(device)
# Setup discriminator
discriminator = PointConvDiscriminator(input_dim, output_dim, config["discriminator"]["layer_configs"],
linear_layer_sizes=config["discriminator"]["linear_layer_sizes"],
norm_order=config["discriminator"]["norm_order"],
add_sigmoid=True,
add_batchnorm=config["discriminator"]["add_batchnorm"],
add_weightnet_batchnorm=config["discriminator"]["add_weightnet_batchnorm"],
deterministic=config["discriminator"]["deterministic"],
same_coordinates=config["discriminator"]["same_coordinates"]).to(device)
print("\nFunction distribution")
print(hypernetwork)
print("Number of parameters: {}".format(count_parameters(hypernetwork)))
print("\nDiscriminator")
print(discriminator)
print("Number of parameters: {}".format(count_parameters(discriminator)))
# Setup trainer
trainer = Trainer(device, function_distribution, discriminator, data_converter,
lr=config["training"]["lr"], lr_disc=config["training"]["lr_disc"],
r1_weight=config["training"]["r1_weight"],
max_num_points=config["training"]["max_num_points"],
print_freq=config["training"]["print_freq"], save_dir=directory,
model_save_freq=config["training"]["model_save_freq"],
is_voxel=is_voxel, is_point_cloud=is_point_cloud,
is_era5=is_era5)
trainer.train(dataloader, config["training"]["epochs"])
| 43.034247 | 115 | 0.660353 | [
"MIT"
] | EmilienDupont/neural-function-distributions | main.py | 6,283 | Python |
#! /usr/bin/env python
# Convert OpenSSH known_hosts and known_hosts2 files to "new format" PuTTY
# host keys.
# usage:
# kh2reg.py [ --win ] known_hosts1 2 3 4 ... > hosts.reg
# Creates a Windows .REG file (double-click to install).
# kh2reg.py --unix known_hosts1 2 3 4 ... > sshhostkeys
# Creates data suitable for storing in ~/.putty/sshhostkeys (Unix).
# Line endings are someone else's problem as is traditional.
# Originally developed for Python 1.5.2, but probably won't run on that
# any more.
import fileinput
import base64
import struct
import string
import re
import sys
import getopt
def winmungestr(s):
"Duplicate of PuTTY's mungestr() in winstore.c:1.10 for Registry keys"
candot = 0
r = ""
for c in s:
if c in ' \*?%~' or ord(c)<ord(' ') or (c == '.' and not candot):
r = r + ("%%%02X" % ord(c))
else:
r = r + c
candot = 1
return r
def strtolong(s):
"Convert arbitrary-length big-endian binary data to a Python long"
bytes = struct.unpack(">%luB" % len(s), s)
return reduce ((lambda a, b: (long(a) << 8) + long(b)), bytes)
def longtohex(n):
"""Convert long int to lower-case hex.
Ick, Python (at least in 1.5.2) doesn't appear to have a way to
turn a long int into an unadorned hex string -- % gets upset if the
number is too big, and raw hex() uses uppercase (sometimes), and
adds unwanted "0x...L" around it."""
plain=string.lower(re.match(r"0x([0-9A-Fa-f]*)l?$", hex(n), re.I).group(1))
return "0x" + plain
output_type = 'windows'
try:
optlist, args = getopt.getopt(sys.argv[1:], '', [ 'win', 'unix' ])
if filter(lambda x: x[0] == '--unix', optlist):
output_type = 'unix'
except getopt.error, e:
sys.stderr.write(str(e) + "\n")
sys.exit(1)
if output_type == 'windows':
# Output REG file header.
sys.stdout.write("""REGEDIT4
[HKEY_CURRENT_USER\Software\SimonTatham\PuTTY\SshHostKeys]
""")
class BlankInputLine(Exception):
pass
class UnknownKeyType(Exception):
def __init__(self, keytype):
self.keytype = keytype
# Now process all known_hosts input.
for line in fileinput.input(args):
try:
# Remove leading/trailing whitespace (should zap CR and LF)
line = string.strip (line)
# Skip blanks and comments
if line == '' or line[0] == '#':
raise BlankInputLine
# Split line on spaces.
fields = string.split (line, ' ')
# Common fields
hostpat = fields[0]
magicnumbers = [] # placeholder
keytype = "" # placeholder
# Grotty heuristic to distinguish known_hosts from known_hosts2:
# is second field entirely decimal digits?
if re.match (r"\d*$", fields[1]):
# Treat as SSH-1-type host key.
# Format: hostpat bits10 exp10 mod10 comment...
# (PuTTY doesn't store the number of bits.)
magicnumbers = map (long, fields[2:4])
keytype = "rsa"
else:
# Treat as SSH-2-type host key.
# Format: hostpat keytype keyblob64 comment...
sshkeytype, blob = fields[1], base64.decodestring (fields[2])
# 'blob' consists of a number of
# uint32 N (big-endian)
# uint8[N] field_data
subfields = []
while blob:
sizefmt = ">L"
(size,) = struct.unpack (sizefmt, blob[0:4])
size = int(size) # req'd for slicage
(data,) = struct.unpack (">%lus" % size, blob[4:size+4])
subfields.append(data)
blob = blob [struct.calcsize(sizefmt) + size : ]
# The first field is keytype again, and the rest we can treat as
# an opaque list of bignums (same numbers and order as stored
# by PuTTY). (currently embedded keytype is ignored entirely)
magicnumbers = map (strtolong, subfields[1:])
# Translate key type into something PuTTY can use.
if sshkeytype == "ssh-rsa": keytype = "rsa2"
elif sshkeytype == "ssh-dss": keytype = "dss"
else:
raise UnknownKeyType(sshkeytype)
# Now print out one line per host pattern, discarding wildcards.
for host in string.split (hostpat, ','):
if re.search (r"[*?!]", host):
sys.stderr.write("Skipping wildcard host pattern '%s'\n"
% host)
continue
elif re.match (r"\|", host):
sys.stderr.write("Skipping hashed hostname '%s'\n" % host)
continue
else:
m = re.match (r"\[([^]]*)\]:(\d*)$", host)
if m:
(host, port) = m.group(1,2)
port = int(port)
else:
port = 22
# Slightly bizarre output key format: 'type@port:hostname'
# XXX: does PuTTY do anything useful with literal IP[v4]s?
key = keytype + ("@%d:%s" % (port, host))
value = string.join (map (longtohex, magicnumbers), ',')
if output_type == 'unix':
# Unix format.
sys.stdout.write('%s %s\n' % (key, value))
else:
# Windows format.
# XXX: worry about double quotes?
sys.stdout.write("\"%s\"=\"%s\"\n"
% (winmungestr(key), value))
except UnknownKeyType, k:
sys.stderr.write("Unknown SSH key type '%s', skipping\n" % k.keytype)
except BlankInputLine:
pass
| 34.860606 | 79 | 0.546071 | [
"MIT"
] | FireEgl/FuTTY | contrib/kh2reg.py | 5,752 | Python |
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import subprocess
from typing import Optional # noqa
from lte.protos.policydb_pb2 import FlowMatch
from .tc_ops_cmd import TcOpsCmd, argSplit, run_cmd
from .tc_ops_pyroute2 import TcOpsPyRoute2
from .types import QosInfo
from .utils import IdManager
LOG = logging.getLogger('pipelined.qos.qos_tc_impl')
# LOG.setLevel(logging.DEBUG)
# TODO - replace this implementation with pyroute2 tc
ROOT_QID = 65534
DEFAULT_RATE = '80Kbit'
DEFAULT_INTF_SPEED = '1000'
class TrafficClass:
"""
Creates/Deletes queues in linux. Using Qdiscs for flow based
rate limiting(traffic shaping) of user traffic.
"""
tc_ops = None
@staticmethod
def delete_class(intf: str, qid: int, skip_filter=False) -> int:
qid_hex = hex(qid)
if not skip_filter:
TrafficClass.tc_ops.del_filter(intf, qid_hex, qid_hex)
return TrafficClass.tc_ops.del_htb(intf, qid_hex)
@staticmethod
def create_class(
intf: str, qid: int, max_bw: int, rate=None,
parent_qid=None, skip_filter=False,
) -> int:
if not rate:
rate = DEFAULT_RATE
if not parent_qid:
parent_qid = ROOT_QID
if parent_qid == qid:
# parent qid should only be self for root case, everything else
# should be the child of root class
LOG.error('parent and self qid equal, setting parent_qid to root')
parent_qid = ROOT_QID
qid_hex = hex(qid)
parent_qid_hex = '1:' + hex(parent_qid)
err = TrafficClass.tc_ops.create_htb(intf, qid_hex, max_bw, rate, parent_qid_hex)
if err < 0 or skip_filter:
return err
# add filter
return TrafficClass.tc_ops.create_filter(intf, qid_hex, qid_hex)
@staticmethod
def init_qdisc(
intf: str, show_error=False, enable_pyroute2=False,
default_gbr=DEFAULT_RATE,
) -> int:
# TODO: Convert this class into an object.
if TrafficClass.tc_ops is None:
if enable_pyroute2:
TrafficClass.tc_ops = TcOpsPyRoute2()
else:
TrafficClass.tc_ops = TcOpsCmd()
cmd_list = []
speed = DEFAULT_INTF_SPEED
qid_hex = hex(ROOT_QID)
fn = "/sys/class/net/{intf}/speed".format(intf=intf)
try:
with open(fn, encoding="utf-8") as f:
speed = f.read().strip()
except OSError:
LOG.error('unable to read speed from %s defaulting to %s', fn, speed)
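        # Hierarchy built below: root HTB qdisc "1:" -> class 1:<ROOT_QID> capped
        # at the detected interface speed -> child class 1:1 limited to
        # default_gbr. Per-flow classes created later attach under 1:<ROOT_QID>.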
# qdisc does not support replace, so check it before creating the HTB qdisc.
qdisc_type = TrafficClass._get_qdisc_type(intf)
if qdisc_type != "htb":
qdisc_cmd = "tc qdisc add dev {intf} root handle 1: htb".format(intf=intf)
cmd_list.append(qdisc_cmd)
LOG.info("Created root qdisc")
parent_q_cmd = "tc class replace dev {intf} parent 1: classid 1:{root_qid} htb "
parent_q_cmd += "rate {speed}Mbit ceil {speed}Mbit"
parent_q_cmd = parent_q_cmd.format(intf=intf, root_qid=qid_hex, speed=speed)
cmd_list.append(parent_q_cmd)
tc_cmd = "tc class replace dev {intf} parent 1:{root_qid} classid 1:1 htb "
tc_cmd += "rate {rate} ceil {speed}Mbit"
tc_cmd = tc_cmd.format(
intf=intf, root_qid=qid_hex, rate=default_gbr,
speed=speed,
)
cmd_list.append(tc_cmd)
return run_cmd(cmd_list, show_error)
@staticmethod
def read_all_classes(intf: str):
qid_list = []
# example output of this command
# b'class htb 1:1 parent 1:fffe prio 0 rate 12Kbit ceil 1Gbit burst \
# 1599b cburst 1375b \nclass htb 1:fffe root rate 1Gbit ceil 1Gbit \
# burst 1375b cburst 1375b \n'
# we need to parse this output and extract class ids from here
tc_cmd = "tc class show dev {}".format(intf)
args = argSplit(tc_cmd)
try:
output = subprocess.check_output(args)
for ln in output.decode('utf-8').split("\n"):
ln = ln.strip()
if not ln:
continue
tok = ln.split()
if len(tok) < 5:
continue
if tok[1] != "htb":
continue
if tok[3] == 'root':
continue
qid_str = tok[2].split(':')[1]
qid = int(qid_str, 16)
pqid_str = tok[4].split(':')[1]
pqid = int(pqid_str, 16)
qid_list.append((qid, pqid))
LOG.debug("TC-dump: %s qid %d pqid %d", ln, qid, pqid)
except subprocess.CalledProcessError as e:
LOG.error('failed extracting classids from tc %s', e)
return qid_list
@staticmethod
def dump_class_state(intf: str, qid: int):
qid_hex = hex(qid)
tc_cmd = "tc -s -d class show dev {} classid 1:{}".format(
intf,
qid_hex,
)
args = argSplit(tc_cmd)
try:
output = subprocess.check_output(args)
print(output.decode())
except subprocess.CalledProcessError:
print("Exception dumping Qos State for %s", intf)
@staticmethod
def dump_root_class_stats(intf: str):
tc_cmd = "tc -s -s -d q ls dev {}".format(intf)
args = argSplit(tc_cmd)
try:
output = subprocess.check_output(args)
print(output.decode())
except subprocess.CalledProcessError:
print("Exception dumping Qos State for %s", intf)
@staticmethod
def get_class_rate(intf: str, qid: int) -> Optional[str]:
qid_hex = hex(qid)
tc_cmd = "tc class show dev {} classid 1:{}".format(intf, qid_hex)
args = argSplit(tc_cmd)
try:
# output: class htb 1:3 parent 1:2 prio 2 rate 250Kbit ceil 500Kbit burst 1600b cburst 1600b
raw_output = subprocess.check_output(args)
output = raw_output.decode('utf-8')
# return all config from 'rate' onwards
config = output.split("rate")
try:
return config[1]
except IndexError:
LOG.error("could not find rate: %s", output)
except subprocess.CalledProcessError:
LOG.error("Exception dumping Qos State for %s", tc_cmd)
@staticmethod
def _get_qdisc_type(intf: str) -> Optional[str]:
tc_cmd = "tc qdisc show dev {}".format(intf)
args = argSplit(tc_cmd)
try:
# output: qdisc htb 1: root refcnt 2 r2q 10 default 0 direct_packets_stat 314 direct_qlen 1000
raw_output = subprocess.check_output(args)
output = raw_output.decode('utf-8')
config = output.split()
try:
return config[1]
except IndexError:
LOG.error("could not qdisc type: %s", output)
except subprocess.CalledProcessError:
LOG.error("Exception dumping Qos State for %s", tc_cmd)
class TCManager(object):
"""
Creates/Deletes queues in linux. Using Qdiscs for flow based
rate limiting(traffic shaping) of user traffic.
Queues are created on an egress interface and flows
in OVS are programmed with qid to filter traffic to the queue.
Traffic matching a specific flow is filtered to a queue and is
rate limited based on configured value.
Traffic to flows with no QoS configuration are sent to a
default queue and are not rate limited.
"""
def __init__(
self,
datapath,
config,
) -> None:
self._datapath = datapath
self._uplink = config['nat_iface']
self._downlink = config['enodeb_iface']
self._max_rate = config["qos"]["max_rate"]
self._gbr_rate = config["qos"].get("gbr_rate", DEFAULT_RATE)
self._enable_pyroute2 = config["qos"].get('enable_pyroute2', False)
self._start_idx, self._max_idx = (
config['qos']['linux_tc']['min_idx'],
config['qos']['linux_tc']['max_idx'],
)
self._id_manager = IdManager(self._start_idx, self._max_idx)
self._initialized = True
LOG.info(
"Init LinuxTC module uplink:%s downlink:%s",
config['nat_iface'], config['enodeb_iface'],
)
def destroy(self):
if not TrafficClass.tc_ops:
LOG.info("TC not initialized, skip destroying existing qos classes")
return
LOG.info("destroying existing leaf qos classes")
# ensure ordering during deletion of classes, children should be deleted
# prior to the parent class ids
p_qids = set()
for intf in [self._uplink, self._downlink]:
qid_list = TrafficClass.read_all_classes(intf)
for qid_tuple in qid_list:
(qid, pqid) = qid_tuple
if self._start_idx <= qid < (self._max_idx - 1):
LOG.info("Attempting to delete class idx %d", qid)
TrafficClass.delete_class(intf, qid)
if self._start_idx <= pqid < (self._max_idx - 1):
p_qids.add((intf, pqid))
LOG.info("destroying existing parent classes")
for p_qid_tuple in p_qids:
(intf, pqid) = p_qid_tuple
LOG.info("Attempting to delete parent class idx %d", pqid)
TrafficClass.delete_class(intf, pqid, skip_filter=True)
LOG.info("destroying All qos classes: done")
def setup(self):
# initialize new qdisc
TrafficClass.init_qdisc(
self._uplink, enable_pyroute2=self._enable_pyroute2,
default_gbr=self._gbr_rate,
)
TrafficClass.init_qdisc(
self._downlink, enable_pyroute2=self._enable_pyroute2,
default_gbr=self._gbr_rate,
)
def get_action_instruction(self, qid: int):
# return an action and an instruction corresponding to this qid
if qid < self._start_idx or qid > (self._max_idx - 1):
LOG.error("invalid qid %d, no action/inst returned", qid)
return None, None
parser = self._datapath.ofproto_parser
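        # Mark packets with the class id; the tc filter installed by
        # TrafficClass.create_class matches this mark and steers the traffic
        # into the corresponding HTB class on the egress interface.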
return parser.OFPActionSetField(pkt_mark=qid), None, qid
def create_class_async(
self, d: FlowMatch.Direction, qos_info: QosInfo,
qid,
parent, skip_filter, cleanup_rule,
):
intf = self._uplink if d == FlowMatch.UPLINK else self._downlink
gbr = qos_info.gbr
if gbr is None:
gbr = self._gbr_rate
err = TrafficClass.create_class(
intf, qid, qos_info.mbr,
rate=gbr,
parent_qid=parent,
skip_filter=skip_filter,
)
# typecast to int to avoid MagicMock related error in unit test
err_no = int(err)
if err_no < 0:
if cleanup_rule:
cleanup_rule()
LOG.error("qos create error: qid %d err %d", qid, err_no)
return
LOG.debug("create done: if: %s qid %d err %s", intf, qid, err_no)
def add_qos(
self, d: FlowMatch.Direction, qos_info: QosInfo,
cleanup_rule=None, parent=None, skip_filter=False,
) -> int:
LOG.debug("add QoS: %s", qos_info)
qid = self._id_manager.allocate_idx()
self.create_class_async(
d, qos_info,
qid, parent, skip_filter, cleanup_rule,
)
LOG.debug("assigned qid: %d", qid)
return qid
def remove_qos(
self, qid: int, d: FlowMatch.Direction,
recovery_mode=False, skip_filter=False,
):
if not self._initialized and not recovery_mode:
return
if qid < self._start_idx or qid > (self._max_idx - 1):
LOG.error("invalid qid %d, removal failed", qid)
return
LOG.debug("deleting qos_handle %s, skip_filter %s", qid, skip_filter)
intf = self._uplink if d == FlowMatch.UPLINK else self._downlink
err = TrafficClass.delete_class(intf, qid, skip_filter)
if err == 0:
self._id_manager.release_idx(qid)
else:
LOG.error('error deleting class %d, not releasing idx', qid)
return
def read_all_state(self):
LOG.debug("read_all_state")
st = {}
apn_qid_list = set()
ul_qid_list = TrafficClass.read_all_classes(self._uplink)
dl_qid_list = TrafficClass.read_all_classes(self._downlink)
for (d, qid_list) in (
(FlowMatch.UPLINK, ul_qid_list),
(FlowMatch.DOWNLINK, dl_qid_list),
):
for qid_tuple in qid_list:
qid, pqid = qid_tuple
if qid < self._start_idx or qid > (self._max_idx - 1):
LOG.debug("qid %d out of range: (%d - %d)", qid, self._start_idx, self._max_idx)
continue
apn_qid = pqid if pqid != self._max_idx else 0
st[qid] = {
'direction': d,
'ambr_qid': apn_qid,
}
if apn_qid != 0:
apn_qid_list.add(apn_qid)
self._id_manager.restore_state(st)
return st, apn_qid_list
def same_qos_config(
self, d: FlowMatch.Direction,
qid1: int, qid2: int,
) -> bool:
intf = self._uplink if d == FlowMatch.UPLINK else self._downlink
config1 = TrafficClass.get_class_rate(intf, qid1)
config2 = TrafficClass.get_class_rate(intf, qid2)
return config1 == config2
| 36.228792 | 106 | 0.59597 | [
"BSD-3-Clause"
] | BaicellsDev/magma | lte/gateway/python/magma/pipelined/qos/qos_tc_impl.py | 14,093 | Python |
# coding=utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import shutil
import mock
import pytest
from callee import Contains
from .conftest import git_out, search_diff, search_rev
from mozphab import environment, exceptions, mozphab
arc_call_conduit = mock.Mock()
call_conduit = mock.Mock()
def by_line_mock(*args, **_kwargs):
# join args to catch unicode errors
" ".join(*args)
return ["Revision URI: http://example.test/D123"]
check_call_by_line = mock.Mock()
check_call_by_line.side_effect = by_line_mock
initial_sha = None
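# Each test primes call_conduit.side_effect with the exact ordered sequence of
# Phabricator Conduit API responses the command under test is expected to
# request; an unexpected call or call order surfaces as a test failure.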
def test_submit_create_arc(in_process, git_repo_path, init_sha):
call_conduit.side_effect = (
dict(),
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
[{"userName": "alice", "phid": "PHID-USER-1"}],
)
testfile = git_repo_path / "X"
testfile.write_text("a")
git_out("add", ".")
git_out("commit", "--message", "A r?alice")
testfile = git_repo_path / "untracked"
testfile.write_text("a")
mozphab.main(
["submit", "--arc", "--yes", "--bug", "1", init_sha], is_development=True
)
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """
Bug 1 - A r?alice
Differential Revision: http://example.test/D123
"""
assert log.strip() == expected.strip()
def test_submit_create(in_process, git_repo_path, init_sha):
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# user search
[dict(userName="alice", phid="PHID-USER-1")],
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
(git_repo_path / "X").write_text(u"ą\nb\nc\n", encoding="utf-8")
(git_repo_path / "Y").write_text("no line ending")
git_out("add", ".")
(git_repo_path / "msg").write_text(u"Ą r?alice", encoding="utf-8")
git_out("commit", "--file", "msg")
(git_repo_path / "untracked").write_text("a\n")
mozphab.main(["submit", "--yes", "--bug", "1", init_sha], is_development=True)
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """
Bug 1 - Ą r?alice
Differential Revision: http://example.test/D123
"""
assert log.strip() == expected.strip()
assert mock.call("conduit.ping", {}) in call_conduit.call_args_list
assert (
mock.call("user.query", dict(usernames=["alice"]))
in call_conduit.call_args_list
)
assert (
mock.call(
"diffusion.repository.search",
dict(limit=1, constraints=dict(callsigns=["TEST"])),
)
in call_conduit.call_args_list
)
assert (
mock.call(
"differential.creatediff",
{
"sourceControlPath": "/",
"sourceControlSystem": "git",
"lintStatus": "none",
"sourcePath": mock.ANY,
"unitStatus": "none",
"sourceMachine": "http://example.test",
"sourceControlBaseRevision": mock.ANY,
"repositoryPHID": "PHID-REPO-1",
"branch": "HEAD",
"changes": [
{
"commitHash": mock.ANY,
"awayPaths": [],
"newProperties": {"unix:filemode": "100644"},
"oldPath": None,
"hunks": [
{
"oldOffset": 0,
"oldLength": 0,
"newOffset": 1,
"newLength": 3,
"addLines": 3,
"delLines": 0,
"corpus": "+ą\n+b\n+c\n",
"isMissingOldNewline": False,
"isMissingNewNewline": False,
}
],
"oldProperties": {},
"currentPath": "X",
"fileType": 1,
"type": 1,
"metadata": {},
},
{
"commitHash": mock.ANY,
"awayPaths": [],
"newProperties": {"unix:filemode": "100644"},
"oldPath": None,
"hunks": [
{
"oldOffset": 0,
"oldLength": 0,
"newOffset": 1,
"newLength": 1,
"addLines": 1,
"delLines": 0,
"corpus": (
"+no line ending\n\\ No newline at end of file\n"
),
"isMissingOldNewline": False,
"isMissingNewNewline": True,
}
],
"oldProperties": {},
"currentPath": "Y",
"fileType": 1,
"type": 1,
"metadata": {},
},
],
"creationMethod": "moz-phab-git",
},
)
in call_conduit.call_args_list
)
assert (
mock.call(
"differential.setdiffproperty",
{"diff_id": "1", "name": "local:commits", "data": ~Contains('"rev":')},
)
in call_conduit.call_args_list
)
assert (
call_conduit.call_args_list.count(
mock.call("differential.setdiffproperty", mock.ANY)
)
== 2
)
def test_submit_create_added_not_commited(in_process, git_repo_path, init_sha):
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# user search
[dict(userName="alice", phid="PHID-USER-1")],
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
(git_repo_path / "X").write_text("ą\r\nb\nc\n", encoding="utf-8")
(git_repo_path / "Y").write_text("no line ending")
git_out("add", ".")
(git_repo_path / "msg").write_text("Ą r?alice", encoding="utf-8")
git_out("commit", "--file", "msg")
(git_repo_path / "untracked").write_text("a\n")
git_out("add", "untracked")
with pytest.raises(exceptions.Error) as excinfo:
mozphab.main(["submit", "--yes", "--bug", "1", init_sha], is_development=True)
assert "Uncommitted changes present." in str(excinfo.value)
def test_submit_create_no_bug(in_process, git_repo_path, init_sha):
call_conduit.reset_mock()
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# user search
[dict(userName="alice", phid="PHID-USER-1")],
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
testfile = git_repo_path / "X"
testfile.write_text("a\n")
git_out("add", ".")
msgfile = git_repo_path / "msg"
msgfile.write_text("A r?alice")
git_out("commit", "--file", "msg")
mozphab.main(["submit", "--yes", "--no-bug", init_sha], is_development=True)
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """
A r?alice
Differential Revision: http://example.test/D123
"""
assert log.strip() == expected.strip()
def test_submit_create_binary_arc(in_process, git_repo_path, init_sha, data_file):
call_conduit.side_effect = (
dict(),
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
[{"userName": "alice", "phid": "PHID-USER-1"}],
)
shutil.copyfile(str(data_file), str(git_repo_path / "img.png"))
git_out("add", ".")
git_out("commit", "--message", "IMG")
mozphab.main(
["submit", "--arc", "--yes", "--bug", "1", init_sha], is_development=True
)
expected = """
Bug 1 - IMG
Differential Revision: http://example.test/D123
"""
log = git_out("log", "--format=%s%n%n%b", "-1")
assert log.strip() == expected.strip()
def test_submit_create_binary(in_process, git_repo_path, init_sha, data_file):
call_conduit.reset_mock()
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# file.allocate
dict(dict(filePHID=None, upload=True)),
# file.upload
dict(),
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
shutil.copyfile(str(data_file), str(git_repo_path / "img.png"))
git_out("add", ".")
git_out("commit", "-m", "IMG")
mozphab.main(["submit", "--yes", "--bug", "1", init_sha], is_development=True)
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """
Bug 1 - IMG
Differential Revision: http://example.test/D123
"""
assert log.strip() == expected.strip()
assert (
mock.call(
"file.allocate",
{"name": "img.png", "contentHash": mock.ANY, "contentLength": 182},
)
in call_conduit.call_args_list
)
assert (
mock.call("file.upload", {"data_base64": mock.ANY, "name": "img.png"})
in call_conduit.call_args_list
)
def test_submit_create_binary_existing(in_process, git_repo_path, init_sha, data_file):
call_conduit.reset_mock()
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# file.allocate
dict(dict(filePHID="PHID-FILE-1", upload=False)),
# no file.upload call
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
shutil.copyfile(str(data_file), str(git_repo_path / "img.png"))
git_out("add", ".")
git_out("commit", "-m", "IMG")
mozphab.main(["submit", "--yes", "--bug", "1", init_sha], is_development=True)
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """
Bug 1 - IMG
Differential Revision: http://example.test/D123
"""
assert log.strip() == expected.strip()
assert (
mock.call(
"file.allocate",
{"name": "img.png", "contentHash": mock.ANY, "contentLength": 182},
)
in call_conduit.call_args_list
)
assert mock.call("file.upload", mock.ANY) not in call_conduit.call_args_list
def test_submit_create_binary_chunked(in_process, git_repo_path, init_sha, data_file):
call_conduit.reset_mock()
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# file.allocate
dict(dict(filePHID="PHID-FILE-1", upload=True)),
# file.querychunks
[
dict(byteStart="0", byteEnd="4194304", complete=False),
dict(byteStart="4194304", byteEnd="8388608", complete=False),
dict(byteStart="8388608", byteEnd="8425160", complete=False),
],
# file.uploadchunk
dict(),
# file.uploadchunk
dict(),
# file.uploadchunk
dict(),
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
shutil.copyfile(str(data_file), str(git_repo_path / "img.png"))
git_out("add", ".")
git_out("commit", "-m", "IMG")
mozphab.main(["submit", "--yes", "--bug", "1", init_sha], is_development=True)
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """
Bug 1 - IMG
Differential Revision: http://example.test/D123
"""
assert log.strip() == expected.strip()
assert (
mock.call(
"file.allocate",
{"name": "img.png", "contentHash": mock.ANY, "contentLength": 182},
)
in call_conduit.call_args_list
)
assert (
mock.call("file.querychunks", {"filePHID": "PHID-FILE-1"})
in call_conduit.call_args_list
)
assert (
mock.call(
"file.uploadchunk",
{
"filePHID": "PHID-FILE-1",
"byteStart": 0,
"data": mock.ANY,
"dataEncoding": "base64",
},
)
in call_conduit.call_args_list
)
assert (
mock.call(
"file.uploadchunk",
{
"filePHID": "PHID-FILE-1",
"byteStart": 4194304,
"data": mock.ANY,
"dataEncoding": "base64",
},
)
in call_conduit.call_args_list
)
assert (
mock.call(
"file.uploadchunk",
{
"filePHID": "PHID-FILE-1",
"byteStart": 8388608,
"data": mock.ANY,
"dataEncoding": "base64",
},
)
in call_conduit.call_args_list
)
def test_submit_update(in_process, git_repo_path, init_sha):
call_conduit.reset_mock()
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# diffusion.revision.search
dict(data=[search_rev(rev=123)]),
# diffusion.diff.search
dict(data=[search_diff()]),
# whoami
dict(phid="PHID-USER-1"),
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
testfile = git_repo_path / "X"
testfile.write_text("ą", encoding="utf-8")
git_out("add", ".")
msgfile = git_repo_path / "msg"
msgfile.write_text(
"""\
Bug 1 - Ą
Differential Revision: http://example.test/D123
""",
encoding="utf-8",
)
git_out("commit", "--file", "msg")
mozphab.main(
["submit", "--yes"]
+ ["--bug", "1"]
+ ["--message", "update message ćwikła"]
+ [init_sha],
is_development=True,
)
assert call_conduit.call_count == 9
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """\
Bug 1 - Ą
Differential Revision: http://example.test/D123
"""
assert log == expected
def test_submit_update_no_change(in_process, git_repo_path, init_sha, git_sha):
testfile = git_repo_path / "X"
testfile.write_text("a")
git_out("add", ".")
msgfile = git_repo_path / "msg"
msgfile.write_text(
"""\
Bug 1 - A
Differential Revision: http://example.test/D123
"""
)
git_out("commit", "--file", "msg")
sha = git_sha()
call_conduit.reset_mock()
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# diffusion.revision.search
dict(data=[search_rev(rev=123)]),
# diffusion.diff.search
dict(data=[search_diff(node=sha)]),
# whoami
dict(phid="PHID-USER-1"),
)
mozphab.main(
["submit", "--yes"] + [init_sha],
is_development=True,
)
assert call_conduit.call_count == 5
def test_submit_remove_cr(in_process, git_repo_path, init_sha):
if environment.IS_WINDOWS:
pytest.skip("Removing CR will not work on Windows.")
call_conduit.side_effect = (
# CREATE
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# user.search
[dict(userName="alice", phid="PHID-USER-1")],
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
# UPDATE
# no need to ping (checked)
# no need to check reviewer
# no need to search for repository repository data is saved in .hg
# differential.creatediff
dict(dict(phid="PHID-DIFF-2", diffid="2")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="124")),
# differential.setdiffproperty
dict(),
)
test_a = git_repo_path / "X"
test_a.write_text("a\r\nb\n")
git_out("add", "X")
git_out("commit", "-am", "A r?alice")
mozphab.main(["submit", "--yes", "--bug", "1", init_sha], is_development=True)
call_conduit.reset_mock()
# removing CR, leaving LF
test_a.write_text("a\nb\n")
git_out("commit", "-am", "B r?alice")
mozphab.main(["submit", "--yes", "--bug", "1", "HEAD~"], is_development=True)
assert (
mock.call(
"differential.creatediff",
{
"changes": [
{
"metadata": {},
"oldPath": "X",
"currentPath": "X",
"awayPaths": [],
"oldProperties": {},
"newProperties": {},
"commitHash": mock.ANY,
"type": 2,
"fileType": 1,
"hunks": [
{
"oldOffset": 1,
"oldLength": 2,
"newOffset": 1,
"newLength": 2,
"addLines": 1,
"delLines": 1,
"isMissingOldNewline": False,
"isMissingNewNewline": False,
"corpus": "-a\r\n+a\n b\n",
}
],
}
],
"sourceMachine": "http://example.test",
"sourceControlSystem": "git",
"sourceControlPath": "/",
"sourceControlBaseRevision": mock.ANY,
"creationMethod": "moz-phab-git",
"lintStatus": "none",
"unitStatus": "none",
"repositoryPHID": "PHID-REPO-1",
"sourcePath": mock.ANY,
"branch": "HEAD",
},
)
in call_conduit.call_args_list
)
assert (
mock.call(
"differential.setdiffproperty",
{
"diff_id": "2",
"name": "local:commits",
"data": Contains('"summary": "Bug 1 - B r?alice"')
& Contains(
'"message": "'
"Bug 1 - B r?alice\\n\\n"
"Summary:\\n\\n\\n\\n\\n"
"Test Plan:\\n\\n"
"Reviewers: alice\\n\\n"
"Subscribers:\\n\\n"
'Bug #: 1"'
),
},
)
in call_conduit.call_args_list
)
def test_submit_single_last(in_process, git_repo_path, init_sha):
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
(git_repo_path / "X").write_text("a\n")
git_out("add", "X")
git_out("commit", "-am", "A")
(git_repo_path / "X").write_text("b\n")
git_out("commit", "-am", "B")
mozphab.main(["submit", "--yes", "--bug", "1", "--single"], is_development=True)
log = git_out("log", "--format=%s%n%n%b", "-2")
expected = """\
Bug 1 - B
Differential Revision: http://example.test/D123
A
"""
assert log == expected
def test_submit_single_first(in_process, git_repo_path, init_sha, git_sha):
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
(git_repo_path / "X").write_text("a\n")
git_out("add", "X")
git_out("commit", "-am", "A")
sha = git_sha()
(git_repo_path / "X").write_text("b\n")
git_out("commit", "-am", "B")
mozphab.main(
["submit", "--yes", "--bug", "1", "--single", sha], is_development=True
)
log = git_out("log", "--format=%s%n%n%b", "-2")
expected = """\
B
Bug 1 - A
Differential Revision: http://example.test/D123
"""
assert log == expected
def test_submit_update_no_message(in_process, git_repo_path, init_sha):
call_conduit.reset_mock()
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
dict(data=[search_rev(rev=123)]),
dict(data=[search_diff()]),
dict(phid="PHID-USER-1"),
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
(git_repo_path / "X").write_text(u"ą", encoding="utf-8")
git_out("add", ".")
(git_repo_path / "msg").write_text(
u"""\
Bug 1 - Ą
Differential Revision: http://example.test/D123
""",
encoding="utf-8",
)
git_out("commit", "--file", "msg")
mozphab.main(["submit", "--yes", "--bug", "1", init_sha], is_development=True)
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """\
Bug 1 - Ą
Differential Revision: http://example.test/D123
"""
assert log == expected
def test_submit_different_author_arc(in_process, git_repo_path, init_sha):
call_conduit.reset_mock()
call_conduit.side_effect = (
dict(),
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
[{"userName": "alice", "phid": "PHID-USER-1"}],
)
testfile = git_repo_path / "X"
testfile.write_text("a")
git_out("add", ".")
git_out(
"commit",
"--date",
"Tue, 22 Jan 2019 13:42:48 +0000",
"--author",
"foo <foo@bar.com>",
"--message",
"A r?alice",
)
testfile.write_text("b")
git_out(
"commit",
"--date",
"Tue, 22 Jan 2019 13:43:48 +0000",
"--author",
"bar <bar@foo.com>",
"--all",
"--message",
"B r?alice",
)
mozphab.main(
["submit", "--arc", "--yes", "--bug", "1", init_sha], is_development=True
)
log = git_out("log", "--format=%aD+++%an+++%ae", "-2")
expected = """\
Tue, 22 Jan 2019 13:43:48 +0000+++bar+++bar@foo.com
Tue, 22 Jan 2019 13:42:48 +0000+++foo+++foo@bar.com
"""
assert log == expected
def test_submit_utf8_author_arc(in_process, git_repo_path, init_sha):
call_conduit.reset_mock()
call_conduit.side_effect = (
dict(),
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
[{"userName": "alice", "phid": "PHID-USER-1"}],
)
testfile = git_repo_path / "X"
testfile.write_text("a")
git_out("add", ".")
git_out(
"commit",
"--date",
"Tue, 22 Jan 2019 13:42:48 +0000",
"--author",
"ćwikła <ćwikła@bar.com>",
"--message",
"A r?alice",
)
mozphab.main(
["submit", "--arc", "--yes", "--bug", "1", init_sha], is_development=True
)
log = git_out("log", "--format=%aD+++%an+++%ae", "-1")
expected = "Tue, 22 Jan 2019 13:42:48 +0000+++ćwikła+++ćwikła@bar.com\n"
assert log == expected
def test_submit_update_arc(in_process, git_repo_path, init_sha):
call_conduit.reset_mock()
call_conduit.side_effect = (
{}, # ping
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
dict(data=[search_rev(rev=123)]),
dict(data=[search_diff()]),
dict(phid="PHID-USER-1"),
)
testfile = git_repo_path / "X"
testfile.write_text("a")
git_out("add", ".")
# Write out our commit message as if the program had already run and appended
# a Differential Revision keyword to the commit body for tracking.
git_out(
"commit",
"--message",
"""\
Bug 1 - A
Differential Revision: http://example.test/D123
""",
)
mozphab.main(
["submit", "--arc", "--yes"]
+ ["--bug", "1"]
+ ["--message", "update message ćwikła"]
+ [init_sha],
is_development=True,
)
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """\
Bug 1 - A
Differential Revision: http://example.test/D123
"""
assert log == expected
def test_submit_update_bug_id_arc(in_process, git_repo_path, init_sha):
call_conduit.reset_mock()
call_conduit.side_effect = (
dict(),
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
dict(data=[search_rev(rev=123)]),
dict(data=[search_diff()]),
# get reviewers for updated revision
dict(phid="PHID-USER-1"),
)
arc_call_conduit.reset_mock()
arc_call_conduit.side_effect = (
{},
{"data": {}},
)
testfile = git_repo_path / "X"
testfile.write_text("a")
git_out("add", ".")
# Write out our commit message as if the program had already run and appended
# a Differential Revision keyword to the commit body for tracking.
git_out(
"commit",
"--message",
"""\
Bug 1 - A
Differential Revision: http://example.test/D123
""",
)
mozphab.main(
["submit", "--arc", "--yes", "--bug", "2", init_sha], is_development=True
)
arc_call_conduit.assert_called_with(
"differential.revision.edit",
{
"objectIdentifier": "D123",
"transactions": [{"type": "bugzilla.bug-id", "value": "2"}],
},
mock.ANY,
)
def test_submit_update_revision_not_found(in_process, git_repo_path, init_sha):
call_conduit.reset_mock()
call_conduit.side_effect = (
# ping
dict(),
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# response for searching D123 and D124
dict(data=[search_rev(rev=123)]),
dict(data=[search_diff()]),
# moz-phab asks again for D124
dict(data=[]),
# whoami
dict(phid="PHID-USER-1"),
# moz-phab asks again for D124
dict(data=[]),
# moz-phab asks again for D124
dict(data=[]),
)
testfile = git_repo_path / "X"
testfile.write_text(u"ą", encoding="utf-8")
git_out("add", ".")
msgfile = git_repo_path / "msg"
msgfile.write_text(
u"""\
Bug 1 - Ą
Differential Revision: http://example.test/D123
""",
encoding="utf-8",
)
git_out("commit", "--file", "msg")
testfile.write_text(u"missing repo")
msgfile.write_text(
u"""\
Bug 1 - missing revision
Differential Revision: http://example.test/D124
"""
)
git_out("commit", "--all", "--file", "./msg")
with pytest.raises(exceptions.Error) as excinfo:
mozphab.main(
["submit", "--yes"]
+ ["--bug", "1"]
+ ["--message", "update message ćwikła"]
+ [init_sha],
is_development=True,
)
assert "query result for revision D124" in str(excinfo.value)
def test_empty_file(in_process, git_repo_path, init_sha):
# Add an empty file
call_conduit.side_effect = (
# ping
dict(),
# diffusion.repository.search
dict(data=[dict(phid="PHID-REPO-1", fields=dict(vcs="git"))]),
# differential.creatediff
dict(dict(phid="PHID-DIFF-1", diffid="1")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="123")),
# differential.setdiffproperty
dict(),
)
testfile = git_repo_path / "X"
testfile.touch()
git_out("add", ".")
git_out("commit", "--message", "A")
mozphab.main(["submit", "--yes", "--bug", "1", init_sha], is_development=True)
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """
Bug 1 - A
Differential Revision: http://example.test/D123
"""
assert log.strip() == expected.strip()
    # Remove an empty file
call_conduit.reset_mock()
call_conduit.side_effect = (
# differential.creatediff
dict(dict(phid="PHID-DIFF-2", diffid="2")),
# differential.setdiffproperty
dict(),
# differential.revision.edit
dict(object=dict(id="124")),
# differential.setdiffproperty
dict(),
)
testfile.unlink()
git_out("commit", "-a", "--message", "B")
mozphab.main(["submit", "--yes", "--bug", "1", "HEAD~"], is_development=True)
log = git_out("log", "--format=%s%n%n%b", "-1")
expected = """
Bug 1 - B
Differential Revision: http://example.test/D124
"""
assert log.strip() == expected.strip()
| 30.049086 | 87 | 0.526953 | [
"MPL-2.0"
] | dklawren/review | tests/test_integration_git.py | 31,249 | Python |
from .optimizers import GAOptimizer | 35 | 35 | 0.885714 | [
"MIT"
] | simoore/control-in-verilog | controlinverilog/synthesis/__init__.py | 35 | Python |
import unittest
import io
import sys
import random
from unittest.mock import MagicMock, Mock, patch
from snap.grid import Grid
from snap.hand import Hand
from snap.card import Card
class TestGrid(unittest.TestCase):
def test__get__origin__returns_correct_cards(self):
random.seed(1)
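        # Seed the RNG so the grid is dealt deterministically and the card at
        # position (2, 1) is stable across test runs.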
expected_card = Card(7)
grid = Grid(3)
mock_position = self.get_mock_position(2, 1)
self.assertEqual(expected_card, grid.get(mock_position))
@patch.object(Hand, "hide_all")
def test__hide_all__calls_hide_all_on_hand(self, mock_hide_all):
height = 3
grid = Grid(height)
grid.hide_all()
mock_hide_all.assert_called()
self.assertEqual(height, len(mock_hide_all.call_args_list))
@patch.object(Hand, "strings")
def test__strings__returns_mock_strings(self, mock_strings_method):
mock_strings = ["line 1", "line 2"]
mock_strings_method.return_value = mock_strings
height = 3
grid = Grid(height)
strings = grid.strings()
mock_strings_method.assert_called()
self.assertEqual(height, len(mock_strings_method.call_args_list))
self.assertEqual(mock_strings * height, strings)
def get_mock_position(self, x, y):
pos = Mock()
pos.x.return_value = x
pos.y.return_value = y
return pos
def get_mock_hand(self):
hand = Mock()
hand.hide_all = MagicMock()
return hand | 31.170213 | 73 | 0.675085 | [
"MIT"
] | gabrielbarker/snap | test/test_grid.py | 1,465 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`accelerometer`
====================
Updated by lkasso <hello@mbientlab.com>
Created by hbldh <henrik.blidh@nedomkull.com>
Created on 2016-04-10
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import time
from pymetawear.discover import select_device
from pymetawear.client import MetaWearClient
address = select_device()
c = MetaWearClient(str(address), 'pygatt', debug=True)
print("New client created: {0}".format(c))
def acc_callback(data):
"""Handle a (epoch, (x,y,z)) accelerometer tuple."""
print("Epoch time: [{0}] - X: {1}, Y: {2}, Z: {3}".format(data[0], *data[1]))
print("Get possible accelerometer settings...")
settings = c.accelerometer.get_possible_settings()
print(settings)
time.sleep(1.0)
print("Write accelerometer settings...")
c.accelerometer.set_settings(data_rate=3.125, data_range=4.0)
time.sleep(1.0)
print("Check accelerometer settings...")
settings = c.accelerometer.get_current_settings()
print(settings)
print("Subscribing to accelerometer signal notifications...")
c.accelerometer.high_frequency_stream = False
c.accelerometer.notifications(acc_callback)
time.sleep(10.0)
print("Unsubscribe to notification...")
c.accelerometer.notifications(None)
time.sleep(5.0)
c.disconnect()
| 22.932203 | 81 | 0.739098 | [
"MIT"
] | somacoder/pymetawear | examples/accelerometer.py | 1,353 | Python |
# Natural Language Toolkit: SVM-based classifier
#
# Copyright (C) 2001-2022 NLTK Project
# Author: Leon Derczynski <leon@dcs.shef.ac.uk>
#
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
nltk.classify.svm was deprecated. For classification based
on support vector machines SVMs use nltk.classify.scikitlearn
(or `scikit-learn <https://scikit-learn.org>`_ directly).
"""
class SvmClassifier:
def __init__(self, *args, **kwargs):
raise NotImplementedError(__doc__)
| 28.222222 | 61 | 0.73622 | [
"Apache-2.0"
] | LouisJustinTALLOT/nltk | nltk/classify/svm.py | 508 | Python |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
with open('VERSION.py') as f:
exec(f.read())
setup(
    name='upserve-analysis',
version=__version__,
description='10X Genomics CLI',
long_description=readme,
author='Eddie Belter',
author_email='ebetler@gmail.com',
license=license,
url='https://github.com/ebelter/upserve-analysis.git',
install_requires=[
'click==7.0',
'pyyaml==5.1',
'Jinja2>=2.10.1',
],
entry_points='''
[console_scripts]
upserve=upserve.cli:cli
''',
setup_requires=["pytest-runner"],
tests_require=["pytest"],
packages=find_packages(exclude=('tests', 'docs')),
include_package_data=True,
)
| 22.648649 | 58 | 0.619332 | [
"MIT"
] | ebelter/upserve-analysis | setup.py | 838 | Python |
#!/usr/bin/python2
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import time
class BlacklistedSet(set):
BLACKLIST_TIMEOUT = 60
def __init__(self, items=[], blacklist_timeout=BLACKLIST_TIMEOUT):
self.__dict = {}
self.__blacklist_timeout = blacklist_timeout
for item in items:
set.add(self, item)
def add(self, item):
self.__dict[item] = time.time()
set.add(self, item)
def __contains__(self, item):
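    # An item is only reported as present once its stored timestamp lies in the
    # past; blacklist() pushes that timestamp into the future, hiding the item
    # until the blacklist timeout expires.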
return item in self.__dict and time.time() > self.__dict.get(item)
def __iter__(self):
for item in set.__iter__(self):
if time.time() > self.__dict.get(item):
yield item
def get_actual_size(self):
size = 0
for item in self.__iter__():
size += 1
return size
def get_item_at_index(self, index):
i = 0
for item in self.__iter__():
if i == index:
return item
i += 1
return None
def blacklist(self, item):
self.__dict[item] = time.time() + self.__blacklist_timeout
if __name__ == "__main__":
hosts = [1, 2, 3, 4]
bs = BlacklistedSet(hosts)
bs.blacklist(4)
print bs
for a in bs:
print a
time.sleep(2)
bs.blacklist(1)
bs.blacklist(5)
for a in bs:
print a
| 25.945946 | 72 | 0.697917 | [
"Apache-2.0"
] | zyclove/ambari | ambari-metrics/ambari-metrics-host-monitoring/src/main/python/core/blacklisted_set.py | 1,920 | Python |
#!/usr/bin/python
################################################################################
# 20de4144-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20de4144-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKLM:\Software\Policies\Microsoft\EMET\SysSettings', 'ASLR')
# Output Lines
self.output = [r'HKLM:\Software\Policies\Microsoft\EMET\SysSettings', ('ASLR=' + str(dword))]
if dword == 3:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\EMET'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\EMET\SysSettings'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Policies\Microsoft\EMET\SysSettings' -name 'ASLR' -value 3 -Type DWord")
| 35.842105 | 136 | 0.586637 | [
"MIT"
] | phnomcobra/PCAT2PY | pcat2py/class/20de4144-5cc5-11e4-af55-00155d01fe08.py | 1,362 | Python |
import _plotly_utils.basevalidators
class SizeyValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="sizey", parent_name="layout.image", **kwargs):
super(SizeyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
role=kwargs.pop("role", "info"),
**kwargs
)
| 34.230769 | 82 | 0.653933 | [
"MIT"
] | 4RCAN3/CryptoTrader | binbot/Lib/site-packages/plotly/validators/layout/image/_sizey.py | 445 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import io
import os
import sys
from setuptools import find_packages, setup
def read_req(req_file):
with open(os.path.join('requirements', req_file)) as req:
return [line.strip() for line in req.readlines() if line.strip() and not line.strip().startswith('#')]
with io.open('README.rst', encoding='utf-8') as readme:
description = readme.read()
requirements = read_req('base.txt')
requirements_validation = read_req('validation.txt')
def find_versions_from_readme(prefix):
for line in description.splitlines():
line = line.strip()
if line.startswith(prefix):
versions = [v.strip() for v in line[len(prefix):].split(',')]
if versions:
return versions
raise RuntimeError("failed to find supported versions list for '{}'".format(prefix))
python_versions = find_versions_from_readme("- **Python**: ")
django_versions = find_versions_from_readme("- **Django**: ")
python_requires = ">=" + python_versions[0]
python_classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 3',
] + ['Programming Language :: Python :: {}'.format(v) for v in python_versions]
django_classifiers = [
'Framework :: Django',
] + ['Framework :: Django :: {}'.format(v) for v in django_versions]
def drf_yasg_setup(**kwargs):
setup(
name='drf-yasg',
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
install_requires=requirements,
extras_require={
'validation': requirements_validation,
},
license='BSD License',
description='Automated generation of real Swagger/OpenAPI 2.0 schemas from Django Rest Framework code.',
long_description=description,
long_description_content_type='text/x-rst',
url='https://github.com/axnsan12/drf-yasg',
author='Cristi V.',
author_email='cristi@cvjd.me',
keywords='drf django django-rest-framework schema swagger openapi codegen swagger-codegen '
'documentation drf-yasg django-rest-swagger drf-openapi',
python_requires=python_requires,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Environment :: Web Environment',
'Topic :: Documentation',
'Topic :: Software Development :: Code Generators',
] + python_classifiers + django_classifiers,
**kwargs
)
try:
# noinspection PyUnresolvedReferences
import setuptools_scm # noqa: F401
drf_yasg_setup(use_scm_version=True)
except (ImportError, LookupError) as e:
if os.getenv('CI', 'false') == 'true' or os.getenv('TRAVIS', 'false') == 'true':
# don't silently fail on travis - we don't want to accidentally push a dummy version to PyPI
raise
err_msg = str(e)
if 'setuptools-scm' in err_msg or 'setuptools_scm' in err_msg:
import time
import traceback
timestamp_ms = int(time.time() * 1000)
timestamp_str = hex(timestamp_ms)[2:].zfill(16)
dummy_version = '1!0.0.0.dev0+noscm.' + timestamp_str
drf_yasg_setup(version=dummy_version)
traceback.print_exc(file=sys.stderr)
print("failed to detect version, package was built with dummy version " + dummy_version, file=sys.stderr)
else:
raise
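# Note: the dummy fallback above produces a PEP 440 version of the form
# "1!0.0.0.dev0+noscm.<16-hex-digit ms timestamp>", e.g. something like
# "1!0.0.0.dev0+noscm.0000017f0a1b2c3d", so an accidental build is easy to
# tell apart from a real release.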
| 33.747664 | 113 | 0.649405 | [
"BSD-3-Clause"
] | Core00077/drf-yasg | setup.py | 3,611 | Python |
from tourapi.list import TourAPI
from tourapi.config import ServiceKey, MobileOS, MobileApp, Languages
from mysql_config import MysqlHost, MysqlUser, MysqlPass, MysqlDB
import pymysql
import json
def upload_category_codes(codes, language="Kor", level=0, cat1="", cat2="", cat3=""):
global conn, curs
query = """
INSERT INTO category_code (code, cat1, cat2, cat3, level, name_{0}) VALUES (%s, %s, %s, %s, %s, %s)
ON DUPLICATE KEY UPDATE name_{0}=%s
""".format(language.lower())
for code in codes:
curs.execute(query, (code["code"], cat1, cat2, cat3, level, code["name"], code["name"]))
# print(code["name"], code["code"])
conn.commit()
return
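# Each `codes` argument above is assumed to be a list of dicts shaped like
# {"code": "A01", "name": "..."}, as returned by TourAPI.list_category_code();
# one row per code and language column is upserted into category_code.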
conn = pymysql.connect(host = MysqlHost, user = MysqlUser, password = MysqlPass, db = MysqlDB)
curs = conn.cursor()
for lan in Languages:
language = lan["code"]
api = TourAPI(ServiceKey, language)
    # Top-level (cat1) categories
cat1_codes = api.list_category_code()
upload_category_codes(cat1_codes, language, 1)
for cat1 in cat1_codes:
cat2_codes = api.list_category_code(cat1["code"])
upload_category_codes(cat2_codes, language, 2, cat1["code"])
print(cat2_codes)
for cat2 in cat2_codes:
cat3_codes = api.list_category_code(cat1["code"], cat2["code"])
upload_category_codes(cat3_codes, language, 3, cat1["code"], cat2["code"])
conn.commit()
conn.close() | 30.2 | 101 | 0.693893 | [
"MIT"
] | darkbright/TourAPI | 02_category_code.py | 1,373 | Python |
import os
import shutil
import subprocess
import re
import string
import pathlib
import timeit
import jmhbenchmark
class JHaskellBenchmark(jmhbenchmark.JMHBenchmark):
def __init__(self, name, source_path, compiler_args=None):
if compiler_args is None:
compiler_args = []
source_path = pathlib.Path(source_path)
super().__init__(name, source_path.stem.lower(), source_path.stem.capitalize())
self._source_path = source_path
self._compiler_args = compiler_args.copy()
def __enter__(self):
ret = super().__enter__()
self._output_jar = (self._temp_dir / self._name).with_suffix(".jar")
return ret
def get_run_args(self):
return ["-jar", f"{self._name}.jar"]
def _compile(self):
self._run_jhaskell_compiler()
def _post_compile(self):
self._results["size"] = jmhbenchmark.get_jar_entry_size(
self._output_jar,
[
f"{self._package_name}/{s}.class"
for s in [self._class_name, "Data", "Function", "BoxedData", "HeapObject"]
],
)
return super()._post_compile()
def _get_classpath(self):
return [f"{self._name}.jar"]
def _run_jhaskell_compiler(self, extra_args=None):
if extra_args is None:
extra_args = []
original_dir = pathlib.Path.cwd()
# Build the source program
args = (
[
"compiler-exe",
"--build-dir",
f"{self._temp_dir / 'out'}",
"--output-jar",
str(self._output_jar),
"--output-class",
self._class_name,
"--runtime-file-dir",
str(original_dir.parent / "runtime"),
]
+ self._compiler_args
+ extra_args
+ [f"programs/{self._package_name}.hs"]
)
try:
return subprocess.check_output(args)
except subprocess.CalledProcessError as e:
print(e.stdout.decode())
raise
# For JHaskell, time for each stage of the compiler
def _benchmark_compilation(self, iterations=50):
number = 1
# Record the output of each invocation
outputs = []
def bench_func():
outputs.append(self._run_jhaskell_compiler(["--time-stages"]).decode())
overall_times = timeit.repeat(stmt=bench_func, setup=self._pre_compile, number=number, repeat=iterations)
time_data = []
data_extractor = re.compile(r"(.+): (.+)ms")
for output, overall_time in zip(outputs, overall_times):
cumulative_time = 0
this_run_data = []
for line in output.splitlines():
match = data_extractor.fullmatch(line)
if match is None:
raise RuntimeError("Invalid line from compiler: " + line)
this_time = float(match.group(2))
this_run_data.append((match.group(1), this_time))
cumulative_time += this_time
#this_run_data.append(("Other", overall_time * 1000 - cumulative_time))
time_data.append(this_run_data)
self._results["times"] = time_data
| 32.376238 | 113 | 0.576758 | [
"BSD-3-Clause"
] | hnefatl/dissertation-project | benchmarks/jhaskellbenchmark.py | 3,270 | Python |
import copy
import logging
from collections import defaultdict
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal, is_numeric_dtype
import featuretools.variable_types.variable as vtypes
from featuretools.entityset import deserialize, serialize
from featuretools.entityset.entity import Entity
from featuretools.entityset.relationship import Relationship, RelationshipPath
from featuretools.utils.gen_utils import import_or_raise
pd.options.mode.chained_assignment = None # default='warn'
logger = logging.getLogger('featuretools.entityset')
class EntitySet(object):
"""
Stores all actual data for a entityset
Attributes:
id
entity_dict
relationships
time_type
Properties:
metadata
"""
def __init__(self, id=None, entities=None, relationships=None):
"""Creates EntitySet
Args:
id (str) : Unique identifier to associate with this instance
entities (dict[str -> tuple(pd.DataFrame, str, str, dict[str -> Variable])]): dictionary of
entities. Entries take the format
{entity id -> (dataframe, id column, (time_index), (variable_types), (make_index))}.
Note that time_index, variable_types and make_index are optional.
relationships (list[(str, str, str, str)]): List of relationships
between entities. List items are a tuple with the format
(parent entity id, parent variable, child entity id, child variable).
Example:
.. code-block:: python
entities = {
"cards" : (card_df, "id"),
"transactions" : (transactions_df, "id", "transaction_time")
}
relationships = [("cards", "id", "transactions", "card_id")]
ft.EntitySet("my-entity-set", entities, relationships)
"""
self.id = id
self.entity_dict = {}
self.relationships = []
self.time_type = None
entities = entities or {}
relationships = relationships or []
for entity in entities:
df = entities[entity][0]
index_column = entities[entity][1]
time_index = None
variable_types = None
make_index = None
if len(entities[entity]) > 2:
time_index = entities[entity][2]
if len(entities[entity]) > 3:
variable_types = entities[entity][3]
if len(entities[entity]) > 4:
make_index = entities[entity][4]
self.entity_from_dataframe(entity_id=entity,
dataframe=df,
index=index_column,
time_index=time_index,
variable_types=variable_types,
make_index=make_index)
for relationship in relationships:
parent_variable = self[relationship[0]][relationship[1]]
child_variable = self[relationship[2]][relationship[3]]
self.add_relationship(Relationship(parent_variable,
child_variable))
self.reset_data_description()
def __sizeof__(self):
return sum([entity.__sizeof__() for entity in self.entities])
def __dask_tokenize__(self):
return (EntitySet, serialize.entityset_to_description(self.metadata))
def __eq__(self, other, deep=False):
if len(self.entity_dict) != len(other.entity_dict):
return False
for eid, e in self.entity_dict.items():
if eid not in other.entity_dict:
return False
if not e.__eq__(other[eid], deep=deep):
return False
for r in other.relationships:
if r not in other.relationships:
return False
return True
def __ne__(self, other, deep=False):
return not self.__eq__(other, deep=deep)
def __getitem__(self, entity_id):
"""Get entity instance from entityset
Args:
entity_id (str): Id of entity.
Returns:
:class:`.Entity` : Instance of entity. None if entity doesn't
exist.
"""
if entity_id in self.entity_dict:
return self.entity_dict[entity_id]
name = self.id or "entity set"
raise KeyError('Entity %s does not exist in %s' % (entity_id, name))
@property
def entities(self):
return list(self.entity_dict.values())
@property
def metadata(self):
'''Returns the metadata for this EntitySet. The metadata will be recomputed if it does not exist.'''
if self._data_description is None:
description = serialize.entityset_to_description(self)
self._data_description = deserialize.description_to_entityset(description)
return self._data_description
def reset_data_description(self):
self._data_description = None
def to_pickle(self, path, compression=None, profile_name=None):
'''Write entityset in the pickle format, location specified by `path`.
Path could be a local path or a S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='pickle', compression=compression, profile_name=profile_name)
return self
def to_parquet(self, path, engine='auto', compression=None, profile_name=None):
'''Write entityset to disk in the parquet format, location specified by `path`.
Path could be a local path or a S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str): location on disk to write to (will be created as a directory)
engine (str) : Name of the engine to use. Possible values are: {'auto', 'pyarrow', 'fastparquet'}.
compression (str) : Name of the compression to use. Possible values are: {'snappy', 'gzip', 'brotli', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='parquet', engine=engine, compression=compression, profile_name=profile_name)
return self
def to_csv(self, path, sep=',', encoding='utf-8', engine='python', compression=None, profile_name=None):
'''Write entityset to disk in the csv format, location specified by `path`.
Path could be a local path or a S3 path.
If writing to S3 a tar archive of files will be written.
Args:
path (str) : Location on disk to write to (will be created as a directory)
sep (str) : String of length 1. Field delimiter for the output file.
encoding (str) : A string representing the encoding to use in the output file, defaults to 'utf-8'.
engine (str) : Name of the engine to use. Possible values are: {'c', 'python'}.
compression (str) : Name of the compression to use. Possible values are: {'gzip', 'bz2', 'zip', 'xz', None}.
profile_name (str) : Name of AWS profile to use, False to use an anonymous profile, or None.
'''
serialize.write_data_description(self, path, format='csv', index=False, sep=sep, encoding=encoding, engine=engine, compression=compression, profile_name=profile_name)
return self
def to_dictionary(self):
return serialize.entityset_to_description(self)
###########################################################################
# Public getter/setter methods #########################################
###########################################################################
def __repr__(self):
repr_out = u"Entityset: {}\n".format(self.id)
repr_out += u" Entities:"
for e in self.entities:
if e.df.shape:
repr_out += u"\n {} [Rows: {}, Columns: {}]".format(
e.id, e.df.shape[0], e.df.shape[1])
else:
repr_out += u"\n {} [Rows: None, Columns: None]".format(
e.id)
repr_out += "\n Relationships:"
if len(self.relationships) == 0:
repr_out += u"\n No relationships"
for r in self.relationships:
repr_out += u"\n %s.%s -> %s.%s" % \
(r._child_entity_id, r._child_variable_id,
r._parent_entity_id, r._parent_variable_id)
return repr_out
def add_relationships(self, relationships):
"""Add multiple new relationships to a entityset
Args:
relationships (list[Relationship]) : List of new
relationships.
"""
return [self.add_relationship(r) for r in relationships][-1]
def add_relationship(self, relationship):
"""Add a new relationship between entities in the entityset
Args:
relationship (Relationship) : Instance of new
relationship to be added.
"""
if relationship in self.relationships:
logger.warning(
"Not adding duplicate relationship: %s", relationship)
return self
# _operations?
# this is a new pair of entities
child_e = relationship.child_entity
child_v = relationship.child_variable.id
parent_e = relationship.parent_entity
parent_v = relationship.parent_variable.id
if not isinstance(child_e[child_v], vtypes.Id):
child_e.convert_variable_type(variable_id=child_v,
new_type=vtypes.Id,
convert_data=False)
if not isinstance(parent_e[parent_v], vtypes.Index):
parent_e.convert_variable_type(variable_id=parent_v,
new_type=vtypes.Index,
convert_data=False)
# Empty dataframes (as a result of accessing Entity.metadata)
# default to object dtypes for discrete variables, but
# indexes/ids default to ints. In this case, we convert
# the empty column's type to int
if isinstance(child_e.df, pd.DataFrame) and \
(child_e.df.empty and child_e.df[child_v].dtype == object and
is_numeric_dtype(parent_e.df[parent_v])):
child_e.df[child_v] = pd.Series(name=child_v, dtype=np.int64)
parent_dtype = parent_e.df[parent_v].dtype
child_dtype = child_e.df[child_v].dtype
msg = u"Unable to add relationship because {} in {} is Pandas dtype {}"\
u" and {} in {} is Pandas dtype {}."
if not is_dtype_equal(parent_dtype, child_dtype):
raise ValueError(msg.format(parent_v, parent_e.id, parent_dtype,
child_v, child_e.id, child_dtype))
self.relationships.append(relationship)
self.reset_data_description()
return self
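    # Usage sketch (entity/variable names follow the "cards"/"transactions"
    # example in the class docstring):
    #
    #   es.add_relationship(Relationship(es["cards"]["id"],
    #                                    es["transactions"]["card_id"]))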
###########################################################################
# Relationship access/helper methods ###################################
###########################################################################
def find_forward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all forward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : id of entity to start the search from
goal_entity_id (str) : if of entity to find forward path to
See Also:
:func:`BaseEntitySet.find_backward_paths`
"""
for sub_entity_id, path in self._forward_entity_paths(start_entity_id):
if sub_entity_id == goal_entity_id:
yield path
def find_backward_paths(self, start_entity_id, goal_entity_id):
"""
Generator which yields all backward paths between a start and goal
entity. Does not include paths which contain cycles.
Args:
start_entity_id (str) : Id of entity to start the search from.
goal_entity_id (str) : Id of entity to find backward path to.
See Also:
:func:`BaseEntitySet.find_forward_paths`
"""
for path in self.find_forward_paths(goal_entity_id, start_entity_id):
# Reverse path
yield path[::-1]
def _forward_entity_paths(self, start_entity_id, seen_entities=None):
"""
Generator which yields the ids of all entities connected through forward
relationships, and the path taken to each. An entity will be yielded
multiple times if there are multiple paths to it.
Implemented using depth first search.
"""
if seen_entities is None:
seen_entities = set()
if start_entity_id in seen_entities:
return
seen_entities.add(start_entity_id)
yield start_entity_id, []
for relationship in self.get_forward_relationships(start_entity_id):
next_entity = relationship.parent_entity.id
# Copy seen entities for each next node to allow multiple paths (but
# not cycles).
descendants = self._forward_entity_paths(next_entity, seen_entities.copy())
for sub_entity_id, sub_path in descendants:
yield sub_entity_id, [relationship] + sub_path
def get_forward_entities(self, entity_id, deep=False):
"""
Get entities that are in a forward relationship with entity
Args:
entity_id (str): Id entity of entity to search from.
deep (bool): if True, recursively find forward entities.
Yields a tuple of (descendent_id, path from entity_id to descendant).
"""
for relationship in self.get_forward_relationships(entity_id):
parent_eid = relationship.parent_entity.id
direct_path = RelationshipPath([(True, relationship)])
yield parent_eid, direct_path
if deep:
sub_entities = self.get_forward_entities(parent_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_backward_entities(self, entity_id, deep=False):
"""
Get entities that are in a backward relationship with entity
Args:
entity_id (str): Id entity of entity to search from.
deep (bool): if True, recursively find backward entities.
Yields a tuple of (descendent_id, path from entity_id to descendant).
"""
for relationship in self.get_backward_relationships(entity_id):
child_eid = relationship.child_entity.id
direct_path = RelationshipPath([(False, relationship)])
yield child_eid, direct_path
if deep:
sub_entities = self.get_backward_entities(child_eid, deep=True)
for sub_eid, path in sub_entities:
yield sub_eid, direct_path + path
def get_forward_relationships(self, entity_id):
"""Get relationships where entity "entity_id" is the child
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: List of forward relationships.
"""
return [r for r in self.relationships if r.child_entity.id == entity_id]
def get_backward_relationships(self, entity_id):
"""
get relationships where entity "entity_id" is the parent.
Args:
entity_id (str): Id of entity to get relationships for.
Returns:
list[:class:`.Relationship`]: list of backward relationships
"""
return [r for r in self.relationships if r.parent_entity.id == entity_id]
def has_unique_forward_path(self, start_entity_id, end_entity_id):
"""
Is the forward path from start to end unique?
This will raise if there is no such path.
"""
paths = self.find_forward_paths(start_entity_id, end_entity_id)
next(paths)
second_path = next(paths, None)
return not second_path
###########################################################################
# Entity creation methods ##############################################
###########################################################################
def entity_from_dataframe(self,
entity_id,
dataframe,
index=None,
variable_types=None,
make_index=False,
time_index=None,
secondary_time_index=None,
already_sorted=False):
"""
Load the data for a specified entity from a Pandas DataFrame.
Args:
entity_id (str) : Unique id to associate with this entity.
dataframe (pandas.DataFrame) : Dataframe containing the data.
index (str, optional): Name of the variable used to index the entity.
If None, take the first column.
variable_types (dict[str -> Variable/str], optional):
                Keys are variable ids and values are variable types or type_strings. Used to
initialize an entity's store.
make_index (bool, optional) : If True, assume index does not
exist as a column in dataframe, and create a new column of that name
using integers. Otherwise, assume index exists.
time_index (str, optional): Name of the variable containing
time data. Type must be in :class:`variables.DateTime` or be
able to be cast to datetime (e.g. str, float, or numeric.)
secondary_time_index (dict[str -> Variable]): Name of variable
containing time data to use a second time index for the entity.
already_sorted (bool, optional) : If True, assumes that input dataframe
is already sorted by time. Defaults to False.
Notes:
Will infer variable types from Pandas dtype
Example:
.. ipython:: python
import featuretools as ft
import pandas as pd
transactions_df = pd.DataFrame({"id": [1, 2, 3, 4, 5, 6],
"session_id": [1, 2, 1, 3, 4, 5],
"amount": [100.40, 20.63, 33.32, 13.12, 67.22, 1.00],
"transaction_time": pd.date_range(start="10:00", periods=6, freq="10s"),
"fraud": [True, False, True, False, True, True]})
es = ft.EntitySet("example")
es.entity_from_dataframe(entity_id="transactions",
index="id",
time_index="transaction_time",
dataframe=transactions_df)
es["transactions"]
es["transactions"].df
"""
variable_types = variable_types or {}
if time_index is not None and time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (time_index))
if time_index is None:
for variable, variable_type in variable_types.items():
if variable_type == vtypes.DatetimeTimeIndex:
raise ValueError("DatetimeTimeIndex variable %s must be set using time_index parameter" % (variable))
if len(self.entities) > 0:
if not isinstance(dataframe, type(self.entities[0].df)):
raise ValueError("All entity dataframes must be of the same type. "
"Cannot add entity of type {} to an entityset with existing entities "
"of type {}".format(type(dataframe), type(self.entities[0].df)))
entity = Entity(
entity_id,
dataframe,
self,
variable_types=variable_types,
index=index,
time_index=time_index,
secondary_time_index=secondary_time_index,
already_sorted=already_sorted,
make_index=make_index)
self.entity_dict[entity.id] = entity
self.reset_data_description()
return self
def normalize_entity(self, base_entity_id, new_entity_id, index,
additional_variables=None, copy_variables=None,
make_time_index=None,
make_secondary_time_index=None,
new_entity_time_index=None,
new_entity_secondary_time_index=None):
"""Create a new entity and relationship from unique values of an existing variable.
Args:
base_entity_id (str) : Entity id from which to split.
new_entity_id (str): Id of the new entity.
index (str): Variable in old entity
that will become index of new entity. Relationship
will be created across this variable.
additional_variables (list[str]):
List of variable ids to remove from
base_entity and move to new entity.
copy_variables (list[str]): List of
variable ids to copy from old entity
and move to new entity.
make_time_index (bool or str, optional): Create time index for new entity based
on time index in base_entity, optionally specifying which variable in base_entity
to use for time_index. If specified as True without a specific variable,
uses the primary time index. Defaults to True if base entity has a time index.
make_secondary_time_index (dict[str -> list[str]], optional): Create a secondary time index
                from the key. The dictionary values
are the variables to associate with the secondary time index. Only one
secondary time index is allowed. If None, only associate the time index.
new_entity_time_index (str, optional): Rename new entity time index.
new_entity_secondary_time_index (str, optional): Rename new entity secondary time index.
"""
base_entity = self.entity_dict[base_entity_id]
additional_variables = additional_variables or []
copy_variables = copy_variables or []
# Check base entity to make sure time index is valid
if base_entity.time_index is not None:
t_index = base_entity[base_entity.time_index]
if not isinstance(t_index, (vtypes.NumericTimeIndex, vtypes.DatetimeTimeIndex)):
base_error = "Time index '{0}' is not a NumericTimeIndex or DatetimeTimeIndex, but type {1}. Use set_time_index on entity '{2}' to set the time_index."
raise TypeError(base_error.format(base_entity.time_index, type(t_index), str(base_entity.id)))
if not isinstance(additional_variables, list):
raise TypeError("'additional_variables' must be a list, but received type {}"
.format(type(additional_variables)))
if len(additional_variables) != len(set(additional_variables)):
raise ValueError("'additional_variables' contains duplicate variables. All variables must be unique.")
if not isinstance(copy_variables, list):
raise TypeError("'copy_variables' must be a list, but received type {}"
.format(type(copy_variables)))
if len(copy_variables) != len(set(copy_variables)):
raise ValueError("'copy_variables' contains duplicate variables. All variables must be unique.")
for v in additional_variables + copy_variables:
if v == index:
raise ValueError("Not copying {} as both index and variable".format(v))
for v in additional_variables:
if v == base_entity.time_index:
raise ValueError("Not moving {} as it is the base time index variable. Perhaps, move the variable to the copy_variables.".format(v))
if isinstance(make_time_index, str):
if make_time_index not in base_entity.df.columns:
raise ValueError("'make_time_index' must be a variable in the base entity")
elif make_time_index not in additional_variables + copy_variables:
raise ValueError("'make_time_index' must be specified in 'additional_variables' or 'copy_variables'")
if index == base_entity.index:
raise ValueError("'index' must be different from the index column of the base entity")
transfer_types = {}
transfer_types[index] = type(base_entity[index])
for v in additional_variables + copy_variables:
if type(base_entity[v]) == vtypes.DatetimeTimeIndex:
transfer_types[v] = vtypes.Datetime
elif type(base_entity[v]) == vtypes.NumericTimeIndex:
transfer_types[v] = vtypes.Numeric
else:
transfer_types[v] = type(base_entity[v])
# create and add new entity
new_entity_df = self[base_entity_id].df.copy()
if make_time_index is None and base_entity.time_index is not None:
make_time_index = True
if isinstance(make_time_index, str):
# Set the new time index to make_time_index.
base_time_index = make_time_index
new_entity_time_index = make_time_index
already_sorted = (new_entity_time_index == base_entity.time_index)
elif make_time_index:
# Create a new time index based on the base entity time index.
base_time_index = base_entity.time_index
if new_entity_time_index is None:
new_entity_time_index = "first_%s_time" % (base_entity.id)
already_sorted = True
assert base_entity.time_index is not None, \
"Base entity doesn't have time_index defined"
if base_time_index not in [v for v in additional_variables]:
copy_variables.append(base_time_index)
transfer_types[new_entity_time_index] = type(base_entity[base_entity.time_index])
else:
new_entity_time_index = None
already_sorted = False
if new_entity_time_index is not None and new_entity_time_index == index:
raise ValueError("time_index and index cannot be the same value, %s" % (new_entity_time_index))
selected_variables = [index] +\
[v for v in additional_variables] +\
[v for v in copy_variables]
new_entity_df2 = new_entity_df. \
drop_duplicates(index, keep='first')[selected_variables]
if make_time_index:
new_entity_df2 = new_entity_df2.rename(columns={base_time_index: new_entity_time_index})
if make_secondary_time_index:
assert len(make_secondary_time_index) == 1, "Can only provide 1 secondary time index"
secondary_time_index = list(make_secondary_time_index.keys())[0]
secondary_variables = [index, secondary_time_index] + list(make_secondary_time_index.values())[0]
secondary_df = new_entity_df. \
drop_duplicates(index, keep='last')[secondary_variables]
if new_entity_secondary_time_index:
secondary_df = secondary_df.rename(columns={secondary_time_index: new_entity_secondary_time_index})
secondary_time_index = new_entity_secondary_time_index
else:
new_entity_secondary_time_index = secondary_time_index
secondary_df = secondary_df.set_index(index)
new_entity_df = new_entity_df2.join(secondary_df, on=index)
else:
new_entity_df = new_entity_df2
base_entity_index = index
transfer_types[index] = vtypes.Categorical
if make_secondary_time_index:
old_ti_name = list(make_secondary_time_index.keys())[0]
ti_cols = list(make_secondary_time_index.values())[0]
ti_cols = [c if c != old_ti_name else secondary_time_index for c in ti_cols]
make_secondary_time_index = {secondary_time_index: ti_cols}
self.entity_from_dataframe(
new_entity_id,
new_entity_df,
index,
already_sorted=already_sorted,
time_index=new_entity_time_index,
secondary_time_index=make_secondary_time_index,
variable_types=transfer_types)
self.entity_dict[base_entity_id].delete_variables(additional_variables)
new_entity = self.entity_dict[new_entity_id]
base_entity.convert_variable_type(base_entity_index, vtypes.Id, convert_data=False)
self.add_relationship(Relationship(new_entity[index], base_entity[base_entity_index]))
self.reset_data_description()
return self
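    # Usage sketch (names are illustrative, mirroring the transactions example
    # in entity_from_dataframe above):
    #
    #   es.normalize_entity(base_entity_id="transactions",
    #                       new_entity_id="sessions",
    #                       index="session_id",
    #                       make_time_index=True)
    #
    # creates a "sessions" entity keyed on session_id and adds a
    # sessions -> transactions relationship.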
###########################################################################
# Data wrangling methods ###############################################
###########################################################################
def concat(self, other, inplace=False):
'''Combine entityset with another to create a new entityset with the
combined data of both entitysets.
'''
assert_string = "Entitysets must have the same entities, relationships"\
", and variable_ids"
assert (self.__eq__(other) and
self.relationships == other.relationships), assert_string
for entity in self.entities:
assert entity.id in other.entity_dict, assert_string
assert (len(self[entity.id].variables) ==
len(other[entity.id].variables)), assert_string
other_variable_ids = [o_variable.id for o_variable in
other[entity.id].variables]
assert (all([variable.id in other_variable_ids
for variable in self[entity.id].variables])), assert_string
if inplace:
combined_es = self
else:
combined_es = copy.deepcopy(self)
has_last_time_index = []
for entity in self.entities:
self_df = entity.df
other_df = other[entity.id].df
combined_df = pd.concat([self_df, other_df])
if entity.created_index == entity.index:
columns = [col for col in combined_df.columns if
col != entity.index or col != entity.time_index]
else:
columns = [entity.index]
combined_df.drop_duplicates(columns, inplace=True)
if entity.time_index:
combined_df.sort_values([entity.time_index, entity.index], inplace=True)
else:
combined_df.sort_index(inplace=True)
if (entity.last_time_index is not None or
other[entity.id].last_time_index is not None):
has_last_time_index.append(entity.id)
combined_es[entity.id].update_data(df=combined_df,
recalculate_last_time_indexes=False)
combined_es.add_last_time_indexes(updated_entities=has_last_time_index)
self.reset_data_description()
return combined_es
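    # Usage sketch: `es1` and `es2` must share the same entities, relationships
    # and variable ids (enforced by the assertions above):
    #
    #   combined = es1.concat(es2)        # returns a new EntitySet
    #   es1.concat(es2, inplace=True)     # or merge other into es1 directly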
###########################################################################
# Indexing methods ###############################################
###########################################################################
def add_last_time_indexes(self, updated_entities=None):
"""
Calculates the last time index values for each entity (the last time
an instance or children of that instance were observed). Used when
calculating features using training windows
Args:
updated_entities (list[str]): List of entity ids to update last_time_index for
(will update all parents of those entities as well)
"""
# Generate graph of entities to find leaf entities
children = defaultdict(list) # parent --> child mapping
child_vars = defaultdict(dict)
for r in self.relationships:
children[r.parent_entity.id].append(r.child_entity)
child_vars[r.parent_entity.id][r.child_entity.id] = r.child_variable
updated_entities = updated_entities or []
if updated_entities:
# find parents of updated_entities
parent_queue = updated_entities[:]
parents = set()
while len(parent_queue):
e = parent_queue.pop(0)
if e in parents:
continue
parents.add(e)
for parent_id, _ in self.get_forward_entities(e):
parent_queue.append(parent_id)
queue = [self[p] for p in parents]
to_explore = parents
else:
to_explore = set([e.id for e in self.entities[:]])
queue = self.entities[:]
explored = set()
for e in queue:
e.last_time_index = None
# We will explore children of entities on the queue,
# which may not be in the to_explore set. Therefore,
# we check whether all elements of to_explore are in
# explored, rather than just comparing length
while not to_explore.issubset(explored):
entity = queue.pop(0)
if entity.last_time_index is None:
if entity.time_index is not None:
lti = entity.df[entity.time_index].copy()
if isinstance(entity.df, dd.DataFrame):
# The current Dask implementation doesn't set the index of the dataframe
# to the entity's index, so we have to do it manually here
lti.index = entity.df[entity.index].copy()
else:
lti = entity.df[entity.index].copy()
if isinstance(entity.df, dd.DataFrame):
lti.index = entity.df[entity.index].copy()
lti = lti.apply(lambda x: None)
else:
lti[:] = None
entity.last_time_index = lti
if entity.id in children:
child_entities = children[entity.id]
# if all children not explored, skip for now
if not set([e.id for e in child_entities]).issubset(explored):
# Now there is a possibility that a child entity
# was not explicitly provided in updated_entities,
# and never made it onto the queue. If updated_entities
# is None then we just load all entities onto the queue
# so we didn't need this logic
for e in child_entities:
if e.id not in explored and e.id not in [q.id for q in queue]:
queue.append(e)
queue.append(entity)
continue
# updated last time from all children
for child_e in child_entities:
if child_e.last_time_index is None:
continue
link_var = child_vars[entity.id][child_e.id].id
if isinstance(child_e.last_time_index, dd.Series):
to_join = child_e.df[link_var]
to_join.index = child_e.df[child_e.index]
lti_df = child_e.last_time_index.to_frame(name='last_time').join(
to_join.to_frame(name=entity.index)
)
new_index = lti_df.index.copy()
new_index.name = None
lti_df.index = new_index
lti_df = lti_df.groupby(lti_df[entity.index]).agg('max')
lti_df = entity.last_time_index.to_frame(name='last_time_old').join(lti_df)
else:
lti_df = pd.DataFrame({'last_time': child_e.last_time_index,
entity.index: child_e.df[link_var]})
# sort by time and keep only the most recent
lti_df.sort_values(['last_time', entity.index],
kind="mergesort", inplace=True)
lti_df.drop_duplicates(entity.index,
keep='last',
inplace=True)
lti_df.set_index(entity.index, inplace=True)
lti_df = lti_df.reindex(entity.last_time_index.index)
lti_df['last_time_old'] = entity.last_time_index
if not isinstance(lti_df, dd.DataFrame) and lti_df.empty:
# Pandas errors out if it tries to do fillna and then max on an empty dataframe
lti_df = pd.Series()
else:
lti_df['last_time'] = lti_df['last_time'].astype('datetime64[ns]')
lti_df['last_time_old'] = lti_df['last_time_old'].astype('datetime64[ns]')
lti_df = lti_df.fillna(pd.to_datetime('1800-01-01 00:00')).max(axis=1)
lti_df = lti_df.replace(pd.to_datetime('1800-01-01 00:00'), pd.NaT)
# lti_df = lti_df.apply(lambda x: x.dropna().max(), axis=1)
entity.last_time_index = lti_df
entity.last_time_index.name = 'last_time'
explored.add(entity.id)
self.reset_data_description()
###########################################################################
# Other ###############################################
###########################################################################
def add_interesting_values(self, max_values=5, verbose=False):
"""Find interesting values for categorical variables, to be used to generate "where" clauses
Args:
max_values (int) : Maximum number of values per variable to add.
verbose (bool) : If True, print summary of interesting values found.
Returns:
None
"""
for entity in self.entities:
entity.add_interesting_values(max_values=max_values, verbose=verbose)
self.reset_data_description()
def plot(self, to_file=None):
"""
Create a UML diagram-ish graph of the EntitySet.
Args:
to_file (str, optional) : Path to where the plot should be saved.
If set to None (as by default), the plot will not be saved.
Returns:
graphviz.Digraph : Graph object that can directly be displayed in
Jupyter notebooks.
"""
GRAPHVIZ_ERR_MSG = ('Please install graphviz to plot entity sets.' +
' (See https://docs.featuretools.com/en/stable/getting_started/install.html#installing-graphviz for' +
' details)')
graphviz = import_or_raise("graphviz", GRAPHVIZ_ERR_MSG)
# Try rendering a dummy graph to see if a working backend is installed
try:
graphviz.Digraph().pipe()
except graphviz.backend.ExecutableNotFound:
raise RuntimeError(
"To plot entity sets, a graphviz backend is required.\n" +
"Install the backend using one of the following commands:\n" +
" Mac OS: brew install graphviz\n" +
" Linux (Ubuntu): sudo apt-get install graphviz\n" +
" Windows: conda install python-graphviz\n" +
" For more details visit: https://docs.featuretools.com/en/stable/getting_started/install.html"
)
if to_file:
# Explicitly cast to str in case a Path object was passed in
to_file = str(to_file)
split_path = to_file.split('.')
if len(split_path) < 2:
raise ValueError("Please use a file extension like '.pdf'" +
" so that the format can be inferred")
format = split_path[-1]
valid_formats = graphviz.backend.FORMATS
if format not in valid_formats:
raise ValueError("Unknown format. Make sure your format is" +
" amongst the following: %s" % valid_formats)
else:
format = None
# Initialize a new directed graph
graph = graphviz.Digraph(self.id, format=format,
graph_attr={'splines': 'ortho'})
# Draw entities
for entity in self.entities:
variables_string = '\l'.join([var.id + ' : ' + var.type_string # noqa: W605
for var in entity.variables])
nrows = entity.shape[0]
label = '{%s (%d row%s)|%s\l}' % (entity.id, nrows, 's' * (nrows > 1), variables_string) # noqa: W605
graph.node(entity.id, shape='record', label=label)
# Draw relationships
for rel in self.relationships:
# Display the key only once if is the same for both related entities
if rel._parent_variable_id == rel._child_variable_id:
label = rel._parent_variable_id
else:
label = '%s -> %s' % (rel._parent_variable_id,
rel._child_variable_id)
graph.edge(rel._child_entity_id, rel._parent_entity_id, xlabel=label)
if to_file:
# Graphviz always appends the format to the file name, so we need to
# remove it manually to avoid file names like 'file_name.pdf.pdf'
offset = len(format) + 1 # Add 1 for the dot
output_path = to_file[:-offset]
graph.render(output_path, cleanup=True)
return graph
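    # Usage sketch (needs the graphviz package plus a system backend, as
    # checked above):
    #
    #   graph = es.plot()                    # display inline, e.g. in Jupyter
    #   es.plot(to_file="entityset.pdf")     # or render to disk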
| 44.426516 | 174 | 0.573924 | [
"BSD-3-Clause"
] | esyyes/featuretools | featuretools/entityset/entityset.py | 43,227 | Python |
from __future__ import unicode_literals
from future.builtins import int, zip
from functools import reduce
from operator import ior, iand
from string import punctuation
from django.core.exceptions import ImproperlyConfigured
from django.db.models import Manager, Q, CharField, TextField
from django.db.models.loading import get_models
from django.db.models.manager import ManagerDescriptor
from django.db.models.query import QuerySet
from django.contrib.sites.managers import CurrentSiteManager as DjangoCSM
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.utils.models import get_model
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import home_slug
class PublishedManager(Manager):
"""
Provides filter for restricting items returned by status and
publish date when the given user is not a staff member.
"""
def published(self, for_user=None):
"""
For non-staff users, return items with a published status and
whose publish and expiry dates fall before and after the
current date when specified.
"""
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
if for_user is not None and for_user.is_staff:
return self.all()
return self.filter(
Q(publish_date__lte=now()) | Q(publish_date__isnull=True),
Q(expiry_date__gte=now()) | Q(expiry_date__isnull=True),
Q(status=CONTENT_STATUS_PUBLISHED))
def get_by_natural_key(self, slug):
return self.get(slug=slug)
def search_fields_to_dict(fields):
"""
In ``SearchableQuerySet`` and ``SearchableManager``, search fields
can either be a sequence, or a dict of fields mapped to weights.
This function converts sequences to a dict mapped to even weights,
so that we're consistently dealing with a dict of fields mapped to
weights, eg: ("title", "content") -> {"title": 1, "content": 1}
"""
if not fields:
return {}
try:
int(list(dict(fields).values())[0])
except (TypeError, ValueError):
fields = dict(zip(fields, [1] * len(fields)))
return fields
class SearchableQuerySet(QuerySet):
"""
QuerySet providing main search functionality for
``SearchableManager``.
"""
def __init__(self, *args, **kwargs):
self._search_ordered = False
self._search_terms = set()
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableQuerySet, self).__init__(*args, **kwargs)
def search(self, query, search_fields=None):
"""
Build a queryset matching words in the given search query,
treating quoted terms as exact phrases and taking into
account + and - symbols as modifiers controlling which terms
to require and exclude.
"""
# ### DETERMINE FIELDS TO SEARCH ###
# Use search_fields arg if given, otherwise use search_fields
# initially configured by the manager class.
if search_fields:
self._search_fields = search_fields_to_dict(search_fields)
if not self._search_fields:
return self.none()
# ### BUILD LIST OF TERMS TO SEARCH FOR ###
# Remove extra spaces, put modifiers inside quoted terms.
terms = " ".join(query.split()).replace("+ ", "+") \
.replace('+"', '"+') \
.replace("- ", "-") \
.replace('-"', '"-') \
.split('"')
# Strip punctuation other than modifiers from terms and create
# terms list, first from quoted terms and then remaining words.
terms = [("" if t[0:1] not in "+-" else t[0:1]) + t.strip(punctuation)
for t in terms[1::2] + "".join(terms[::2]).split()]
# Remove stop words from terms that aren't quoted or use
# modifiers, since words with these are an explicit part of
# the search query. If doing so ends up with an empty term
# list, then keep the stop words.
terms_no_stopwords = [t for t in terms if t.lower() not in
settings.STOP_WORDS]
get_positive_terms = lambda terms: [t.lower().strip(punctuation)
for t in terms if t[0:1] != "-"]
positive_terms = get_positive_terms(terms_no_stopwords)
if positive_terms:
terms = terms_no_stopwords
else:
positive_terms = get_positive_terms(terms)
# Append positive terms (those without the negative modifier)
# to the internal list for sorting when results are iterated.
if not positive_terms:
return self.none()
else:
self._search_terms.update(positive_terms)
# ### BUILD QUERYSET FILTER ###
# Create the queryset combining each set of terms.
excluded = [reduce(iand, [~Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "-"]
required = [reduce(ior, [Q(**{"%s__icontains" % f: t[1:]}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] == "+"]
optional = [reduce(ior, [Q(**{"%s__icontains" % f: t}) for f in
self._search_fields.keys()]) for t in terms if t[0:1] not in "+-"]
queryset = self
if excluded:
queryset = queryset.filter(reduce(iand, excluded))
if required:
queryset = queryset.filter(reduce(iand, required))
# Optional terms aren't relevant to the filter if there are
# terms that are explicitly required.
elif optional:
queryset = queryset.filter(reduce(ior, optional))
return queryset.distinct()
def _clone(self, *args, **kwargs):
"""
Ensure attributes are copied to subsequent queries.
"""
for attr in ("_search_terms", "_search_fields", "_search_ordered"):
kwargs[attr] = getattr(self, attr)
return super(SearchableQuerySet, self)._clone(*args, **kwargs)
def order_by(self, *field_names):
"""
Mark the filter as being ordered if search has occurred.
"""
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names)
def iterator(self):
"""
If search has occurred and no ordering has occurred, decorate
each result with the number of search terms so that it can be
        sorted by the number of occurrences of the terms.
In the case of search fields that span model relationships, we
cannot accurately match occurrences without some very
complicated traversal code, which we won't attempt. So in this
case, namely when there are no matches for a result (count=0),
and search fields contain relationships (double underscores),
we assume one match for one of the fields, and use the average
weight of all search fields with relationships.
"""
results = super(SearchableQuerySet, self).iterator()
if self._search_terms and not self._search_ordered:
results = list(results)
for i, result in enumerate(results):
count = 0
related_weights = []
for (field, weight) in self._search_fields.items():
if "__" in field:
related_weights.append(weight)
for term in self._search_terms:
field_value = getattr(result, field, None)
if field_value:
count += field_value.lower().count(term) * weight
if not count and related_weights:
count = int(sum(related_weights) / len(related_weights))
results[i].result_count = count
return iter(results)
return results
class SearchableManager(Manager):
"""
Manager providing a chainable queryset.
Adapted from http://www.djangosnippets.org/snippets/562/
search method supports spanning across models that subclass the
model being used to search.
"""
def __init__(self, *args, **kwargs):
self._search_fields = kwargs.pop("search_fields", {})
super(SearchableManager, self).__init__(*args, **kwargs)
def get_search_fields(self):
"""
Returns the search field names mapped to weights as a dict.
Used in ``get_queryset`` below to tell ``SearchableQuerySet``
which search fields to use. Also used by ``DisplayableAdmin``
to populate Django admin's ``search_fields`` attribute.
Search fields can be populated via
        ``SearchableManager.__init__``, and are then stored in
``SearchableManager._search_fields``, which serves as an
approach for defining an explicit set of fields to be used.
Alternatively and more commonly, ``search_fields`` can be
defined on models themselves. In this case, we look at the
model and all its base classes, and build up the search
fields from all of those, so the search fields are implicitly
        built up from the inheritance chain.
Finally if no search fields have been defined at all, we
fall back to any fields that are ``CharField`` or ``TextField``
instances.
"""
search_fields = self._search_fields.copy()
if not search_fields:
for cls in reversed(self.model.__mro__):
super_fields = getattr(cls, "search_fields", {})
search_fields.update(search_fields_to_dict(super_fields))
if not search_fields:
search_fields = []
for f in self.model._meta.fields:
if isinstance(f, (CharField, TextField)):
search_fields.append(f.name)
search_fields = search_fields_to_dict(search_fields)
return search_fields
def get_queryset(self):
search_fields = self.get_search_fields()
return SearchableQuerySet(self.model, search_fields=search_fields)
def contribute_to_class(self, model, name):
"""
Django 1.5 explicitly prevents managers being accessed from
abstract classes, which is behaviour the search API has relied
on for years. Here we reinstate it.
"""
super(SearchableManager, self).contribute_to_class(model, name)
setattr(model, name, ManagerDescriptor(self))
def search(self, *args, **kwargs):
"""
Proxy to queryset's search method for the manager's model and
any models that subclass from this manager's model if the
model is abstract.
"""
if not settings.SEARCH_MODEL_CHOICES:
# No choices defined - build a list of leaf models (those
# without subclasses) that inherit from Displayable.
models = [m for m in get_models() if issubclass(m, self.model)]
parents = reduce(ior, [m._meta.get_parent_list() for m in models])
models = [m for m in models if m not in parents]
elif getattr(self.model._meta, "abstract", False):
# When we're combining model subclasses for an abstract
# model (eg Displayable), we only want to use models that
# are represented by the ``SEARCH_MODEL_CHOICES`` setting.
# Now this setting won't contain an exact list of models
# we should use, since it can define superclass models such
# as ``Page``, so we check the parent class list of each
# model when determining whether a model falls within the
# ``SEARCH_MODEL_CHOICES`` setting.
search_choices = set()
models = set()
parents = set()
errors = []
for name in settings.SEARCH_MODEL_CHOICES:
try:
model = get_model(*name.split(".", 1))
except LookupError:
errors.append(name)
else:
search_choices.add(model)
if errors:
raise ImproperlyConfigured("Could not load the model(s) "
"%s defined in the 'SEARCH_MODEL_CHOICES' setting."
% ", ".join(errors))
for model in get_models():
                # Model is actually a subclass of what we're
                # searching (eg Displayable)
is_subclass = issubclass(model, self.model)
# Model satisfies the search choices list - either
# there are no search choices, model is directly in
# search choices, or its parent is.
this_parents = set(model._meta.get_parent_list())
in_choices = not search_choices or model in search_choices
in_choices = in_choices or this_parents & search_choices
if is_subclass and (in_choices or not search_choices):
                    # Add to models we'll search. Also maintain a parent
# set, used below for further refinement of models
# list to search.
models.add(model)
parents.update(this_parents)
# Strip out any models that are superclasses of models,
# specifically the Page model which will generally be the
# superclass for all custom content types, since if we
# query the Page model as well, we will get duplicate
# results.
models -= parents
else:
models = [self.model]
all_results = []
user = kwargs.pop("for_user", None)
for model in models:
try:
queryset = model.objects.published(for_user=user)
except AttributeError:
queryset = model.objects.get_queryset()
all_results.extend(queryset.search(*args, **kwargs))
return sorted(all_results, key=lambda r: r.result_count, reverse=True)
class CurrentSiteManager(DjangoCSM):
"""
Extends Django's site manager to first look up site by ID stored in
the request, the session, then domain for the current request
(accessible via threadlocals in ``mezzanine.core.request``), the
environment variable ``MEZZANINE_SITE_ID`` (which can be used by
    management commands with the ``--site`` arg), finally falling back
to ``settings.SITE_ID`` if none of those match a site.
"""
def __init__(self, field_name=None, *args, **kwargs):
super(DjangoCSM, self).__init__(*args, **kwargs)
self.__field_name = field_name
self.__is_validated = False
def get_queryset(self):
if not self.__is_validated:
try:
# Django <= 1.6
self._validate_field_name()
except AttributeError:
# Django >= 1.7: will populate "self.__field_name".
self._get_field_name()
lookup = {self.__field_name + "__id__exact": current_site_id()}
return super(DjangoCSM, self).get_queryset().filter(**lookup)
class DisplayableManager(CurrentSiteManager, PublishedManager,
SearchableManager):
"""
Manually combines ``CurrentSiteManager``, ``PublishedManager``
and ``SearchableManager`` for the ``Displayable`` model.
"""
def url_map(self, for_user=None, **kwargs):
"""
Returns a dictionary of urls mapped to Displayable subclass
instances, including a fake homepage instance if none exists.
Used in ``mezzanine.core.sitemaps``.
"""
home = self.model(title=_("Home"))
setattr(home, "get_absolute_url", home_slug)
items = {home.get_absolute_url(): home}
for model in get_models():
if issubclass(model, self.model):
for item in (model.objects.published(for_user=for_user)
.filter(**kwargs)
.exclude(slug__startswith="http://")
.exclude(slug__startswith="https://")):
items[item.get_absolute_url()] = item
return items
| 43.364116 | 78 | 0.61369 | [
"BSD-2-Clause"
] | abendig/mezzanine | mezzanine/core/managers.py | 16,435 | Python |
import pdfplumber
import re
import pandas as pd
from datetime import datetime
import sys
# AUTHOR: Simon Rosen
# -----------------------------------
# DEPENDENCIES
# This module requires 'pdfplumber'
#
# Install: pip install pdfplumber
# -----------------------------------
def extract_data(file_path):
pdfp_obj = pdfplumber.open(file_path)
# Helper functions
# text - string you are finding substring in
def get_string_between_2_strings(text, string1, string2):
# print("text: {}\n string1: {}, string2:{}".format("text", string1, string2))
try:
regex_str = string1 + '(.+?)' + string2
# print('regex_str: {}'.format(regex_str))
# all_found = [x.group() for x in re.finditer(regex_str, text)]
all_found = re.search(regex_str, text, re.DOTALL).group(1)
# print(all_found)
except AttributeError:
# no text found between two substrings
# print('Not found')
all_found = [] # apply your error handling
return all_found
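    # e.g. get_string_between_2_strings("a START middle END b", "START", "END")
    # returns " middle "; if nothing matches it falls back to an empty list.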
# GP data contained in paragraph under following heading
# GAUTENG CONFIRMED COVID-19 CASES DISTRICT BREAKDOWN
# GP cases, recoveries, deaths, contacts traced, people de-isolated & hospitalisations
def get_gp_breakdown_data():
        district_pg = 0
first_page_txt = pdfp_obj.pages[0].extract_text()
# GAUTENG CONFIRMED COVID-19 CASES DISTRICT BREAKDOWN
heading_txt_1 = "GAUTENG CONFIRMED COVID-19 CASES DISTRICT BREAKDOWN"
heading_txt_2 = "BREAKDOWN PER DISTRICT"
breakdown_txt = get_string_between_2_strings(first_page_txt, heading_txt_1, heading_txt_2)
if len(breakdown_txt)==0:
breakdown_txt = get_string_between_2_strings(pdfp_obj.pages[1].extract_text(), heading_txt_1, heading_txt_2)
district_pg=1
if len(breakdown_txt)==0:
breakdown_txt = get_string_between_2_strings(pdfp_obj.pages[1].extract_text(), "^", heading_txt_2)
district_pg=1
str_list = list(filter(lambda x: False if x == ' ' else True, breakdown_txt.splitlines()))
str_body = "".join(str_list)
sentences = str_body.split('.')
def find_date(text):
return re.search(r'(\d{2}|\d{1}) [a-zA-Z]* \d{4}', text).group(0)
def get_nums(text, exclude_texts=['COVID-19']):
for exclude_text in exclude_texts:
text = text.replace(exclude_text, '')
num_tuples = re.findall(r'(\d{3}|\d{2}|\d{1})( \d{3}|\d{2}|\d{1})*', text)
num_list = [int(x[0] + x[1].replace(' ', '')) for x in num_tuples]
return num_list
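        # e.g. get_nums("4 571 cases and 102 deaths") -> [4571, 102]
        # (space-separated digit groups are joined back into single numbers)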
date_txt = get_string_between_2_strings(pdfp_obj.pages[0].extract_text(), heading_txt_1, "$")
sentences = "".join(date_txt).split(".")
_gp_covid_stats = {"date": find_date(date_txt)}
# First Sentence
tmp_dict = dict(zip(['cases', 'recoveries', 'deaths'], get_nums(sentences[0])[2:]))
_gp_covid_stats.update(tmp_dict)
# Second Sentence
tmp_dict = dict(zip(['traced', 'de_isolated'], get_nums(sentences[1])[:2]))
_gp_covid_stats.update(tmp_dict)
# Third Sentence
tmp_dict = dict(zip(['hospitalised'], get_nums(sentences[2])))
_gp_covid_stats.update(tmp_dict)
return district_pg, _gp_covid_stats
district_pg, gp_covid_stats = get_gp_breakdown_data()
# DISTRICT BREAKDOWN
def get_district_data():
district_table_list = pdfp_obj.pages[district_pg].extract_tables()[0]
print(type(district_table_list))
dl = []
for i, row in enumerate(district_table_list):
print(i,row)
dl.append(list(filter(lambda x: x != None and len(x) !=0, row)))
dl[-2]=dl[-2]+[0,0,0]
print(dl)
all_list = [[x[i] for x in dl] for i in range(0, len(dl[0]))]
print(all_list,"*******")
gp_breakdown_dict = {curr_list[0]: curr_list[1:] for curr_list in all_list}
gp_breakdown_df = pd.DataFrame.from_dict(gp_breakdown_dict)
print(gp_breakdown_df)
gp_breakdown_df.fillna(0, inplace=True)
gp_breakdown_df.set_index("DISTRICT", inplace=True)
gp_breakdown_df.rename(inplace=True, columns={gp_breakdown_df.columns[0]: "CASES",
gp_breakdown_df.columns[1]: "NEW CASES"})
for i in range(0, 4):
gp_breakdown_df.iloc[:, i] = gp_breakdown_df.iloc[:, i].apply(lambda x: x if type(x)==int else x.replace(' ', ''))
return gp_breakdown_df
gp_district_df = get_district_data()
# ---------------
# SUB-DISTRICTS
# ---------------
def get_extracted_raw_list(page_no):
currPage = pdfp_obj.pages[page_no]
bounding_box = (300, 0, currPage.width, currPage.height)
cropped_page = currPage.crop(bounding_box)
# table_settings = {"vertical_strategy": "text"}
table_settings = {"snap_tolerance": 10, "join_tolerance": 15}
extracted_raw_list = cropped_page.extract_tables(table_settings)[0]
return extracted_raw_list
def get_sub_districts_data(raw_list):
sub_districts_list = []
curr_sub_district = []
prev_sub_district = []
for i in range(1, len(raw_list)):
curr_list = raw_list[i]
if curr_sub_district == [] or not (curr_list[0] == None or curr_list[0] == ''):
# print(prev_sub_district)
if prev_sub_district != []:
sub_districts_list.append(curr_sub_district)
curr_sub_district = curr_list
prev_sub_district = curr_sub_district
# print(curr_sub_district)
if (curr_sub_district[1] == '' and curr_list[1] != '' and curr_list[1] != None):
curr_sub_district[1] = curr_list[1]
if (curr_sub_district[2] == '' and curr_list[2] != '' and curr_list[2] != None):
curr_sub_district[2] = curr_list[2]
if (i == len(raw_list) - 1):
sub_districts_list.append(curr_sub_district)
# Check if first item of list is valid e.g. total and/or recoveries has values
prev_sub_district = sub_districts_list[0]
if (prev_sub_district[1] == '' or prev_sub_district[1] is None) and (prev_sub_district[2] == '' or \
prev_sub_district[2] is None):
sub_districts_list.pop(0)
return sub_districts_list
def get_table_list(page_no):
currPage = pdfp_obj.pages[page_no]
bounding_box = (300, 0, currPage.width, currPage.height)
cropped_page = currPage.crop(bounding_box)
# table_settings = {"vertical_strategy": "text"}
table_settings = {"snap_tolerance": 10, "join_tolerance": 15}
extracted_raw_list = cropped_page.extract_tables(table_settings)[0]
return extracted_raw_list
def get_all_sub_districts(page_start, page_end):
all_sub_districts = []
for i in range(page_start, page_end + 1):
all_sub_districts.extend(get_sub_districts_data(get_table_list(i)))
def remove_spaces(str_no):
if isinstance(str_no, str):
return str_no.replace(" ", "")
else:
return str_no
all_sub_districts = [[x[0], remove_spaces(x[1]), remove_spaces(x[2])] for x in all_sub_districts]
return all_sub_districts
all_sub_dists = get_all_sub_districts(district_pg+1, district_pg+4)
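# all_sub_dists is a flat list with one [name, first_count, second_count] row per sub-district,
# spaces already stripped out of the numbers; which metric each count column holds follows the
# source PDF table. A made-up example row: ['Region A', '1234', '56'].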
pdfp_obj.close()
def get_district_map():
# Johannesburg
jhb_dict = dict(zip(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'Unallocated'],
[[x[1], x[2]] for x in all_sub_dists[0:8]]))
# Tshwane
tsh_keys = list(range(1, 8))
tsh_keys.append('Unallocated')
tsh_dict = dict(zip(tsh_keys, [[x[1], x[2]] for x in all_sub_dists[8:16]]))
# Ekurhuleni
eku_keys = "e1 e2 n1 n2 s1 s2 Unallocated".split(" ")
eku_dict = dict(zip(eku_keys, [[x[1], x[2]] for x in all_sub_dists[16:23]]))
# Sedibeng
sed_keys = "Lesedi Emfuleni Midvaal Unallocated".split(" ")
sed_dict = dict(zip(sed_keys, [[x[1], x[2]] for x in all_sub_dists[23:27]]))
# West Rand
wr_keys = "Mogale Rand_West Merafong Unallocated".split(" ")
wr_dict = dict(zip(wr_keys, [[x[1], x[2]] for x in all_sub_dists[27:31]]))
# All Districts
district_map = {
'Johannesburg': jhb_dict,
'Tshwane': tsh_dict,
'Ekurhuleni': eku_dict,
'Sedibeng': sed_dict,
'West Rand': wr_dict
}
return district_map
district_map = get_district_map()
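# district_map is a nested dict keyed by district and then by sub-district/region, e.g. (shape only):
#   district_map['Sedibeng']['Midvaal'] -> [first_count, second_count]
# Keys mirror the bulletin: JHB regions 'A'-'G', Tshwane regions 1-7, Ekurhuleni
# 'e1'/'e2'/'n1'/'n2'/'s1'/'s2', the named Sedibeng/West Rand municipalities, plus an
# 'Unallocated' bucket per district.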
# DATE
curr_date = datetime.strptime(gp_covid_stats['date'], '%d %B %Y')
date_formatted = datetime.strftime(curr_date, '%d-%m-%Y')
date_yyyymmdd = datetime.strftime(curr_date, '%Y%m%d')
# print(gp_covid_stats['date'], date_formatted, date_yyyymmdd)
##############################
# OUT LIST #
# DETERMINES ORDER OF OUTPUT #
##############################
# List later gets converted to formatted string
jhb_districts = list('ABCDEFG') + ['Unallocated']
tsh_districts = list(range(1, 8)) + ['Unallocated']
wr_districts = ['Mogale', 'Rand_West', 'Merafong', 'Unallocated']
out_list = [
# Date
date_yyyymmdd, date_formatted,
# Gauteng Data
gp_covid_stats['cases'], 'Check', 'Check',
gp_covid_stats['recoveries'], gp_covid_stats['deaths'], 'Check','Check',
gp_covid_stats['hospitalised'],
# DISTRICT TOTALS DATA
# ----------------------
# Johannesburg
gp_district_df.loc['Johannesburg']['CASES'],
gp_district_df.loc['Ekurhuleni']['CASES'],
gp_district_df.loc['Tshwane']['CASES'],
gp_district_df.loc['Sedibeng']['CASES'],
gp_district_df.loc['West Rand']['CASES'],
gp_district_df.loc['Unallocated']['CASES'],
' Check',
gp_district_df.loc['Johannesburg']['DEATHS'],
gp_district_df.loc['Ekurhuleni']['DEATHS'],
gp_district_df.loc['Tshwane']['DEATHS'],
gp_district_df.loc['Sedibeng']['DEATHS'],
gp_district_df.loc['West Rand']['DEATHS'],
gp_district_df.loc['Johannesburg']['RECOVERIES'],
gp_district_df.loc['Ekurhuleni']['RECOVERIES'],
gp_district_df.loc['Tshwane']['RECOVERIES'],
gp_district_df.loc['Sedibeng']['RECOVERIES'],
gp_district_df.loc['West Rand']['RECOVERIES'], ' Check', ' Check'] + \
[district_map['Johannesburg'][x][0] for x in jhb_districts]+\
['Check']+\
[district_map['Johannesburg'][x][1] for x in jhb_districts]+\
['Check']+\
[district_map['Tshwane'][x][0] for x in tsh_districts]+\
['Check']+\
[district_map['Tshwane'][x][1] for x in tsh_districts]+\
['Check']+\
[district_map['Ekurhuleni'][x][0] for x in ['e1','e2','n1','n2','s1','s2','Unallocated']]+\
['Check']+\
[district_map['Ekurhuleni'][x][1] for x in ['e1','e2','n1','n2','s1','s2','Unallocated']]+\
['Check']+\
[district_map['Sedibeng'][x][0] for x in ['Lesedi','Emfuleni','Midvaal','Unallocated']]+\
['Check']+\
[district_map['Sedibeng'][x][1] for x in ['Lesedi','Emfuleni','Midvaal','Unallocated']]+\
['Check']+\
[district_map['West Rand'][x][0] for x in wr_districts]+\
[district_map['West Rand'][x][1] for x in wr_districts]+\
['Check']
def list_to_formatted(in_list, delimiter='\t'):
return delimiter.join(map(str, in_list))
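# e.g. list_to_formatted([1, 'a', 2.5]) -> "1\ta\t2.5"; the default '\t' delimiter turns
# out_list into a single tab-separated line.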
out_str = list_to_formatted(out_list)
# return district_map
return out_str
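# Usage sketch (any locally downloaded daily bulletin PDF will do):
#   python gp_pdf_extractor.py path/to/daily_bulletin.pdf
# which prints the single tab-separated row built by extract_data().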
if __name__ == "__main__":
print(extract_data(sys.argv[1]))
| 40.036667 | 126 | 0.590542 | [
"MIT"
] | delenamalan/covid19za | scripts/gp_pdf_extractor.py | 12,011 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=not-callable
import re
import unittest
from unittest import mock
import pytest
from google.cloud.bigquery import DEFAULT_RETRY, DatasetReference, Table, TableReference
from google.cloud.bigquery.dataset import AccessEntry, Dataset, DatasetListItem
from google.cloud.exceptions import NotFound
from parameterized import parameterized
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.bigquery import (
BigQueryCursor,
BigQueryHook,
_api_resource_configs_duplication_check,
_cleanse_time_partitioning,
_split_tablename,
_validate_src_fmt_configs,
_validate_value,
)
PROJECT_ID = "bq-project"
CREDENTIALS = "bq-credentials"
DATASET_ID = "bq_dataset"
TABLE_ID = "bq_table"
PARTITION_ID = "20200101"
VIEW_ID = 'bq_view'
JOB_ID = "1234"
LOCATION = 'europe-north1'
TABLE_REFERENCE_REPR = {
'tableId': TABLE_ID,
'datasetId': DATASET_ID,
'projectId': PROJECT_ID,
}
TABLE_REFERENCE = TableReference.from_api_repr(TABLE_REFERENCE_REPR)
class _BigQueryBaseTestClass(unittest.TestCase):
def setUp(self) -> None:
class MockedBigQueryHook(BigQueryHook):
def _get_credentials_and_project_id(self):
return CREDENTIALS, PROJECT_ID
self.hook = MockedBigQueryHook()
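# MockedBigQueryHook only stubs the credentials lookup, so every test below gets a hook bound to
# the fake PROJECT_ID/CREDENTIALS pair and never reaches GCP; individual tests then patch
# get_client/get_service/insert_job as needed.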
class TestBigQueryHookMethods(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryConnection")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook._authorize")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.build")
def test_bigquery_client_creation(self, mock_build, mock_authorize, mock_bigquery_connection):
result = self.hook.get_conn()
mock_build.assert_called_once_with(
'bigquery', 'v2', http=mock_authorize.return_value, cache_discovery=False
)
mock_bigquery_connection.assert_called_once_with(
service=mock_build.return_value,
project_id=PROJECT_ID,
hook=self.hook,
use_legacy_sql=self.hook.use_legacy_sql,
location=self.hook.location,
num_retries=self.hook.num_retries,
)
assert mock_bigquery_connection.return_value == result
@mock.patch("airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__")
def test_bigquery_bigquery_conn_id_deprecation_warning(
self,
mock_base_hook_init,
):
bigquery_conn_id = "bigquery conn id"
warning_message = (
"The bigquery_conn_id parameter has been deprecated. "
"You should pass the gcp_conn_id parameter."
)
with pytest.warns(DeprecationWarning) as warnings:
BigQueryHook(bigquery_conn_id=bigquery_conn_id)
mock_base_hook_init.assert_called_once_with(
delegate_to=None,
gcp_conn_id='bigquery conn id',
impersonation_chain=None,
)
assert warning_message == str(warnings[0].message)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_location_propagates_properly(self, run_with_config, _):
# TODO: this creates side effect
assert self.hook.location is None
self.hook.run_query(sql='select 1', location='US')
assert run_with_config.call_count == 1
assert self.hook.location == 'US'
def test_bigquery_insert_rows_not_implemented(self):
with pytest.raises(NotImplementedError):
self.hook.insert_rows(table="table", rows=[1, 2])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_true(self, mock_client):
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_exists_false(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_exists(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_true(self, mock_client):
mock_client.return_value.list_partitions.return_value = [PARTITION_ID]
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_table(self, mock_client):
mock_client.return_value.get_table.side_effect = NotFound("Dataset not found")
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_bigquery_table_partition_exists_false_no_partition(self, mock_client):
mock_client.return_value.list_partitions.return_value = []
result = self.hook.table_partition_exists(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, partition_id=PARTITION_ID
)
mock_client.return_value.list_partitions.assert_called_once_with(TABLE_REFERENCE)
mock_client.assert_called_once_with(project_id=PROJECT_ID)
assert result is False
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.read_gbq')
def test_get_pandas_df(self, mock_read_gbq):
self.hook.get_pandas_df('select 1')
mock_read_gbq.assert_called_once_with(
'select 1', credentials=CREDENTIALS, dialect='legacy', project_id=PROJECT_ID, verbose=False
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_schema_update_options(self, mock_get_service):
with pytest.raises(
Exception,
match=(
r"\['THIS IS NOT VALID'\] contains invalid schema update options."
r"Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]"
),
):
self.hook.run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=["THIS IS NOT VALID"],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_schema_update_and_write_disposition(self, mock_get_service):
with pytest.raises(
Exception,
match="schema_update_options is only allowed if"
" write_disposition is 'WRITE_APPEND' or 'WRITE_TRUNCATE'.",
):
self.hook.run_load(
"test.test",
"test_schema.json",
["test_data.json"],
schema_update_options=['ALLOW_FIELD_ADDITION'],
write_disposition='WRITE_EMPTY',
)
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete",
side_effect=[False, True],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_cancel_queries(self, mock_client, mock_poll_job_complete):
running_job_id = 3
self.hook.running_job_id = running_job_id
self.hook.cancel_query()
mock_poll_job_complete.has_calls(mock.call(running_job_id), mock.call(running_job_id))
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=None)
mock_client.return_value.cancel_job.assert_called_once_with(job_id=running_job_id)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect_default(
self,
mock_insert,
_,
):
self.hook.run_query('query')
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect(self, mock_insert, _):
self.hook.run_query('query', use_legacy_sql=False)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_sql_dialect_legacy_with_query_params(self, mock_insert, _):
params = [
{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"},
}
]
self.hook.run_query('query', use_legacy_sql=False, query_params=params)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['useLegacySql'] is False
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_sql_dialect_legacy_with_query_params_fails(self, _):
params = [
{
'name': "param_name",
'parameterType': {'type': "STRING"},
'parameterValue': {'value': "param_value"},
}
]
with pytest.raises(ValueError, match="Query parameters are not allowed when using legacy SQL"):
self.hook.run_query('query', use_legacy_sql=True, query_params=params)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_without_sql_fails(self, _):
with pytest.raises(
TypeError, match=r"`BigQueryBaseCursor.run_query` missing 1 required positional argument: `sql`"
):
self.hook.run_query(sql=None)
@parameterized.expand(
[
(['ALLOW_FIELD_ADDITION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_APPEND'),
(['ALLOW_FIELD_ADDITION'], 'WRITE_TRUNCATE'),
(['ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
(['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'], 'WRITE_TRUNCATE'),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_schema_update_options(
self,
schema_update_options,
write_disposition,
mock_insert,
mock_get_service,
):
self.hook.run_query(
sql='query',
destination_dataset_table='my_dataset.my_table',
schema_update_options=schema_update_options,
write_disposition=write_disposition,
)
_, kwargs = mock_insert.call_args
assert kwargs['configuration']['query']['schemaUpdateOptions'] == schema_update_options
assert kwargs['configuration']['query']['writeDisposition'] == write_disposition
@parameterized.expand(
[
(
['INCORRECT_OPTION'],
None,
r"\['INCORRECT_OPTION'\] contains invalid schema update options\. "
r"Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
),
(
['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'],
None,
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION', 'INCORRECT_OPTION'\] contains invalid "
r"schema update options\. Please only use one or more of the following options: "
r"\['ALLOW_FIELD_ADDITION', 'ALLOW_FIELD_RELAXATION'\]",
),
(
['ALLOW_FIELD_ADDITION'],
None,
r"schema_update_options is only allowed if write_disposition is "
r"'WRITE_APPEND' or 'WRITE_TRUNCATE'",
),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_run_query_schema_update_options_incorrect(
self,
schema_update_options,
write_disposition,
expected_regex,
mock_get_service,
):
with pytest.raises(ValueError, match=expected_regex):
self.hook.run_query(
sql='query',
destination_dataset_table='my_dataset.my_table',
schema_update_options=schema_update_options,
write_disposition=write_disposition,
)
@parameterized.expand([(True,), (False,)])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_api_resource_configs(
self,
bool_val,
mock_insert,
_,
):
self.hook.run_query('query', api_resource_configs={'query': {'useQueryCache': bool_val}})
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useQueryCache'] is bool_val
assert kwargs["configuration"]['query']['useLegacySql'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_api_resource_configs_duplication_warning(self, mock_get_service):
with pytest.raises(
ValueError,
match=(
r"Values of useLegacySql param are duplicated\. api_resource_configs "
r"contained useLegacySql param in `query` config and useLegacySql was "
r"also provided with arg to run_query\(\) method\. Please remove duplicates\."
),
):
self.hook.run_query(
'query', use_legacy_sql=True, api_resource_configs={'query': {'useLegacySql': False}}
)
def test_validate_value(self):
with pytest.raises(
TypeError, match="case_1 argument must have a type <class 'dict'> not <class 'str'>"
):
_validate_value("case_1", "a", dict)
assert _validate_value("case_2", 0, int) is None
def test_duplication_check(self):
with pytest.raises(
ValueError,
match=r"Values of key_one param are duplicated. api_resource_configs contained key_one param in"
r" `query` config and key_one was also provided with arg to run_query\(\) method. "
r"Please remove duplicates.",
):
key_one = True
_api_resource_configs_duplication_check("key_one", key_one, {"key_one": False})
assert _api_resource_configs_duplication_check("key_one", key_one, {"key_one": True}) is None
def test_validate_src_fmt_configs(self):
source_format = "test_format"
valid_configs = ["test_config_known", "compatibility_val"]
backward_compatibility_configs = {"compatibility_val": "val"}
with pytest.raises(
ValueError, match="test_config_unknown is not a valid src_fmt_configs for type test_format."
):
# This config should raise a value error.
src_fmt_configs = {"test_config_unknown": "val"}
_validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
src_fmt_configs = {"test_config_known": "val"}
src_fmt_configs = _validate_src_fmt_configs(
source_format, src_fmt_configs, valid_configs, backward_compatibility_configs
)
assert (
"test_config_known" in src_fmt_configs
), "src_fmt_configs should contain al known src_fmt_configs"
assert (
"compatibility_val" in src_fmt_configs
), "_validate_src_fmt_configs should add backward_compatibility config"
@parameterized.expand([("AVRO",), ("PARQUET",), ("NEWLINE_DELIMITED_JSON",), ("DATASTORE_BACKUP",)])
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_non_csv_as_src_fmt(self, fmt, _):
try:
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
source_uris=[],
source_format=fmt,
autodetect=True,
)
except ValueError:
self.fail("run_load() raised ValueError unexpectedly!")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_extract(self, mock_insert):
source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
destination_cloud_storage_uris = ["gs://bucket/file.csv"]
expected_configuration = {
"extract": {
"sourceTable": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"compression": "NONE",
"destinationUris": destination_cloud_storage_uris,
"destinationFormat": "CSV",
"fieldDelimiter": ",",
"printHeader": True,
}
}
self.hook.run_extract(
source_project_dataset_table=source_project_dataset_table,
destination_cloud_storage_uris=destination_cloud_storage_uris,
)
mock_insert.assert_called_once_with(configuration=expected_configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.SchemaField")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows(self, mock_client, mock_schema, mock_table):
self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
selected_fields=["field_1", "field_2"],
page_token="page123",
start_index=5,
location=LOCATION,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
mock_schema.has_calls([mock.call(x, "") for x in ["field_1", "field_2"]])
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
selected_fields=mock.ANY,
page_token='page123',
start_index=5,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_list_rows_with_empty_selected_fields(self, mock_client, mock_table):
self.hook.list_rows(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
max_results=10,
page_token="page123",
selected_fields=[],
start_index=5,
location=LOCATION,
)
mock_table.from_api_repr.assert_called_once_with({"tableReference": TABLE_REFERENCE_REPR})
mock_client.return_value.list_rows.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
max_results=10,
page_token='page123',
selected_fields=None,
start_index=5,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_run_table_delete(self, mock_client, mock_table):
source_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
self.hook.run_table_delete(source_project_dataset_table, ignore_if_missing=False)
mock_table.from_string.assert_called_once_with(source_project_dataset_table)
mock_client.return_value.delete_table.assert_called_once_with(
table=mock_table.from_string.return_value, not_found_ok=False
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_create_new_table(self, mock_get, mock_create):
table_resource = {"tableReference": {"tableId": TABLE_ID}}
mock_get.return_value = []
self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
mock_create.assert_called_once_with(table_resource=table_resource, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset_tables")
def test_table_upsert_already_exists(self, mock_get, mock_update):
table_resource = {"tableReference": {"tableId": TABLE_ID}}
mock_get.return_value = [{"tableId": TABLE_ID}]
self.hook.run_table_upsert(dataset_id=DATASET_ID, table_resource=table_resource)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
mock_update.assert_called_once_with(table_resource=table_resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_granting(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = []
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert view_access in dataset.access_entries
mock_update.assert_called_once_with(
fields=["access"],
dataset_resource=dataset.to_api_repr(),
project_id=PROJECT_ID,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_dataset")
def test_run_grant_dataset_view_access_already_granted(self, mock_update, mock_get):
view_table = f"{TABLE_ID}_view"
view_dataset = f"{DATASET_ID}_view"
view_access = AccessEntry(
role=None,
entity_type="view",
entity_id={'projectId': PROJECT_ID, 'datasetId': view_dataset, 'tableId': view_table},
)
dataset = Dataset(DatasetReference.from_string(DATASET_ID, PROJECT_ID))
dataset.access_entries = [view_access]
mock_get.return_value = dataset
self.hook.run_grant_dataset_view_access(
source_dataset=DATASET_ID, view_dataset=view_dataset, view_table=view_table
)
mock_get.assert_called_once_with(project_id=PROJECT_ID, dataset_id=DATASET_ID)
assert len(mock_update.calls) == 0
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset_tables_list(self, mock_client):
table_list = [
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-1"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-1"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "a-2"},
{"projectId": PROJECT_ID, "datasetId": DATASET_ID, "tableId": "b-2"},
]
table_list_response = [Table.from_api_repr({"tableReference": t}) for t in table_list]
mock_client.return_value.list_tables.return_value = table_list_response
dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
result = self.hook.get_dataset_tables_list(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.list_tables.assert_called_once_with(
dataset=dataset_reference, max_results=None
)
assert table_list == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_poll_job_complete(self, mock_client):
self.hook.poll_job_complete(job_id=JOB_ID, location=LOCATION, project_id=PROJECT_ID)
mock_client.assert_called_once_with(location=LOCATION, project_id=PROJECT_ID)
mock_client.return_value.get_job.assert_called_once_with(job_id=JOB_ID)
mock_client.return_value.get_job.return_value.done.assert_called_once_with(retry=DEFAULT_RETRY)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("logging.Logger.info")
def test_cancel_query_jobs_to_cancel(
self,
mock_logger_info,
poll_job_complete,
):
poll_job_complete.return_value = True
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
poll_job_complete.assert_called_once_with(job_id=JOB_ID)
mock_logger_info.has_call(mock.call("No running BigQuery jobs to cancel."))
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_timeout(
self,
mock_logger_info,
mock_sleep,
poll_job_complete,
mock_client,
):
poll_job_complete.side_effect = [False] * 13
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
assert poll_job_complete.call_count == 13
assert mock_sleep.call_count == 11
mock_logger_info.has_call(
mock.call(
f"Stopping polling due to timeout. Job with id {JOB_ID} "
"has not completed cancel and may or may not finish."
)
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.poll_job_complete")
@mock.patch("time.sleep")
@mock.patch("logging.Logger.info")
def test_cancel_query_cancel_completed(
self,
mock_logger_info,
mock_sleep,
poll_job_complete,
mock_client,
):
poll_job_complete.side_effect = [False] * 12 + [True]
self.hook.running_job_id = JOB_ID
self.hook.cancel_query()
mock_client.return_value.cancel_job.assert_called_once_with(job_id=JOB_ID)
assert poll_job_complete.call_count == 13
assert mock_sleep.call_count == 11
mock_logger_info.has_call(mock.call(f"Job successfully canceled: {PROJECT_ID}, {PROJECT_ID}"))
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_schema(self, mock_client):
table = {
"tableReference": TABLE_REFERENCE_REPR,
"schema": {
"fields": [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
]
},
}
mock_client.return_value.get_table.return_value = Table.from_api_repr(table)
result = self.hook.get_schema(dataset_id=DATASET_ID, table_id=TABLE_ID)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
assert "fields" in result
assert len(result["fields"]) == 2
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
def test_update_table_schema_with_policy_tags(self, mock_update, mock_get_schema):
mock_get_schema.return_value = {
"fields": [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'policyTags': {'names': ['sensitive']},
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'policyTags': {'names': ['sensitive']},
},
],
},
]
}
schema_fields_updates = [
{'name': 'emp_name', 'description': 'Name of employee', 'policyTags': {'names': ['sensitive']}},
{
'name': 'salary',
'description': 'Monthly salary in USD',
'policyTags': {},
},
{
'name': 'subrecord',
'description': 'Some Desc',
'fields': [
{'name': 'field_1', 'description': 'Some nested desc'},
],
},
]
expected_result_schema = {
'fields': [
{
'name': 'emp_name',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Name of employee',
'policyTags': {'names': ['sensitive']},
},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'description': 'Monthly salary in USD',
'policyTags': {},
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'description': 'Some Desc',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Some nested desc',
'policyTags': {'names': ['sensitive']},
}
],
},
]
}
self.hook.update_table_schema(
schema_fields_updates=schema_fields_updates,
include_policy_tags=True,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
)
mock_update.assert_called_once_with(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
table_resource={'schema': expected_result_schema},
fields=['schema'],
)
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_schema')
@mock.patch('airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.update_table')
def test_update_table_schema_without_policy_tags(self, mock_update, mock_get_schema):
mock_get_schema.return_value = {
"fields": [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'salary', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'fields': [
{'name': 'field_1', 'type': 'STRING', 'mode': 'REQUIRED'},
],
},
]
}
schema_fields_updates = [
{'name': 'emp_name', 'description': 'Name of employee'},
{
'name': 'salary',
'description': 'Monthly salary in USD',
'policyTags': {'names': ['sensitive']},
},
{
'name': 'subrecord',
'description': 'Some Desc',
'fields': [
{'name': 'field_1', 'description': 'Some nested desc'},
],
},
]
expected_result_schema = {
'fields': [
{'name': 'emp_name', 'type': 'STRING', 'mode': 'REQUIRED', 'description': 'Name of employee'},
{
'name': 'salary',
'type': 'INTEGER',
'mode': 'REQUIRED',
'description': 'Monthly salary in USD',
},
{'name': 'not_changed', 'type': 'INTEGER', 'mode': 'REQUIRED'},
{
'name': 'subrecord',
'type': 'RECORD',
'mode': 'REQUIRED',
'description': 'Some Desc',
'fields': [
{
'name': 'field_1',
'type': 'STRING',
'mode': 'REQUIRED',
'description': 'Some nested desc',
}
],
},
]
}
self.hook.update_table_schema(
schema_fields_updates=schema_fields_updates,
include_policy_tags=False,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
)
mock_update.assert_called_once_with(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
table_resource={'schema': expected_result_schema},
fields=['schema'],
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_invalid_source_format(self, mock_get_service):
with pytest.raises(
Exception,
match=r"JSON is not a valid source format. Please use one of the following types: \['CSV', "
r"'NEWLINE_DELIMITED_JSON', 'AVRO', 'GOOGLE_SHEETS', 'DATASTORE_BACKUP', 'PARQUET'\]",
):
self.hook.run_load("test.test", "test_schema.json", ["test_data.json"], source_format="json")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_succeed(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
self.hook.insert_all(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
mock_client.return_value.get_table.assert_called_once_with(TABLE_REFERENCE)
mock_client.return_value.insert_rows.assert_called_once_with(
table=mock_client.return_value.get_table.return_value,
rows=rows,
ignore_unknown_values=True,
skip_invalid_rows=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_insert_all_fail(self, mock_client):
rows = [{"json": {"a_key": "a_value_0"}}]
mock_client.return_value.insert_rows.return_value = ["some", "errors"]
with pytest.raises(AirflowException, match="insert error"):
self.hook.insert_all(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, rows=rows, fail_on_error=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
labels={'label1': 'test1', 'label2': 'test2'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['labels'] == {'label1': 'test1', 'label2': 'test2'}
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.QueryJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_insert_job(self, mock_client, mock_query_job):
job_conf = {
"query": {
"query": "SELECT * FROM test",
"useLegacySql": "False",
}
}
mock_query_job._JOB_TYPE = "query"
self.hook.insert_job(
configuration=job_conf,
job_id=JOB_ID,
project_id=PROJECT_ID,
location=LOCATION,
)
mock_client.assert_called_once_with(
project_id=PROJECT_ID,
location=LOCATION,
)
mock_query_job.from_api_repr.assert_called_once_with(
{
'configuration': job_conf,
'jobReference': {'jobId': JOB_ID, 'projectId': PROJECT_ID, 'location': LOCATION},
},
mock_client.return_value,
)
mock_query_job.from_api_repr.return_value.result.assert_called_once_with()
class TestBigQueryTableSplitter(unittest.TestCase):
def test_internal_need_default_project(self):
with pytest.raises(Exception, match="INTERNAL: No default project is specified"):
_split_tablename("dataset.table", None)
@parameterized.expand(
[
("project", "dataset", "table", "dataset.table"),
("alternative", "dataset", "table", "alternative:dataset.table"),
("alternative", "dataset", "table", "alternative.dataset.table"),
("alt1:alt", "dataset", "table", "alt1:alt.dataset.table"),
("alt1:alt", "dataset", "table", "alt1:alt:dataset.table"),
]
)
def test_split_tablename(self, project_expected, dataset_expected, table_expected, table_input):
default_project_id = "project"
project, dataset, table = _split_tablename(table_input, default_project_id)
assert project_expected == project
assert dataset_expected == dataset
assert table_expected == table
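# Behaviour pinned down by the cases above (taken straight from the parameterized rows):
#   _split_tablename("dataset.table", "project")             -> ("project", "dataset", "table")
#   _split_tablename("alternative:dataset.table", "project") -> ("alternative", "dataset", "table")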
@parameterized.expand(
[
("alt1:alt2:alt3:dataset.table", None, "Use either : or . to specify project got {}"),
(
"alt1.alt.dataset.table",
None,
r"Expect format of \(<project\.\|<project\:\)<dataset>\.<table>, got {}",
),
(
"alt1:alt2:alt.dataset.table",
"var_x",
"Format exception for var_x: Use either : or . to specify project got {}",
),
(
"alt1:alt2:alt:dataset.table",
"var_x",
"Format exception for var_x: Use either : or . to specify project got {}",
),
(
"alt1.alt.dataset.table",
"var_x",
r"Format exception for var_x: Expect format of "
r"\(<project\.\|<project:\)<dataset>.<table>, got {}",
),
]
)
def test_invalid_syntax(self, table_input, var_name, exception_message):
default_project_id = "project"
with pytest.raises(Exception, match=exception_message.format(table_input)):
_split_tablename(table_input, default_project_id, var_name)
class TestTableOperations(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_view(self, mock_bq_client, mock_table):
view = {
'query': 'SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*`',
"useLegacySql": False,
}
self.hook.create_empty_table(
project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID, view=view, retry=DEFAULT_RETRY
)
body = {'tableReference': TABLE_REFERENCE_REPR, 'view': view}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_patch_table(self, mock_client, mock_table):
description_patched = 'Test description.'
expiration_time_patched = 2524608000000
friendly_name_patched = 'Test friendly name.'
labels_patched = {'label1': 'test1', 'label2': 'test2'}
schema_patched = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
]
time_partitioning_patched = {'expirationMs': 10000000}
require_partition_filter_patched = True
view_patched = {
'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
'useLegacySql': False,
}
self.hook.patch_table(
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
description=description_patched,
expiration_time=expiration_time_patched,
friendly_name=friendly_name_patched,
labels=labels_patched,
schema=schema_patched,
time_partitioning=time_partitioning_patched,
require_partition_filter=require_partition_filter_patched,
view=view_patched,
)
body = {
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {"fields": schema_patched},
"timePartitioning": time_partitioning_patched,
"view": view_patched,
"requirePartitionFilter": require_partition_filter_patched,
}
fields = list(body.keys())
body["tableReference"] = TABLE_REFERENCE_REPR
mock_table.from_api_repr.assert_called_once_with(body)
mock_client.return_value.update_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, fields=fields
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_succeed(self, mock_bq_client, mock_table):
self.hook.create_empty_table(project_id=PROJECT_ID, dataset_id=DATASET_ID, table_id=TABLE_ID)
body = {
'tableReference': {
'tableId': TABLE_ID,
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
}
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_with_extras_succeed(self, mock_bq_client, mock_table):
schema_fields = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'created', 'type': 'DATE', 'mode': 'REQUIRED'},
]
time_partitioning = {"field": "created", "type": "DAY"}
cluster_fields = ['name']
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
schema_fields=schema_fields,
time_partitioning=time_partitioning,
cluster_fields=cluster_fields,
)
body = {
'tableReference': {
'tableId': TABLE_ID,
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
},
'schema': {'fields': schema_fields},
'timePartitioning': time_partitioning,
'clustering': {'fields': cluster_fields},
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, exists_ok=True, retry=DEFAULT_RETRY
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_tables_list(self, mock_client):
table_list = [
{
"kind": "bigquery#table",
"id": "your-project:your_dataset.table1",
"tableReference": {
"projectId": "your-project",
"datasetId": "your_dataset",
"tableId": "table1",
},
"type": "TABLE",
"creationTime": "1565781859261",
},
{
"kind": "bigquery#table",
"id": "your-project:your_dataset.table2",
"tableReference": {
"projectId": "your-project",
"datasetId": "your_dataset",
"tableId": "table2",
},
"type": "TABLE",
"creationTime": "1565782713480",
},
]
table_list_response = [Table.from_api_repr(t) for t in table_list]
mock_client.return_value.list_tables.return_value = table_list_response
dataset_reference = DatasetReference(PROJECT_ID, DATASET_ID)
result = self.hook.get_dataset_tables(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.list_tables.assert_called_once_with(
dataset=dataset_reference,
max_results=None,
retry=DEFAULT_RETRY,
)
for res, exp in zip(result, table_list):
assert res["tableId"] == exp["tableReference"]["tableId"]
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_materialized_view(self, mock_bq_client, mock_table):
query = """
SELECT product, SUM(amount)
FROM `test-project-id.test_dataset_id.test_table_prefix*`
GROUP BY product
"""
materialized_view = {
'query': query,
'enableRefresh': True,
'refreshIntervalMs': 2000000,
}
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
materialized_view=materialized_view,
retry=DEFAULT_RETRY,
)
body = {'tableReference': TABLE_REFERENCE_REPR, 'materializedView': materialized_view}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
class TestBigQueryCursor(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_execute_with_parameters(self, mock_insert, _):
bq_cursor = self.hook.get_cursor()
bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
conf = {
'query': {
'query': "SELECT 'bar'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
}
mock_insert.assert_called_once_with(configuration=conf, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_execute_many(self, mock_insert, _):
bq_cursor = self.hook.get_cursor()
bq_cursor.executemany("SELECT %(foo)s", [{"foo": "bar"}, {"foo": "baz"}])
assert mock_insert.call_count == 2
mock_insert.assert_has_calls(
[
mock.call(
configuration={
'query': {
'query': "SELECT 'bar'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
},
project_id=PROJECT_ID,
),
mock.call(
configuration={
'query': {
'query': "SELECT 'baz'",
'priority': 'INTERACTIVE',
'useLegacySql': True,
'schemaUpdateOptions': [],
}
},
project_id=PROJECT_ID,
),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_description(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
with pytest.raises(NotImplementedError):
bq_cursor.description
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_close(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.close() # pylint: disable=assignment-from-no-return
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_rowcount(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.rowcount
assert -1 == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.next")
def test_fetchone(self, mock_next, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.fetchone()
assert mock_next.call_count == 1
assert mock_next.return_value == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch(
"airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone", side_effect=[1, 2, 3, None]
)
def test_fetchall(self, mock_fetchone, mock_get_service):
bq_cursor = self.hook.get_cursor()
result = bq_cursor.fetchall()
assert [1, 2, 3] == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.fetchone")
def test_fetchmany(self, mock_fetchone, mock_get_service):
side_effect_values = [1, 2, 3, None]
bq_cursor = self.hook.get_cursor()
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany()
assert [1] == result
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany(2)
assert [1, 2] == result
mock_fetchone.side_effect = side_effect_values
result = bq_cursor.fetchmany(5)
assert [1, 2, 3] == result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next_no_jobid(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = None
result = bq_cursor.next()
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next_buffer(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
bq_cursor.buffer = [1, 2]
result = bq_cursor.next()
assert 1 == result
result = bq_cursor.next()
assert 2 == result
bq_cursor.all_pages_loaded = True
result = bq_cursor.next()
assert result is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_next(self, mock_get_service):
mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
mock_execute = mock_get_query_results.return_value.execute
mock_execute.return_value = {
"rows": [
{"f": [{"v": "one"}, {"v": 1}]},
{"f": [{"v": "two"}, {"v": 2}]},
],
"pageToken": None,
"schema": {
"fields": [
{"name": "field_1", "type": "STRING"},
{"name": "field_2", "type": "INTEGER"},
]
},
}
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
bq_cursor.location = LOCATION
result = bq_cursor.next()
assert ['one', 1] == result
result = bq_cursor.next()
assert ['two', 2] == result
mock_get_query_results.assert_called_once_with(
jobId=JOB_ID, location=LOCATION, pageToken=None, projectId='bq-project'
)
mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
def test_next_no_rows(self, mock_flush_results, mock_get_service):
mock_get_query_results = mock_get_service.return_value.jobs.return_value.getQueryResults
mock_execute = mock_get_query_results.return_value.execute
mock_execute.return_value = {}
bq_cursor = self.hook.get_cursor()
bq_cursor.job_id = JOB_ID
result = bq_cursor.next()
assert result is None
mock_get_query_results.assert_called_once_with(
jobId=JOB_ID, location=None, pageToken=None, projectId='bq-project'
)
mock_execute.assert_called_once_with(num_retries=bq_cursor.num_retries)
assert mock_flush_results.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryCursor.flush_results")
def test_flush_cursor_in_execute(self, _, mock_insert, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.execute("SELECT %(foo)s", {"foo": "bar"})
assert mock_insert.call_count == 1
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_flush_cursor(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
bq_cursor.page_token = '456dcea9-fcbf-4f02-b570-83f5297c685e'
bq_cursor.job_id = 'c0a79ae4-0e72-4593-a0d0-7dbbf726f193'
bq_cursor.all_pages_loaded = True
bq_cursor.buffer = [('a', 100, 200), ('b', 200, 300)]
bq_cursor.flush_results()
assert bq_cursor.page_token is None
assert bq_cursor.job_id is None
assert not bq_cursor.all_pages_loaded
assert bq_cursor.buffer == []
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_arraysize(self, mock_get_service):
bq_cursor = self.hook.get_cursor()
assert bq_cursor.buffersize is None
assert bq_cursor.arraysize == 1
bq_cursor.set_arraysize(10)
assert bq_cursor.buffersize == 10
assert bq_cursor.arraysize == 10
class TestDatasetsOperations(_BigQueryBaseTestClass):
def test_create_empty_dataset_no_dataset_id_err(self):
with pytest.raises(ValueError, match=r"Please specify `datasetId`"):
self.hook.create_empty_dataset(dataset_id=None, project_id=None)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_with_params(self, mock_client, mock_dataset):
self.hook.create_empty_dataset(project_id=PROJECT_ID, dataset_id=DATASET_ID, location=LOCATION)
expected_body = {
"location": LOCATION,
"datasetReference": {"datasetId": DATASET_ID, "projectId": PROJECT_ID},
}
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(expected_body)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_with_object(self, mock_client, mock_dataset):
dataset = {
"location": "LOCATION",
"datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
}
self.hook.create_empty_dataset(dataset_reference=dataset)
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(dataset)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_dataset_use_values_from_object(self, mock_client, mock_dataset):
dataset = {
"location": "LOCATION",
"datasetReference": {"datasetId": "DATASET_ID", "projectId": "PROJECT_ID"},
}
self.hook.create_empty_dataset(
dataset_reference=dataset,
location="Unknown location",
dataset_id="Fashionable Dataset",
project_id="Amazing Project",
)
api_repr = mock_dataset.from_api_repr
api_repr.assert_called_once_with(dataset)
mock_client.return_value.create_dataset.assert_called_once_with(
dataset=api_repr.return_value, exists_ok=True
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_dataset(self, mock_client):
_expected_result = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
}
expected_result = Dataset.from_api_repr(_expected_result)
mock_client.return_value.get_dataset.return_value = expected_result
result = self.hook.get_dataset(dataset_id=DATASET_ID, project_id=PROJECT_ID)
mock_client.return_value.get_dataset.assert_called_once_with(
dataset_ref=DatasetReference(PROJECT_ID, DATASET_ID)
)
assert result == expected_result
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_get_datasets_list(self, mock_client):
datasets = [
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
},
{
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_1_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_1_test"},
},
]
return_value = [DatasetListItem(d) for d in datasets]
mock_client.return_value.list_datasets.return_value = return_value
result = self.hook.get_datasets_list(project_id=PROJECT_ID)
mock_client.return_value.list_datasets.assert_called_once_with(
project=PROJECT_ID,
include_all=False,
filter=None,
max_results=None,
page_token=None,
retry=DEFAULT_RETRY,
)
for exp, res in zip(datasets, result):
assert res.full_dataset_id == exp["id"]
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_delete_dataset(self, mock_client):
delete_contents = True
self.hook.delete_dataset(
project_id=PROJECT_ID, dataset_id=DATASET_ID, delete_contents=delete_contents
)
mock_client.return_value.delete_dataset.assert_called_once_with(
dataset=DatasetReference(PROJECT_ID, DATASET_ID),
delete_contents=delete_contents,
retry=DEFAULT_RETRY,
not_found_ok=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
def test_patch_dataset(self, mock_get_service):
dataset_resource = {"access": [{"role": "WRITER", "groupByEmail": "cloud-logs@google.com"}]}
method = mock_get_service.return_value.datasets.return_value.patch
self.hook.patch_dataset(
dataset_id=DATASET_ID, project_id=PROJECT_ID, dataset_resource=dataset_resource
)
method.assert_called_once_with(projectId=PROJECT_ID, datasetId=DATASET_ID, body=dataset_resource)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Dataset")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_dataset(self, mock_client, mock_dataset):
dataset_resource = {
"kind": "bigquery#dataset",
"location": "US",
"id": "your-project:dataset_2_test",
"datasetReference": {"projectId": "your-project", "datasetId": "dataset_2_test"},
}
method = mock_client.return_value.update_dataset
dataset = Dataset.from_api_repr(dataset_resource)
mock_dataset.from_api_repr.return_value = dataset
method.return_value = dataset
result = self.hook.update_dataset(
dataset_id=DATASET_ID,
project_id=PROJECT_ID,
dataset_resource=dataset_resource,
fields=["location"],
)
mock_dataset.from_api_repr.assert_called_once_with(dataset_resource)
method.assert_called_once_with(
dataset=dataset,
fields=["location"],
retry=DEFAULT_RETRY,
)
assert result == dataset
class TestTimePartitioningInRunJob(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_default(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load'].get('timePartitioning') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_with_auto_detect(self, mock_insert):
destination_project_dataset_table = "autodetect.table"
self.hook.run_load(destination_project_dataset_table, [], [], autodetect=True)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['autodetect'] is True
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_arg(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
schema_fields=[],
source_uris=[],
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
)
configuration = {
'load': {
'autodetect': False,
'createDisposition': 'CREATE_IF_NEEDED',
'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
'sourceFormat': 'CSV',
'sourceUris': [],
'writeDisposition': 'WRITE_EMPTY',
'ignoreUnknownValues': False,
'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
'skipLeadingRows': 0,
'fieldDelimiter': ',',
'quote': None,
'allowQuotedNewlines': False,
'encoding': 'UTF-8',
}
}
mock_insert.assert_called_once_with(configuration=configuration, project_id=PROJECT_ID)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table=f"{DATASET_ID}.{TABLE_ID}",
time_partitioning={'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
)
configuration = {
'query': {
'query': 'select 1',
'priority': 'INTERACTIVE',
'useLegacySql': True,
'timePartitioning': {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000},
'schemaUpdateOptions': [],
'destinationTable': {'projectId': PROJECT_ID, 'datasetId': DATASET_ID, 'tableId': TABLE_ID},
'allowLargeResults': False,
'flattenResults': None,
'writeDisposition': 'WRITE_EMPTY',
'createDisposition': 'CREATE_IF_NEEDED',
}
}
mock_insert.assert_called_once_with(configuration=configuration, project_id=PROJECT_ID)
def test_dollar_makes_partition(self):
tp_out = _cleanse_time_partitioning('test.teast$20170101', {})
expect = {'type': 'DAY'}
assert tp_out == expect
def test_extra_time_partitioning_options(self):
tp_out = _cleanse_time_partitioning(
'test.teast', {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
)
expect = {'type': 'DAY', 'field': 'test_field', 'expirationMs': 1000}
assert tp_out == expect
class TestClusteringInRunJob(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_default(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load'].get('clustering') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_arg(self, mock_insert):
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['clustering'] == {'fields': ['field1', 'field2']}
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_default(self, mock_insert):
self.hook.run_query(sql='select 1')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query'].get('clustering') is None
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_arg(self, mock_insert):
self.hook.run_query(
sql='select 1',
destination_dataset_table='my_dataset.my_table',
cluster_fields=['field1', 'field2'],
time_partitioning={'type': 'DAY'},
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['clustering'] == {'fields': ['field1', 'field2']}
class TestBigQueryHookLegacySql(_BigQueryBaseTestClass):
"""Ensure `use_legacy_sql` param in `BigQueryHook` propagates properly."""
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_hook_uses_legacy_sql_by_default(self, mock_insert, _):
self.hook.get_first('query')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useLegacySql'] is True
@mock.patch(
'airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id',
return_value=(CREDENTIALS, PROJECT_ID),
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_service")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_legacy_sql_override_propagates_properly(
self, mock_insert, mock_get_service, mock_get_creds_and_proj_id
):
bq_hook = BigQueryHook(use_legacy_sql=False)
bq_hook.get_first('query')
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['query']['useLegacySql'] is False
class TestBigQueryHookRunWithConfiguration(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.LoadJob")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.get_client")
def test_run_with_configuration_location(self, mock_client, mock_job):
running_job_id = 'job_vjdi28vskdui2onru23'
location = 'asia-east1'
mock_job._JOB_TYPE = "load"
conf = {"load": {}}
self.hook.running_job_id = running_job_id
self.hook.location = location
self.hook.run_with_configuration(conf)
mock_client.assert_called_once_with(project_id=PROJECT_ID, location=location)
mock_job.from_api_repr.assert_called_once_with(
{
"configuration": conf,
"jobReference": {"jobId": mock.ANY, "projectId": PROJECT_ID, "location": location},
},
mock_client.return_value,
)
class TestBigQueryWithKMS(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_create_empty_table_with_kms(self, mock_bq_client, mock_table):
schema_fields = [{"name": "id", "type": "STRING", "mode": "REQUIRED"}]
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.create_empty_table(
project_id=PROJECT_ID,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
schema_fields=schema_fields,
encryption_configuration=encryption_configuration,
)
body = {
"tableReference": {"tableId": TABLE_ID, 'projectId': PROJECT_ID, 'datasetId': DATASET_ID},
"schema": {"fields": schema_fields},
"encryptionConfiguration": encryption_configuration,
}
mock_table.from_api_repr.assert_called_once_with(body)
mock_bq_client.return_value.create_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value,
exists_ok=True,
retry=DEFAULT_RETRY,
)
# pylint: disable=too-many-locals
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_with_kms(self, mock_create):
external_project_dataset_table = f"{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}"
source_uris = ['test_data.csv']
source_format = 'CSV'
autodetect = False
compression = 'NONE'
ignore_unknown_values = False
max_bad_records = 10
skip_leading_rows = 1
field_delimiter = ','
quote_character = None
allow_quoted_newlines = False
allow_jagged_rows = False
encoding = "UTF-8"
labels = {'label1': 'test1', 'label2': 'test2'}
schema_fields = [{'mode': 'REQUIRED', 'name': 'id', 'type': 'STRING', 'description': None}]
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.create_external_table(
external_project_dataset_table=external_project_dataset_table,
source_uris=source_uris,
source_format=source_format,
autodetect=autodetect,
compression=compression,
ignore_unknown_values=ignore_unknown_values,
max_bad_records=max_bad_records,
skip_leading_rows=skip_leading_rows,
field_delimiter=field_delimiter,
quote_character=quote_character,
allow_jagged_rows=allow_jagged_rows,
encoding=encoding,
allow_quoted_newlines=allow_quoted_newlines,
labels=labels,
schema_fields=schema_fields,
encryption_configuration=encryption_configuration,
)
body = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values,
'schema': {'fields': schema_fields},
'maxBadRecords': max_bad_records,
'csvOptions': {
'skipLeadingRows': skip_leading_rows,
'fieldDelimiter': field_delimiter,
'quote': quote_character,
'allowQuotedNewlines': allow_quoted_newlines,
'allowJaggedRows': allow_jagged_rows,
'encoding': encoding,
},
},
'tableReference': {
'projectId': PROJECT_ID,
'datasetId': DATASET_ID,
'tableId': TABLE_ID,
},
'labels': labels,
"encryptionConfiguration": encryption_configuration,
}
mock_create.assert_called_once_with(
table_resource=body,
project_id=PROJECT_ID,
location=None,
exists_ok=True,
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Table")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.Client")
def test_update_table(self, mock_client, mock_table):
description_patched = 'Test description.'
expiration_time_patched = 2524608000000
friendly_name_patched = 'Test friendly name.'
labels_patched = {'label1': 'test1', 'label2': 'test2'}
schema_patched = [
{'name': 'id', 'type': 'STRING', 'mode': 'REQUIRED'},
{'name': 'name', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'balance', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'new_field', 'type': 'STRING', 'mode': 'NULLABLE'},
]
time_partitioning_patched = {'expirationMs': 10000000}
require_partition_filter_patched = True
view_patched = {
'query': "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 500",
'useLegacySql': False,
}
body = {
"tableReference": {
"projectId": PROJECT_ID,
"datasetId": DATASET_ID,
"tableId": TABLE_ID,
},
"description": description_patched,
"expirationTime": expiration_time_patched,
"friendlyName": friendly_name_patched,
"labels": labels_patched,
"schema": {"fields": schema_patched},
"timePartitioning": time_partitioning_patched,
"view": view_patched,
"requirePartitionFilter": require_partition_filter_patched,
}
fields = list(body.keys())
self.hook.update_table(
table_resource=body,
fields=fields,
dataset_id=DATASET_ID,
table_id=TABLE_ID,
project_id=PROJECT_ID,
)
mock_table.from_api_repr.assert_called_once_with(body)
mock_client.return_value.update_table.assert_called_once_with(
table=mock_table.from_api_repr.return_value, fields=fields
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_query_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_query(sql='query', encryption_configuration=encryption_configuration)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['query']['destinationEncryptionConfiguration'] is encryption_configuration
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_copy_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_copy(
source_project_dataset_tables='p.d.st',
destination_project_dataset_table='p.d.dt',
encryption_configuration=encryption_configuration,
)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['copy']['destinationEncryptionConfiguration'] is encryption_configuration
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_with_kms(self, mock_insert):
encryption_configuration = {"kms_key_name": "projects/p/locations/l/keyRings/k/cryptoKeys/c"}
self.hook.run_load(
destination_project_dataset_table='p.d.dt',
source_uris=['abc.csv'],
autodetect=True,
encryption_configuration=encryption_configuration,
)
_, kwargs = mock_insert.call_args
assert (
kwargs["configuration"]['load']['destinationEncryptionConfiguration'] is encryption_configuration
)
class TestBigQueryBaseCursorMethodsDeprecationWarning(unittest.TestCase):
@parameterized.expand(
[
("create_empty_table",),
("create_empty_dataset",),
("get_dataset_tables",),
("delete_dataset",),
("create_external_table",),
("patch_table",),
("insert_all",),
("update_dataset",),
("patch_dataset",),
("get_dataset_tables_list",),
("get_datasets_list",),
("get_dataset",),
("run_grant_dataset_view_access",),
("run_table_upsert",),
("run_table_delete",),
("get_tabledata",),
("get_schema",),
("poll_job_complete",),
("cancel_query",),
("run_with_configuration",),
("run_load",),
("run_copy",),
("run_extract",),
("run_query",),
]
)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook")
def test_deprecation_warning(self, func_name, mock_bq_hook):
args, kwargs = [1], {"param1": "val1"}
new_path = re.escape(f"`airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.{func_name}`")
message_pattern = fr"This method is deprecated\.\s+Please use {new_path}"
message_regex = re.compile(message_pattern, re.MULTILINE)
mocked_func = getattr(mock_bq_hook, func_name)
bq_cursor = BigQueryCursor(mock.MagicMock(), PROJECT_ID, mock_bq_hook)
func = getattr(bq_cursor, func_name)
with pytest.warns(DeprecationWarning, match=message_regex):
_ = func(*args, **kwargs)
mocked_func.assert_called_once_with(*args, **kwargs)
assert re.search(f".*{new_path}.*", func.__doc__)
class TestBigQueryWithLabelsAndDescription(_BigQueryBaseTestClass):
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_labels(self, mock_insert):
labels = {'label1': 'test1', 'label2': 'test2'}
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
labels=labels,
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['destinationTableProperties']['labels'] is labels
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.insert_job")
def test_run_load_description(self, mock_insert):
description = "Test Description"
self.hook.run_load(
destination_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
description=description,
)
_, kwargs = mock_insert.call_args
assert kwargs["configuration"]['load']['destinationTableProperties']['description'] is description
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_labels(self, mock_create):
labels = {'label1': 'test1', 'label2': 'test2'}
self.hook.create_external_table(
external_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
labels=labels,
)
_, kwargs = mock_create.call_args
self.assertDictEqual(kwargs['table_resource']['labels'], labels)
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryHook.create_empty_table")
def test_create_external_table_description(self, mock_create):
description = "Test Description"
self.hook.create_external_table(
external_project_dataset_table='my_dataset.my_table',
schema_fields=[],
source_uris=[],
description=description,
)
_, kwargs = mock_create.call_args
assert kwargs['table_resource']['description'] is description
| 42.402087 | 110 | 0.631674 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | AliMuhammadOfficial/airflow | tests/providers/google/cloud/hooks/test_bigquery.py | 85,313 | Python |
from math import sqrt
import networkx as nx
import matplotlib.pyplot as plt
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
class Distribution:
from random import random
from random import gauss
from numpy.random import poisson
_h = [0]
h = property(lambda s: s._h[0])
drop_rate = 0
move_rate = 0
move_int = 600
tx_rate = 0
em_rate = 0
aw_rate = lambda s, n: 0
@classmethod
def aloha(cls, k, n):
r = cls.random()
return r
    @classmethod
    def tx_chn(cls, a, b, g):
        # Channel propagation delay between nodes a and b (zero in this base model).
        return 0
    @classmethod
    def tx_awt(cls, a, b, g):
        # Waiting time: timeouts of the packets queued at a plus the access delay,
        # which depends on how many children b has in the graph g.
        fold = sum(p.timeout for p in a.buffer)
        return fold + cls.aw_rate(len(list(g.successors(b.id))))
@classmethod
def emit(cls, k):
return cls.poisson(cls.em_rate*k)
@classmethod
def tx(cls, a, b, g):
return cls.tx_awt(a, b, g) + cls.tx_chn(a, b, g)
@classmethod
def mv(cls):
if cls.random() < cls.move_rate:
return cls.random()*cls.move_int
@classmethod
def drop(cls):
return cls.random() < cls.drop_rate
class Abonent(Distribution):
drop_rate = 1e-8
move_rate = 0
aw_rate = 1.0/1e9
em_rate = property(lambda s: s.h/100.0)
class MobileAbonent(Abonent):
move_rate = 0.5
class Operator(Distribution):
drop_rate = 1e-8
move_rate = 0
aw_rate = 1.0/1e10
em_rate = 0
class Server(Distribution):
drop_rate = 1e-8
move_rate = 0
aw_rate = 1.0/5e9
em_rate = property(lambda s: s.h/100.0)
class WiFi(Distribution):
mu, sigma = 2e-6, 1e-6
drop_rate = 0.005
tx_rate = 0.1
aw_rate = lambda s, n: s.aloha(s.mu, n)
class Fiber(Distribution):
mu, sigma = 2e-8, 1e-8
drop_rate = 1e-12
tx_rate = 10
aw_rate = lambda s, n: s.aloha(s.mu, n)
class Ethernet(Distribution):
mu = 2e-7
drop_rate = 1e-10
tx_rate = property(lambda s: 6 - s.random()*5)
aw_rate = lambda s, n: s.aloha(s.mu, 2)
class LTE(Distribution):
mu, sigma = 2e-7, 1e-7
drop_rate = 1e-10
tx_rate = property(lambda s: 6 - s.random()*5)
aw_rate = lambda s, n: s.gauss(s.mu*n, s.sigma*sqrt(n))
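# A small illustrative helper (an assumption, not part of the original model, and not
# called from __main__): sample the per-class parameters to show how the Distribution
# subclasses above are meant to be used.
def demo_channel_samples(n_stations=5):
    wifi, lte = WiFi(), LTE()
    return {
        "wifi_access_delay": wifi.aw_rate(n_stations),  # ALOHA-style random back-off
        "lte_access_delay": lte.aw_rate(n_stations),    # Gaussian in the station count
        "fiber_drop": Fiber.drop(),                     # Bernoulli drop with rate 1e-12
        "ethernet_rate": Ethernet().tx_rate,            # random rate in (1, 6]
    }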
class Node:
def __init__(self, id, g):
self.id = id
self.g = g
def __getattr__(self, key):
return self.g.node[self.id][key]
@property
def buffer(self):
return filter(lambda p: p.curr == self, map(lambda e: e.obj, self.g.events))
class Graph(nx.DiGraph):
c = root = 12007
def iterate(self, r, n, d, node, channel):
for _ in xrange(0, n):
self.c += 1
self.add_node(self.c, deep=d, distr=node)
self.add_edge(r, self.c, distr=channel)
self.add_edge(self.c, r, distr=Ethernet)
yield self.c
    def paths(self, a, b):
        # all_shortest_paths is a module-level networkx function, not a DiGraph method.
        return nx.all_shortest_paths(self, a.id, b.id)
def __init__(self, deep=5, icount=3, operators=10):
nx.DiGraph.__init__(self)
q = [self.root + i for i in xrange(0, operators)]
self.c += operators - 1
self.deep = deep
for r in q:
self.add_node(r, distr=Operator, deep=0)
if operators > 1:
for u, v in zip(q[1:], q[:-1]):
self.add_edge(u, v, distr=Fiber)
for deep in xrange(1, deep+1):
q, last = [], q
for r in last:
for v in self.iterate(r, icount + 1 if deep == self.deep else icount, deep, Operator, Ethernet):
q.append(v)
@property
def operators(self):
return filter(lambda x: self.node[x]["deep"] != self.deep, self.nodes())
@property
def leaves(self):
return filter(lambda x: self.node[x]["deep"] == self.deep, self.nodes())
def show(self):
print len(self.nodes())
pos = graphviz_layout(self, prog="sfdp", args="")
plt.rcParams["axes.facecolor"] = "black"
nx.draw_networkx_nodes(self, pos, nodelist=self.operators, node_color="gray", node_size=10)
nx.draw_networkx_nodes(self, pos, nodelist=self.leaves, node_color="red", node_size=10)
nx.draw_networkx_edges(self, pos, edge_color="white", arrows=False)
plt.show()
if __name__ == "__main__":
Graph().show() | 24.169492 | 112 | 0.586489 | [
"MIT"
] | stronklab/netpy | netpy/earl/__init__.py | 4,278 | Python |
# AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
index = {"Config": "00_learner.ipynb",
"energy_score": "00_learner.ipynb",
"EnsemblePredict": "00_learner.ipynb",
"EnsembleLearner": "00_learner.ipynb",
"ARCHITECTURES": "01_models.ipynb",
"ENCODERS": "01_models.ipynb",
"get_pretrained_options": "01_models.ipynb",
"create_smp_model": "01_models.ipynb",
"save_smp_model": "01_models.ipynb",
"load_smp_model": "01_models.ipynb",
"show": "02_data.ipynb",
"preprocess_mask": "02_data.ipynb",
"DeformationField": "02_data.ipynb",
"BaseDataset": "02_data.ipynb",
"RandomTileDataset": "02_data.ipynb",
"TileDataset": "02_data.ipynb",
"Dice": "03_metrics.ipynb",
"Iou": "03_metrics.ipynb",
"Recorder.plot_metrics": "03_metrics.ipynb",
"LOSSES": "05_losses.ipynb",
"FastaiLoss": "05_losses.ipynb",
"WeightedLoss": "05_losses.ipynb",
"JointLoss": "05_losses.ipynb",
"get_loss": "05_losses.ipynb",
"unzip": "06_utils.ipynb",
"install_package": "06_utils.ipynb",
"import_package": "06_utils.ipynb",
"compose_albumentations": "06_utils.ipynb",
"ensemble_results": "06_utils.ipynb",
"plot_results": "06_utils.ipynb",
"iou": "06_utils.ipynb",
"label_mask": "06_utils.ipynb",
"get_candidates": "06_utils.ipynb",
"iou_mapping": "06_utils.ipynb",
"calculate_roi_measures": "06_utils.ipynb",
"export_roi_set": "06_utils.ipynb",
"calc_iterations": "06_utils.ipynb",
"get_label_fn": "06_utils.ipynb",
"save_mask": "06_utils.ipynb",
"save_unc": "06_utils.ipynb",
"rot90": "07_tta.ipynb",
"hflip": "07_tta.ipynb",
"vflip": "07_tta.ipynb",
"BaseTransform": "07_tta.ipynb",
"Chain": "07_tta.ipynb",
"Transformer": "07_tta.ipynb",
"Compose": "07_tta.ipynb",
"Merger": "07_tta.ipynb",
"HorizontalFlip": "07_tta.ipynb",
"VerticalFlip": "07_tta.ipynb",
"Rotate90": "07_tta.ipynb",
"GRID_COLS": "08_gui.ipynb",
"set_css_in_cell_output": "08_gui.ipynb",
"tooltip_css": "08_gui.ipynb",
"ZipUpload": "08_gui.ipynb",
"ItemsPerPage": "08_gui.ipynb",
"BaseParamWidget": "08_gui.ipynb",
"BaseUI": "08_gui.ipynb",
"PathSelector": "08_gui.ipynb",
"PathDownloads": "08_gui.ipynb",
"PathConfig": "08_gui.ipynb",
"GTDataSB": "08_gui.ipynb",
"GTEstSB": "08_gui.ipynb",
"GTEstUI": "08_gui.ipynb",
"TrainDataSB": "08_gui.ipynb",
"TrainModelSB": "08_gui.ipynb",
"TrainValidSB": "08_gui.ipynb",
"LRWidget": "08_gui.ipynb",
"BasePopUpParamWidget": "08_gui.ipynb",
"ParamWidget": "08_gui.ipynb",
"MWWidget": "08_gui.ipynb",
"TrainUI": "08_gui.ipynb",
"PredInputSB": "08_gui.ipynb",
"PredSB": "08_gui.ipynb",
"PredUI": "08_gui.ipynb",
"GUI": "08_gui.ipynb",
"import_sitk": "09_gt.ipynb",
"staple": "09_gt.ipynb",
"m_voting": "09_gt.ipynb",
"msk_show": "09_gt.ipynb",
"GTEstimator": "09_gt.ipynb"}
modules = ["learner.py",
"models.py",
"data.py",
"metrics.py",
"losses.py",
"utils.py",
"tta.py",
"gui.py",
"gt.py"]
doc_url = "https://matjesg.github.io/deepflash2/"
git_url = "https://github.com/matjesg/deepflash2/tree/master/"
def custom_doc_links(name): return None
| 37.088235 | 62 | 0.560666 | [
"Apache-2.0"
] | adriHei/deepflash2 | deepflash2/_nbdev.py | 3,783 | Python |
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Group(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(unique=True)
description = models.TextField()
class Post(models.Model):
text = models.TextField()
pub_date = models.DateTimeField("date published", auto_now_add=True)
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name="post_author")
group = models.ForeignKey(Group, on_delete=models.CASCADE, blank=True, null=True)
image = models.ImageField(upload_to='posts/', blank=True, null=True)
class Comment(models.Model):
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comments')
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='comments')
text = models.TextField()
    created = models.DateTimeField('Publication date and time', auto_now_add=True, db_index=True)
def __str__(self):
return self.text
class Follow(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='follower')  # the user who follows
    author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='following')  # the user being followed
    def __str__(self):
        return f'{self.user} follows {self.author}'
| 36.888889 | 119 | 0.743223 | [
"MIT"
] | SergeyKorobenkov/hw05_final | posts/models.py | 1,397 | Python |
import sys
import numpy as np
from numpy.lib import recfunctions as recFunc
from ..frequency_domain.survey import Survey
from ...data import Data as BaseData
from ...utils import mkvc
from .sources import Planewave_xy_1Dprimary, Planewave_xy_1DhomotD
from .receivers import Point3DImpedance, Point3DTipper
from .utils.plot_utils import DataNSEMPlotMethods
#########
# Survey
#########
# class Survey(BaseSurvey):
# """
# Survey class for NSEM.
#     **Required**
# :param list srcList: List of sources associated with the survey
# **Optional**
# """
# srcPair = BaseNSEMSrc
# def __init__(self, srcList, **kwargs):
# # Sort these by frequency
# self.source_list = srcList
# BaseSurvey.__init__(self, **kwargs)
# _freqDict = {}
# for src in srcList:
# if src.freq not in _freqDict:
# _freqDict[src.freq] = []
# _freqDict[src.freq] += [src]
# self._freqDict = _freqDict
# self._freqs = sorted([f for f in self._freqDict])
# @property
# def freqs(self):
# """Frequencies"""
# return self._freqs
# @property
# def nFreq(self):
# """Number of frequencies"""
# return len(self._freqDict)
# def getSrcByFreq(self, freq):
# """Returns the sources associated with a specific frequency."""
# assert freq in self._freqDict, "The requested frequency is not in this survey."
# return self._freqDict[freq]
# def eval(self, f):
# """
#         Evaluate and return Data given calculated fields
#         :param SimPEG.electromagnetics.frequency_domain.fields.FieldsFDEM f: A NSEM fields object to evaluate data from
# :retype: SimPEG.EM.NSEM.Data
# :return: NSEM Data object
# """
# data = Data(self)
# for src in self.source_list:
# sys.stdout.flush()
# for rx in src.receiver_list:
# data[src, rx] = rx.eval(src, self.mesh, f)
# return data
# def evalDeriv(self, f):
# raise Exception('Use Sources to project fields deriv.')
#########
# Data
#########
class Data(BaseData, DataNSEMPlotMethods):
"""
Data class for NSEMdata. Stores the data vector indexed by the survey.
"""
def __init__(self, survey, dobs=None, relative_error=None, noise_floor=None):
BaseData.__init__(self, survey, dobs, relative_error, noise_floor)
def toRecArray(self, returnType="RealImag"):
"""
Returns a numpy.recarray for a SimpegNSEM impedance data object.
:param returnType: Switches between returning a rec array where the impedance is split to real and imaginary ('RealImag') or is a complex ('Complex')
:type returnType: str, optional
:rtype: numpy.recarray
:return: Record array with data, with indexed columns
"""
# Define the record fields
dtRI = [
("freq", float),
("x", float),
("y", float),
("z", float),
("zxxr", float),
("zxxi", float),
("zxyr", float),
("zxyi", float),
("zyxr", float),
("zyxi", float),
("zyyr", float),
("zyyi", float),
("tzxr", float),
("tzxi", float),
("tzyr", float),
("tzyi", float),
]
dtCP = [
("freq", float),
("x", float),
("y", float),
("z", float),
("zxx", complex),
("zxy", complex),
("zyx", complex),
("zyy", complex),
("tzx", complex),
("tzy", complex),
]
for src in self.survey.source_list:
# Temp array for all the receivers of the source.
# Note: needs to be written more generally,
            # using different rxTypes and not all the data at the locations
# Assume the same locs for all RX
locs = src.receiver_list[0].locations
if locs.shape[1] == 1:
locs = np.hstack((np.array([[0.0, 0.0]]), locs))
elif locs.shape[1] == 2:
locs = np.hstack((np.array([[0.0]]), locs))
tArrRec = np.concatenate(
(
src.freq * np.ones((locs.shape[0], 1)),
locs,
np.nan * np.ones((locs.shape[0], 12)),
),
axis=1,
).view(dtRI)
# Get the type and the value for the DataNSEM object as a list
typeList = [
[rx.orientation, rx.component, self[src, rx]]
for rx in src.receiver_list
]
# Insert the values to the temp array
for nr, (k, c, val) in enumerate(typeList):
zt_type = "t" if "z" in k else "z"
key = zt_type + k + c[0]
tArrRec[key] = mkvc(val, 2)
# Masked array
try:
outTemp = recFunc.stack_arrays((outTemp, tArrRec))
except NameError:
outTemp = tArrRec.copy()
if "RealImag" in returnType:
outArr = outTemp.copy()
elif "Complex" in returnType:
# Add the real and imaginary to a complex number
outArr = np.empty(outTemp.shape, dtype=dtCP)
for comp in ["freq", "x", "y", "z"]:
outArr[comp] = outTemp[comp].copy()
for comp in ["zxx", "zxy", "zyx", "zyy", "tzx", "tzy"]:
outArr[comp] = (
outTemp[comp + "r"].copy() + 1j * outTemp[comp + "i"].copy()
)
else:
            raise NotImplementedError(
                "{:s} is not implemented, has to be RealImag or Complex.".format(returnType)
            )
# Return
return outArr
@classmethod
def fromRecArray(cls, recArray, srcType="primary"):
"""
Class method that reads in a numpy record array to NSEMdata object.
:param recArray: Record array with the data. Has to have ('freq','x','y','z') columns and some ('zxx','zxy','zyx','zyy','tzx','tzy')
:type recArray: numpy.recarray
:param srcType: The type of SimPEG.EM.NSEM.SrcNSEM to be used
:type srcType: str, optional
"""
if srcType == "primary":
src = Planewave_xy_1Dprimary
elif srcType == "total":
src = Planewave_xy_1DhomotD
else:
raise NotImplementedError("{:s} is not a valid source type for NSEMdata")
# Find all the frequencies in recArray
uniFreq = np.unique(recArray["freq"].copy())
srcList = []
dataList = []
for freq in uniFreq:
# Initiate rxList
rxList = []
# Find that data for freq
dFreq = recArray[recArray["freq"] == freq].copy()
# Find the impedance rxTypes in the recArray.
rxTypes = [
comp
for comp in recArray.dtype.names
if (len(comp) == 4 or len(comp) == 3) and "z" in comp
]
for rxType in rxTypes:
# Find index of not nan values in rxType
notNaNind = ~np.isnan(dFreq[rxType].copy())
if np.any(notNaNind): # Make sure that there is any data to add.
locs = _rec_to_ndarr(dFreq[["x", "y", "z"]][notNaNind].copy())
if dFreq[rxType].dtype.name in "complex128":
if "t" in rxType:
rxList.append(Point3DTipper(locs, rxType[1:3], "real"))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(Point3DTipper(locs, rxType[1:3], "imag"))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
elif "z" in rxType:
rxList.append(Point3DImpedance(locs, rxType[1:3], "real"))
dataList.append(dFreq[rxType][notNaNind].real.copy())
rxList.append(Point3DImpedance(locs, rxType[1:3], "imag"))
dataList.append(dFreq[rxType][notNaNind].imag.copy())
else:
component = "real" if "r" in rxType else "imag"
if "z" in rxType:
rxList.append(
Point3DImpedance(locs, rxType[1:3], component)
)
dataList.append(dFreq[rxType][notNaNind].copy())
if "t" in rxType:
rxList.append(Point3DTipper(locs, rxType[1:3], component))
dataList.append(dFreq[rxType][notNaNind].copy())
srcList.append(src(rxList, freq))
# Make a survey
survey = Survey(srcList)
dataVec = np.hstack(dataList)
return cls(survey, dataVec)
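# A minimal usage sketch (not executed here; ``nsem_data`` is an assumed, already-built
# Data instance): dump the impedances to a record array and rebuild a Data object from it.
def _example_recarray_roundtrip(nsem_data):
    rec = nsem_data.toRecArray(returnType="Complex")     # one row per frequency/location pair
    rebuilt = Data.fromRecArray(rec, srcType="primary")  # survey and data vector again
    return rec, rebuilt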
def _rec_to_ndarr(rec_arr, data_type=float):
"""
Function to transform a numpy record array to a nd array.
dupe of SimPEG.electromagnetics.natural_source.utils.rec_to_ndarr to avoid circular import
"""
# fix for numpy >= 1.16.0
# https://numpy.org/devdocs/release/1.16.0-notes.html#multi-field-views-return-a-view-instead-of-a-copy
return np.array(recFunc.structured_to_unstructured(recFunc.repack_fields(rec_arr[list(rec_arr.dtype.names)])),
dtype=data_type)
| 36.732824 | 157 | 0.520989 | [
"MIT"
] | JKutt/simpeg | SimPEG/electromagnetics/natural_source/survey.py | 9,624 | Python |
# (C) Copyright 1996-2016 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
# importing Magics module
from Magics.macro import *
ref = 'era'
# Setting of the output file name
output = output(output_formats=['png'],
output_name_first_page_number='off',
output_name=ref)
# Setting the coordinates of the geographical area
projection = mmap(
subpage_x_length=24.,
subpage_upper_right_longitude=50.00,
subpage_upper_right_latitude=65.00,
subpage_lower_left_latitude=25.00,
subpage_lower_left_longitude=-20.0,
subpage_map_projection='cylindrical',
)
# Coastlines setting
coast = mcoast(map_grid='on', map_grid_colour='grey',
map_grid_thickness=2,
map_coastline_colour='RGB(0.4,0.4,0.4)',
map_coastline_thickness=3)
obs = mobs(
obsjson_input_filename = "small.json",
obs_template_file_name = "obs.template",
obs_identification = "on",
obs_size = 0.5,
obs_distance_apart = 0.
)
title = mtext(text_lines=["Observation plotting ..." ],
text_justification='left', text_font_size=0.8,
text_colour='charcoal')
# To the plot
plot(
output,
projection,
obs,
coast,
title,
)
| 25.032787 | 80 | 0.698756 | [
"ECL-2.0",
"Apache-2.0"
] | EduardRosert/magics | regression/era/era.py | 1,527 | Python |
#!/usr/bin/env python3
"""A utility script for automating the beets release process.
"""
import click
import os
import re
import subprocess
from contextlib import contextmanager
import datetime
BASE = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CHANGELOG = os.path.join(BASE, 'docs', 'changelog.rst')
@contextmanager
def chdir(d):
"""A context manager that temporary changes the working directory.
"""
olddir = os.getcwd()
os.chdir(d)
yield
os.chdir(olddir)
@click.group()
def release():
pass
# Locations (filenames and patterns) of the version number.
VERSION_LOCS = [
(
os.path.join(BASE, 'beets', '__init__.py'),
[
(
r'__version__\s*=\s*u[\'"]([0-9\.]+)[\'"]',
"__version__ = '{version}'",
)
]
),
(
os.path.join(BASE, 'docs', 'conf.py'),
[
(
r'version\s*=\s*[\'"]([0-9\.]+)[\'"]',
"version = '{minor}'",
),
(
r'release\s*=\s*[\'"]([0-9\.]+)[\'"]',
"release = '{version}'",
),
]
),
(
os.path.join(BASE, 'setup.py'),
[
(
r'\s*version\s*=\s*[\'"]([0-9\.]+)[\'"]',
" version='{version}',",
)
]
),
]
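# Illustrative expansion (hypothetical version number): bumping to "1.5.0" rewrites the
# lines matched above as "__version__ = '1.5.0'" in beets/__init__.py, "version = '1.5'"
# and "release = '1.5.0'" in docs/conf.py, and "    version='1.5.0'," in setup.py.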
GITHUB_USER = 'beetbox'
GITHUB_REPO = 'beets'
def bump_version(version):
"""Update the version number in setup.py, docs config, changelog,
and root module.
"""
version_parts = [int(p) for p in version.split('.')]
assert len(version_parts) == 3, "invalid version number"
minor = '{}.{}'.format(*version_parts)
major = '{}'.format(*version_parts)
# Replace the version each place where it lives.
for filename, locations in VERSION_LOCS:
# Read and transform the file.
out_lines = []
with open(filename) as f:
found = False
for line in f:
for pattern, template in locations:
match = re.match(pattern, line)
if match:
# Check that this version is actually newer.
old_version = match.group(1)
old_parts = [int(p) for p in old_version.split('.')]
assert version_parts > old_parts, \
"version must be newer than {}".format(
old_version
)
# Insert the new version.
out_lines.append(template.format(
version=version,
major=major,
minor=minor,
) + '\n')
found = True
break
else:
# Normal line.
out_lines.append(line)
if not found:
print(f"No pattern found in {filename}")
# Write the file back.
with open(filename, 'w') as f:
f.write(''.join(out_lines))
# Generate bits to insert into changelog.
header_line = f'{version} (in development)'
header = '\n\n' + header_line + '\n' + '-' * len(header_line) + '\n\n'
header += 'Changelog goes here!\n'
# Insert into the right place.
with open(CHANGELOG) as f:
contents = f.read()
location = contents.find('\n\n') # First blank line.
contents = contents[:location] + header + contents[location:]
# Write back.
with open(CHANGELOG, 'w') as f:
f.write(contents)
@release.command()
@click.argument('version')
def bump(version):
"""Bump the version number.
"""
bump_version(version)
def get_latest_changelog():
"""Extract the first section of the changelog.
"""
started = False
lines = []
with open(CHANGELOG) as f:
for line in f:
if re.match(r'^--+$', line.strip()):
# Section boundary. Start or end.
if started:
# Remove last line, which is the header of the next
# section.
del lines[-1]
break
else:
started = True
elif started:
lines.append(line)
return ''.join(lines).strip()
def rst2md(text):
"""Use Pandoc to convert text from ReST to Markdown.
"""
pandoc = subprocess.Popen(
['pandoc', '--from=rst', '--to=markdown', '--wrap=none'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, _ = pandoc.communicate(text.encode('utf-8'))
md = stdout.decode('utf-8').strip()
# Fix up odd spacing in lists.
    return re.sub(r'^-\s+', '- ', md, flags=re.M)
def changelog_as_markdown():
"""Get the latest changelog entry as hacked up Markdown.
"""
rst = get_latest_changelog()
# Replace plugin links with plugin names.
rst = re.sub(r':doc:`/plugins/(\w+)`', r'``\1``', rst)
# References with text.
rst = re.sub(r':ref:`([^<]+)(<[^>]+>)`', r'\1', rst)
# Other backslashes with verbatim ranges.
rst = re.sub(r'(\s)`([^`]+)`([^_])', r'\1``\2``\3', rst)
# Command links with command names.
rst = re.sub(r':ref:`(\w+)-cmd`', r'``\1``', rst)
# Bug numbers.
rst = re.sub(r':bug:`(\d+)`', r'#\1', rst)
# Users.
rst = re.sub(r':user:`(\w+)`', r'@\1', rst)
# Convert with Pandoc.
md = rst2md(rst)
# Restore escaped issue numbers.
md = re.sub(r'\\#(\d+)\b', r'#\1', md)
return md
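# Illustrative sketch of the substitutions above (hypothetical changelog line, not from a
# real release): a ReST entry such as
#   :doc:`/plugins/web`: fixed :bug:`1234` reported by :user:`someone`
# becomes, before the Pandoc step,
#   ``web``: fixed #1234 reported by @someone
# i.e. plugin links turn into literal plugin names, bug references into issue numbers,
# and user references into @-mentions.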
@release.command()
def changelog():
"""Get the most recent version's changelog as Markdown.
"""
print(changelog_as_markdown())
def get_version(index=0):
"""Read the current version from the changelog.
"""
with open(CHANGELOG) as f:
cur_index = 0
for line in f:
match = re.search(r'^\d+\.\d+\.\d+', line)
if match:
if cur_index == index:
return match.group(0)
else:
cur_index += 1
@release.command()
def version():
"""Display the current version.
"""
print(get_version())
@release.command()
def datestamp():
"""Enter today's date as the release date in the changelog.
"""
dt = datetime.datetime.now()
stamp = '({} {}, {})'.format(dt.strftime('%B'), dt.day, dt.year)
marker = '(in development)'
lines = []
underline_length = None
with open(CHANGELOG) as f:
for line in f:
if marker in line:
# The header line.
line = line.replace(marker, stamp)
lines.append(line)
underline_length = len(line.strip())
elif underline_length:
# This is the line after the header. Rewrite the dashes.
lines.append('-' * underline_length + '\n')
underline_length = None
else:
lines.append(line)
with open(CHANGELOG, 'w') as f:
for line in lines:
f.write(line)
@release.command()
def prep():
"""Run all steps to prepare a release.
- Tag the commit.
- Build the sdist package.
- Generate the Markdown changelog to ``changelog.md``.
- Bump the version number to the next version.
"""
cur_version = get_version()
# Tag.
subprocess.check_call(['git', 'tag', f'v{cur_version}'])
# Build.
with chdir(BASE):
subprocess.check_call(['python', 'setup.py', 'sdist'])
# Generate Markdown changelog.
cl = changelog_as_markdown()
with open(os.path.join(BASE, 'changelog.md'), 'w') as f:
f.write(cl)
# Version number bump.
# FIXME It should be possible to specify this as an argument.
version_parts = [int(n) for n in cur_version.split('.')]
version_parts[-1] += 1
next_version = '.'.join(map(str, version_parts))
bump_version(next_version)
@release.command()
def publish():
"""Unleash a release unto the world.
- Push the tag to GitHub.
- Upload to PyPI.
"""
version = get_version(1)
# Push to GitHub.
with chdir(BASE):
subprocess.check_call(['git', 'push'])
subprocess.check_call(['git', 'push', '--tags'])
# Upload to PyPI.
path = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call(['twine', 'upload', path])
@release.command()
def ghrelease():
"""Create a GitHub release using the `github-release` command-line
tool.
Reads the changelog to upload from `changelog.md`. Uploads the
tarball from the `dist` directory.
"""
version = get_version(1)
tag = 'v' + version
# Load the changelog.
with open(os.path.join(BASE, 'changelog.md')) as f:
cl_md = f.read()
# Create the release.
subprocess.check_call([
'github-release', 'release',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', f'{GITHUB_REPO} {version}',
'--description', cl_md,
])
# Attach the release tarball.
tarball = os.path.join(BASE, 'dist', f'beets-{version}.tar.gz')
subprocess.check_call([
'github-release', 'upload',
'-u', GITHUB_USER, '-r', GITHUB_REPO,
'--tag', tag,
'--name', os.path.basename(tarball),
'--file', tarball,
])
if __name__ == '__main__':
release()
| 26.90678 | 77 | 0.526089 | [
"MIT"
] | DucNg/beets | extra/release.py | 9,525 | Python |
import re
from Error import Error4,Error6,Error9
from DataBase import BaseDatos,BdRow
from .precompilada import precompilada
from typing import Pattern
def getEtiqueta(linea:str)->str:
"""Obtiene el nombre de la captura
Args:
linea (str): Linea donde se va a buscar la etiqueta
Returns:
str: Regresa el nombre de la etiqueta
"""
# Buscamos el mnemonico
pattern='\s+([a-z]{1,5})\s+([a-z]{1,24})'
busqueda=re.search(pattern,linea,re.IGNORECASE)
# Obtenemos el mnemonico-------------------------------
etiqueta =busqueda.group(2)
return etiqueta
def calcularEtiqueta(sustraendo:str,minuendo:str)-> str:
"""Resta la diferencia entre dos PC en hexadecimal
sustraendo - minuendo
- Si
- Sustraendo - minuendo
- En caso de error regresa 'e10' operando muy grande
Args:
sustraendo (str): Ejemplo '0x7'
minuendo (str): Ejemplo '0x1'
Returns:
str: Ejemplo '0x06'
"""
print(sustraendo)
print(minuendo)
sustraendo=int(sustraendo,16)
minuendo=int(minuendo,16)
resultado:int= sustraendo-minuendo
print(resultado)
if resultado <-127 or 128<resultado:
return 'e10' #E10 el salto relativo es muy lejano
# Si es negativa
elif resultado<0:
return convertirA2Hex(resultado)
# si es positiva
else:
return hex(resultado)
def bindigits(n:int, bits:int)->str:
"""Convierte a binario un numero de complemento A2 en caso de negativo, normal en caso de ser positivo
Args:
n (int): E.g 7
bits (int): eg 3
Returns:
str: E.g '001'
"""
s = bin(n & int("1"*bits, 2))[2:]
return ("{0:0>%s}" % (bits)).format(s)
def convertirA2Hex(numero:int)-> str:
"""Convierte un numero decimal a hexadecimal
- Si el número es decimal lo convierte a complemento A2
Args:
numero (int): Número decimal que se quiere convertir Eg. 07
Returns:
str: Eg. 0x07
"""
# cuantos bits ocupa el número hexadecimal
cuantosBits=(len(hex(numero))-2) *4 # el -2 es 0x, el 4 es porque 1 hex equivale a 4 bits
#numero convertido a binario
binario=bindigits(numero,cuantosBits)
return hex(int(binario, 2))
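# A minimal worked example (illustrative only; the addresses below are assumed, not taken
# from a real assembly listing): a backward branch from PC 0x0010 to a label at 0x000A.
def _ejemplo_salto_relativo():
    # Pass 2 computes label_PC - PC_after_instruction: 0x000A - 0x0010 = -6,
    # which is encoded as the two's-complement byte 0xfa.
    print(calcularEtiqueta('0xa', '0x10'))   # expected '0xfa'
    # A forward branch of +6 stays as plain hexadecimal.
    print(calcularEtiqueta('0x16', '0x10'))  # expected '0x6'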
def precompilarPasada1(numLinea:int,modo:str,linea:str,pc: str)->precompilada:
    # Global variables
    # Look for the mnemonic
    pattern='\s+([a-z]{1,5})\s+([a-z]{1,24})'
    busqueda=re.search(pattern,linea,re.IGNORECASE)
    # Get the mnemonic and the label-------------------------------
    mnemonico =busqueda.group(1)
    etiqueta=busqueda.group(2)
    # Query the instruction database-------------------------------
    consultaBd:BdRow = BaseDatos.bdSearch(mnemonico,6)
    # Current PC = pc + bytes used
    pcActual=hex(int(pc,16) +2) # The +2 is because all relative-mode instructions use 2 bytes
    # Direct data--------------------------------------
    lineaPrecompilada=precompilada(numLinea,modo,pcActual,consultaBd.opcode,etiqueta,consultaBd.byte)
    # Derived data-----------------------------------
    lineaPrecompilada.bytesOcupados=consultaBd.byte
return lineaPrecompilada
def precompilarPasada2(lineaPrecompilada:precompilada,pcEtiqueta:str)->precompilada:
    # Get the current PC (pass 1 already added the +2 for the 2-byte relative instruction)
    pcActual=hex(int(lineaPrecompilada.pcActual,16) )
    lineaPrecompilada1:precompilada
    # Compute the operand
    operandoPrecompilado=calcularEtiqueta(pcEtiqueta,pcActual)
    # Check that the relative jump is not too far
    if operandoPrecompilado=='e10': # error: the jump is too far
        lineaPrecompilada1=precompilada(0,'','','','',0)
        lineaPrecompilada1.error='e10'
    else:
        operandoPrecompilado=operandoPrecompilado[2:]
        # make a copy with the resolved operand
        lineaPrecompilada1=precompilada(lineaPrecompilada.numLinea,lineaPrecompilada.modo,hex(int(lineaPrecompilada.pcActual,16)-2),lineaPrecompilada.opcode,operandoPrecompilado,lineaPrecompilada.byte)
print(operandoPrecompilado)
return lineaPrecompilada1
#return lineaPrecompilada1 | 29.595745 | 201 | 0.656602 | [
"MIT"
] | EzioFenix/Compilador-M68HC11 | Precompilar/relativo.py | 4,178 | Python |
db.blog_category.ondelete = 'CASCADE'
db.blog.ondelete = 'CASCADE'
db.branch_rating.ondelete = 'CASCADE'
db.branch.ondelete = 'CASCADE'
db.floor.ondelete = 'CASCADE'
db.guest.ondelete = 'CASCADE'
db.news_category.ondelete = 'CASCADE'
db.news.ondelete = 'CASCADE'
db.photo_album.ondelete = 'CASCADE'
db.photo.ondelete = 'CASCADE'
db.room_category.ondelete = 'CASCADE'
db.room_status.ondelete = 'CASCADE'
db.room.ondelete = 'CASCADE'
db.video_category.ondelete = 'CASCADE'
db.video.ondelete = 'CASCADE'
| 31.3125 | 38 | 0.760479 | [
"BSD-3-Clause"
] | wantsomechocolate/WantsomeBeanstalk | web2py-appliances-master/HotelManagementExample/models/db_wizard_ondelete.py | 501 | Python |
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _restobject import RestObject
def ex17_mount_virtual_media_iso(restobj, iso_url, boot_on_next_server_reset):
sys.stdout.write("\nEXAMPLE 17: Mount iLO Virtual Media DVD ISO from URL\n")
instances = restobj.search_for_type("Manager.")
for instance in instances:
rsp = restobj.rest_get(instance["href"])
rsp = restobj.rest_get(rsp.dict["links"]["VirtualMedia"]["href"])
for vmlink in rsp.dict["links"]["Member"]:
response = restobj.rest_get(vmlink["href"])
if response.status == 200 and "DVD" in response.dict["MediaTypes"]:
body = {"Image": iso_url}
if (iso_url is not None and \
boot_on_next_server_reset is not None):
body["Oem"] = {"Hp": {"BootOnNextServerReset": \
boot_on_next_server_reset}}
response = restobj.rest_patch(vmlink["href"], body)
restobj.error_handler(response)
elif response.status != 200:
restobj.error_handler(response)
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_https_url = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
#Create a REST object
REST_OBJ = RestObject(iLO_https_url, iLO_account, iLO_password)
ex17_mount_virtual_media_iso(REST_OBJ, "http://10.0.0.100/test.iso", True)
| 42.716667 | 81 | 0.630901 | [
"Apache-2.0"
] | HewlettPackard/python-ilorest-library-EOL | examples/Rest/ex17_mount_virtual_media_iso.py | 2,563 | Python |
from gpiozero import Servo
from gpiozero import LED
from time import sleep
from WeatherDataCW import WeatherData
class WeatherDashboard:
servo_pin = 17
led_pin = 14
servoCorrection=0.5
maxPW=(2.0+servoCorrection)/1000
minPW=(1.0-servoCorrection)/1000
def __init__(self, servo_position=0, led_status=0):
self.servo = Servo(self.servo_pin, min_pulse_width=self.minPW, max_pulse_width=self.maxPW)
self.led = LED(self.led_pin)
self.move_servo(servo_position)
self.set_led_status(led_status)
def move_servo(self, servo_position=0):
self.servo.value = self.convert_percentage_to_integer(servo_position)
def turnOffServo(self):
sleep(2)
self.servo.close()
def set_led_status(self, led_status=0):
if(led_status==0):
self.led.off()
elif (led_status==1):
self.led.on()
else:
self.led.blink()
def convert_percentage_to_integer(self, percentage_amount):
#adjust for servos that turn counter clockwise by default
adjusted_percentage_amount = 100 - percentage_amount
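        # Worked example: 0% -> (100*0.02)-1 = +1.0, 50% -> (50*0.02)-1 = 0.0,
        # 100% -> (0*0.02)-1 = -1.0, i.e. the percentage maps onto gpiozero's
        # -1..+1 Servo.value range with the direction inverted.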
return (adjusted_percentage_amount*0.02)-1
if __name__=="__main__":
weather_data = WeatherData('Yekaterinburg')
print("%s %sC %s wind speed %s km/h"
%(weather_data.getCity(),
weather_data.getTemperature(),
weather_data.getWeatherConditions(),
weather_data.getWindSpeed()))
print(weather_data.getServoValue())
print(weather_data.getLEDValue())
weather_dashboard = WeatherDashboard(
weather_data.getServoValue(),
weather_data.getLEDValue())
weather_dashboard.turnOffServo() | 30.672414 | 98 | 0.649241 | [
"MIT"
] | kuzned/rpi_weather | WeatherDashboardCW.py | 1,779 | Python |
# Generated by Django 2.0.3 on 2018-05-28 23:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0003_task_order'),
]
operations = [
migrations.AlterField(
model_name='task',
name='order',
field=models.IntegerField(blank=True),
),
]
| 19.526316 | 50 | 0.58221 | [
"MIT"
] | rkcf/dailio | backend/api/migrations/0004_auto_20180528_2342.py | 371 | Python |
import datetime
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.db.models import Sum
import commonware.log
import waffle
import amo
import mkt.constants.comm as comm
from amo.utils import cache_ns_key
from mkt.comm.utils import create_comm_note
from mkt.site.mail import send_mail_jinja
from mkt.site.models import ManagerBase, ModelBase, skip_cache
from mkt.tags.models import Tag
from mkt.translations.fields import save_signal, TranslatedField
from mkt.users.models import UserProfile
from mkt.webapps.indexers import WebappIndexer
from mkt.webapps.models import Webapp
user_log = commonware.log.getLogger('z.users')
QUEUE_TARAKO = 'tarako'
class CannedResponse(ModelBase):
name = TranslatedField()
response = TranslatedField(short=False)
sort_group = models.CharField(max_length=255)
class Meta:
db_table = 'cannedresponses'
def __unicode__(self):
return unicode(self.name)
models.signals.pre_save.connect(save_signal, sender=CannedResponse,
dispatch_uid='cannedresponses_translations')
class EditorSubscription(ModelBase):
user = models.ForeignKey(UserProfile)
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'editor_subscriptions'
class ReviewerScore(ModelBase):
user = models.ForeignKey(UserProfile, related_name='_reviewer_scores')
addon = models.ForeignKey(Webapp, blank=True, null=True, related_name='+')
score = models.SmallIntegerField()
# For automated point rewards.
note_key = models.SmallIntegerField(choices=amo.REVIEWED_CHOICES.items(),
default=0)
# For manual point rewards with a note.
note = models.CharField(max_length=255, blank=True)
class Meta:
db_table = 'reviewer_scores'
ordering = ('-created',)
@classmethod
def get_key(cls, key=None, invalidate=False):
namespace = 'riscore'
if not key: # Assuming we're invalidating the namespace.
cache_ns_key(namespace, invalidate)
return
else:
# Using cache_ns_key so each cache val is invalidated together.
ns_key = cache_ns_key(namespace, invalidate)
return '%s:%s' % (ns_key, key)
@classmethod
def get_event(cls, addon, status, **kwargs):
"""Return the review event type constant.
This is determined by the app type and the queue the addon is
currently in (which is determined from the status).
Note: We're not using addon.status because this is called after the
status has been updated by the reviewer action.
"""
if addon.is_packaged:
if status in amo.WEBAPPS_APPROVED_STATUSES:
return amo.REVIEWED_WEBAPP_UPDATE
else: # If it's not PUBLIC, assume it's a new submission.
return amo.REVIEWED_WEBAPP_PACKAGED
else: # It's a hosted app.
in_rereview = kwargs.pop('in_rereview', False)
if status in amo.WEBAPPS_APPROVED_STATUSES and in_rereview:
return amo.REVIEWED_WEBAPP_REREVIEW
else:
return amo.REVIEWED_WEBAPP_HOSTED
@classmethod
def award_points(cls, user, addon, status, **kwargs):
"""Awards points to user based on an event and the queue.
`event` is one of the `REVIEWED_` keys in constants.
`status` is one of the `STATUS_` keys in constants.
"""
event = cls.get_event(addon, status, **kwargs)
score = amo.REVIEWED_SCORES.get(event)
if score:
cls.objects.create(user=user, addon=addon, score=score,
note_key=event)
cls.get_key(invalidate=True)
user_log.info(
(u'Awarding %s points to user %s for "%s" for addon %s'
% (score, user, amo.REVIEWED_CHOICES[event], addon.id))
.encode('utf-8'))
return score
@classmethod
def award_moderation_points(cls, user, addon, review_id):
"""Awards points to user based on moderated review."""
event = amo.REVIEWED_APP_REVIEW
score = amo.REVIEWED_SCORES.get(event)
cls.objects.create(user=user, addon=addon, score=score, note_key=event)
cls.get_key(invalidate=True)
user_log.info(
u'Awarding %s points to user %s for "%s" for review %s' % (
score, user, amo.REVIEWED_CHOICES[event], review_id))
@classmethod
def get_total(cls, user):
"""Returns total points by user."""
key = cls.get_key('get_total:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = (ReviewerScore.objects.no_cache().filter(user=user)
.aggregate(total=Sum('score'))
.values())[0]
if val is None:
val = 0
cache.set(key, val, None)
return val
@classmethod
def get_recent(cls, user, limit=5):
"""Returns most recent ReviewerScore records."""
key = cls.get_key('get_recent:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
val = ReviewerScore.objects.no_cache().filter(user=user)
val = list(val[:limit])
cache.set(key, val, None)
return val
@classmethod
def get_performance(cls, user):
"""Returns sum of reviewer points."""
key = cls.get_key('get_performance:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s
ORDER BY `total` DESC
"""
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id]))
cache.set(key, val, None)
return val
@classmethod
def get_performance_since(cls, user, since):
"""
Returns sum of reviewer points since the given datetime.
"""
key = cls.get_key('get_performance:%s:%s' % (user.id, since.isoformat()))
val = cache.get(key)
if val is not None:
return val
sql = """
SELECT `reviewer_scores`.*,
SUM(`reviewer_scores`.`score`) AS `total`
FROM `reviewer_scores`
LEFT JOIN `addons` ON (`reviewer_scores`.`addon_id`=`addons`.`id`)
WHERE `reviewer_scores`.`user_id` = %s AND
`reviewer_scores`.`created` >= %s
ORDER BY `total` DESC
"""
with skip_cache():
val = list(ReviewerScore.objects.raw(sql, [user.id, since]))
cache.set(key, val, 3600)
return val
@classmethod
def _leaderboard_query(cls, since=None, types=None):
"""
Returns common SQL to leaderboard calls.
"""
query = (cls.objects
.values_list('user__id', 'user__display_name')
.annotate(total=Sum('score'))
.exclude(user__groups__name__in=('No Reviewer Incentives',
'Staff', 'Admins'))
.order_by('-total'))
if since is not None:
query = query.filter(created__gte=since)
if types is not None:
query = query.filter(note_key__in=types)
return query
@classmethod
def get_leaderboards(cls, user, days=7, types=None):
"""Returns leaderboards with ranking for the past given days.
This will return a dict of 3 items::
{'leader_top': [...],
'leader_near: [...],
'user_rank': (int)}
If the user is not in the leaderboard, or if the user is in the top 5,
'leader_near' will be an empty list and 'leader_top' will contain 5
elements instead of the normal 3.
"""
key = cls.get_key('get_leaderboards:%s' % user.id)
val = cache.get(key)
if val is not None:
return val
week_ago = datetime.date.today() - datetime.timedelta(days=days)
leader_top = []
leader_near = []
query = cls._leaderboard_query(since=week_ago, types=types)
scores = []
user_rank = 0
in_leaderboard = False
for rank, row in enumerate(query, 1):
user_id, name, total = row
scores.append({
'user_id': user_id,
'name': name,
'rank': rank,
'total': int(total),
})
if user_id == user.id:
user_rank = rank
in_leaderboard = True
if not in_leaderboard:
leader_top = scores[:5]
else:
if user_rank <= 5: # User is in top 5, show top 5.
leader_top = scores[:5]
else:
leader_top = scores[:3]
leader_near = [scores[user_rank - 2], scores[user_rank - 1]]
try:
leader_near.append(scores[user_rank])
except IndexError:
pass # User is last on the leaderboard.
val = {
'leader_top': leader_top,
'leader_near': leader_near,
'user_rank': user_rank,
}
cache.set(key, val, None)
return val
@classmethod
def all_users_by_score(cls):
"""
Returns reviewers ordered by highest total points first.
"""
query = cls._leaderboard_query()
scores = []
for row in query:
user_id, name, total = row
user_level = len(amo.REVIEWED_LEVELS) - 1
for i, level in enumerate(amo.REVIEWED_LEVELS):
if total < level['points']:
user_level = i - 1
break
# Only show level if it changes.
if user_level < 0:
level = ''
else:
level = amo.REVIEWED_LEVELS[user_level]['name']
scores.append({
'user_id': user_id,
'name': name,
'total': int(total),
'level': level,
})
prev = None
for score in reversed(scores):
if score['level'] == prev:
score['level'] = ''
else:
prev = score['level']
return scores
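# A minimal usage sketch (illustrative only; ``reviewer`` and ``app`` are assumed objects
# and this helper is not called anywhere in this module): award points for a completed
# review, then read the cached leaderboard for that reviewer.
def _example_score_flow(reviewer, app):
    ReviewerScore.award_points(reviewer, app, app.status, in_rereview=False)
    boards = ReviewerScore.get_leaderboards(reviewer)
    return boards['user_rank'], boards['leader_top']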
class EscalationQueue(ModelBase):
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'escalation_queue'
class RereviewQueue(ModelBase):
addon = models.ForeignKey(Webapp)
class Meta:
db_table = 'rereview_queue'
@classmethod
def flag(cls, addon, event, message=None):
cls.objects.get_or_create(addon=addon)
if message:
amo.log(event, addon, addon.current_version,
details={'comments': message})
else:
amo.log(event, addon, addon.current_version)
# TODO: if we ever get rid of ActivityLog for reviewer notes, replace
# all flag calls to use the comm constant and not have to use
# ACTION_MAP.
create_comm_note(addon, addon.current_version, None, message,
note_type=comm.ACTION_MAP(event))
def send_tarako_mail(review):
if not waffle.switch_is_active('comm-dashboard'):
send_mail_jinja(
'Low-memory devices review {passed}'.format(
passed='passed' if review.passed else 'failed'),
'reviewers/emails/tarako_review_complete.txt',
{'review': review},
recipient_list=[a.email for a in review.app.authors.all()],
from_email=settings.MKT_REVIEWERS_EMAIL)
def tarako_passed(review):
"""Add the tarako tag to the app."""
tag = Tag(tag_text='tarako')
tag.save_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review)
def tarako_failed(review):
"""Remove the tarako tag from the app."""
tag = Tag(tag_text='tarako')
tag.remove_tag(review.app)
WebappIndexer.index_ids([review.app.pk])
send_tarako_mail(review)
class AdditionalReviewManager(ManagerBase):
def unreviewed(self, queue, and_approved=False):
query = {
'passed': None,
'queue': queue,
}
if and_approved:
query['app__status__in'] = amo.WEBAPPS_APPROVED_STATUSES
return self.get_queryset().no_cache().filter(**query)
def latest_for_queue(self, queue):
try:
return self.get_queryset().filter(queue=queue).latest()
except AdditionalReview.DoesNotExist:
return None
class AdditionalReview(ModelBase):
app = models.ForeignKey(Webapp)
queue = models.CharField(max_length=30)
passed = models.NullBooleanField()
review_completed = models.DateTimeField(null=True)
comment = models.CharField(null=True, blank=True, max_length=255)
reviewer = models.ForeignKey('users.UserProfile', null=True, blank=True)
objects = AdditionalReviewManager()
class Meta:
db_table = 'additional_review'
get_latest_by = 'created'
@property
def pending(self):
return self.passed is None
@property
def failed(self):
return self.passed is False
def __init__(self, *args, **kwargs):
super(AdditionalReview, self).__init__(*args, **kwargs)
from mkt.reviewers.utils import log_reviewer_action
self.log_reviewer_action = log_reviewer_action
def execute_post_review_task(self):
"""
Call the correct post-review function for the queue.
"""
# TODO: Pull this function from somewhere based on self.queue.
if self.passed is None:
raise ValueError('cannot execute post-review task when unreviewed')
elif self.passed:
tarako_passed(self)
action = amo.LOG.PASS_ADDITIONAL_REVIEW
else:
tarako_failed(self)
action = amo.LOG.FAIL_ADDITIONAL_REVIEW
self.log_reviewer_action(
self.app, self.reviewer, self.comment or '', action,
queue=self.queue)
def cleanup_queues(sender, instance, **kwargs):
RereviewQueue.objects.filter(addon=instance).delete()
EscalationQueue.objects.filter(addon=instance).delete()
models.signals.post_delete.connect(cleanup_queues, sender=Webapp,
dispatch_uid='queue-addon-cleanup')
| 32.569231 | 81 | 0.590526 | ["BSD-3-Clause"] | ngokevin/zamboni | mkt/reviewers/models.py | 14,819 | Python |
class Solution:
def backspaceCompare(self, S: str, T: str) -> bool:
s_bcount, t_bcount = 0, 0
s_idx, t_idx = len(S) - 1, len(T) - 1
while s_idx >= 0 or t_idx >= 0:
while s_idx >= 0:
if S[s_idx] == '#':
s_bcount += 1
s_idx -= 1
continue
if s_bcount > 0:
s_idx -= 1
s_bcount -= 1
else:
break
while t_idx >= 0:
if T[t_idx] == '#':
t_bcount += 1
t_idx -= 1
continue
if t_bcount > 0:
t_idx -= 1
t_bcount -= 1
else:
break
if s_idx >= 0 and t_idx >= 0 and S[s_idx] != T[t_idx]:
return False
elif (s_idx >= 0 and t_idx < 0) or (s_idx < 0 and t_idx >= 0):
return False
s_idx -= 1
t_idx -= 1
return True
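if __name__ == "__main__":
    # Illustrative usage (not part of the original file): both inputs reduce to
    # "ac" once the '#' backspaces are applied, so the first comparison is True.
    s = Solution()
    assert s.backspaceCompare("ab#c", "ad#c") is True
    assert s.backspaceCompare("a#c", "b") is False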
| 29.125 | 74 | 0.318455 | ["MIT"] | subwaymatch/leetcode | 0844_backspace_string_compare.py | 1,165 | Python |
_base_ = [
'../../_base_/schedules/schedule_1200e.py', '../../_base_/runtime_10e.py'
]
model = dict(
type='DBNet',
pretrained='torchvision://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
style='caffe'),
neck=dict(
type='FPNC', in_channels=[64, 128, 256, 512], lateral_channels=256),
bbox_head=dict(
type='DBHead',
text_repr_type='quad',
in_channels=256,
loss=dict(type='DBLoss', alpha=5.0, beta=10.0, bbce_loss=True)),
train_cfg=None,
test_cfg=None)
dataset_type = 'IcdarDataset'
data_root = 'data/icdar2015/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# for visualizing img, pls uncomment it.
# img_norm_cfg = dict(mean=[0, 0, 0], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='LoadTextAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(type='ColorJitter', brightness=32.0 / 255, saturation=0.5),
dict(type='Normalize', **img_norm_cfg),
# img aug
dict(
type='ImgAug',
args=[['Fliplr', 0.5],
dict(cls='Affine', rotate=[-10, 10]), ['Resize', [0.5, 3.0]]]),
# random crop
dict(type='EastRandomCrop', target_size=(640, 640)),
dict(type='DBNetTargets', shrink_ratio=0.4),
dict(type='Pad', size_divisor=32),
# for visualizing img and gts, pls set visualize = True
dict(
type='CustomFormatBundle',
keys=['gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'],
visualize=dict(flag=False, boundary_key='gt_shrink')),
dict(
type='Collect',
keys=['img', 'gt_shrink', 'gt_shrink_mask', 'gt_thr', 'gt_thr_mask'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 736),
flip=False,
transforms=[
dict(type='Resize', img_scale=(2944, 736), keep_ratio=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=16,
workers_per_gpu=8,
train=dict(
type=dataset_type,
ann_file=data_root + '/instances_training.json',
# for debugging top k imgs
# select_first_k=200,
img_prefix=data_root + '/imgs',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + '/instances_test.json',
img_prefix=data_root + '/imgs',
# select_first_k=100,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + '/instances_test.json',
img_prefix=data_root + '/imgs',
# select_first_k=100,
pipeline=test_pipeline))
evaluation = dict(interval=100, metric='hmean-iou')
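# Illustrative note (not part of the original config): configs like this one are
# normally consumed through mmcv's Config loader rather than imported directly,
# e.g.
#
#   from mmcv import Config
#   cfg = Config.fromfile('configs/textdet/dbnet/dbnet_r18_fpnc_1200e_icdar2015.py')
#   print(cfg.model.type, cfg.data.samples_per_gpu)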
| 32.402062 | 77 | 0.596564 | ["Apache-2.0"] | 2793145003/mmocr | configs/textdet/dbnet/dbnet_r18_fpnc_1200e_icdar2015.py | 3,143 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RVgam(RPackage):
"""Vector Generalized Linear and Additive Models.
An implementation of about 6 major classes of statistical regression
models. The central algorithm is Fisher scoring and iterative reweighted
least squares. At the heart of this package are the vector generalized
linear and additive model (VGLM/VGAM) classes. VGLMs can be loosely thought
of as multivariate GLMs. VGAMs are data-driven VGLMs that use smoothing.
The book "Vector Generalized Linear and Additive Models: With an
Implementation in R" (Yee, 2015) <DOI:10.1007/978-1-4939-2818-7> gives
details of the statistical framework and the package. Currently only
fixed-effects models are implemented. Many (150+) models and distributions
are estimated by maximum likelihood estimation (MLE) or penalized MLE. The
other classes are RR-VGLMs (reduced-rank VGLMs), quadratic RR-VGLMs,
reduced-rank VGAMs, RCIMs (row-column interaction models)---these classes
perform constrained and unconstrained quadratic ordination (CQO/UQO) models
in ecology, as well as constrained additive ordination (CAO). Hauck-Donner
effect detection is implemented. Note that these functions are subject to
change; see the NEWS and ChangeLog files for latest changes."""
cran = "VGAM"
version('1.1-6', sha256='446a61bac5dd4794e05d20c2f3901eec54afac52c6e23ce2787c5575170dd417')
version('1.1-5', sha256='30190b150f3e5478137d288a45f575b2654ad7c29254b0a1fe5c954ee010a1bb')
version('1.1-1', sha256='de192bd65a7e8818728008de8e60e6dd3b61a13616c887a43e0ccc8147c7da52')
version('1.0-6', sha256='121820a167411e847b41bdcb0028b55842d0ccc0c3471755c67449837e0fe3b9')
version('1.0-4', sha256='e581985f78ef8b866d0e810b2727061bb9c9bc177b2c9090aebb3a35ae87a964')
version('1.0-3', sha256='23bb6690ae15e9ede3198ef55d5d3236c279aa8fa6bd4f7350242379d9d72673')
version('1.0-2', sha256='03561bf484f97b616b1979132c759c5faa69c5d5a4cfd7aea2ea6d3612ac0961')
version('1.0-1', sha256='c066864e406fcee23f383a28299dba3cf83356e5b68df16324885afac87a05ea')
version('1.0-0', sha256='6acdd7db49c0987c565870afe593160ceba72a6ca4a84e6da3cf6f74d1fa02e1')
depends_on('r@3.0.0:', type=('build', 'run'))
depends_on('r@3.1.0:', type=('build', 'run'), when='@1.0-2:')
depends_on('r@3.4.0:', type=('build', 'run'), when='@1.0-4:')
depends_on('r@3.5.0:', type=('build', 'run'), when='@1.1-5:')
| 58.933333 | 95 | 0.761312 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | jmellorcrummey/spack | var/spack/repos/builtin/packages/r-vgam/package.py | 2,652 | Python |
from django import db
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.contenttypes.views import shortcut
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
class ContentTypesTests(TestCase):
def setUp(self):
# First, let's make sure we're dealing with a blank slate (and that
# DEBUG is on so that queries get logged)
self.old_DEBUG = settings.DEBUG
self.old_Site_meta_installed = Site._meta.installed
settings.DEBUG = True
ContentType.objects.clear_cache()
db.reset_queries()
def tearDown(self):
settings.DEBUG = self.old_DEBUG
Site._meta.installed = self.old_Site_meta_installed
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model or
by ID -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
# A second hit, though, won't hit the DB, nor will a lookup by ID
ct = ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ContentType.objects.get_for_id(ct.id)
self.assertEqual(1, len(db.connection.queries))
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
ContentType.objects.get_for_model(ContentType)
len(db.connection.queries)
self.assertEqual(2, len(db.connection.queries))
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
from django.contrib.auth.models import User
user_ct = ContentType.objects.get_for_model(User)
obj = User.objects.create(username="john")
if Site._meta.installed:
current_site = Site.objects.get_current()
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://%s/users/john/" % current_site.domain,
response._headers.get("location")[1])
Site._meta.installed = False
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/",
response._headers.get("location")[1])
| 38.786667 | 77 | 0.663802 | ["BSD-3-Clause"] | coderanger/django | django/contrib/contenttypes/tests.py | 2,909 | Python |
"""
BasicSR/codes/dataops/common.py (8-Nov-20)
https://github.com/victorca25/BasicSR/blob/dev2/codes/dataops/common.py
"""
import os
import math
import pickle
import random
import numpy as np
import torch
import cv2
import logging
import copy
from torchvision.utils import make_grid
#from dataops.colors import *
from .colors import *
#from dataops.debug import tmp_vis, describe_numpy, describe_tensor
####################
# Files & IO
####################
###################### get image path list ######################
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.dng', '.DNG', '.webp','.npy', '.NPY']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def _get_paths_from_images(path):
'''get image path list from image folder'''
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for dirpath, _, fnames in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images
def _get_paths_from_lmdb(dataroot):
'''get image path list from lmdb'''
import lmdb
env = lmdb.open(dataroot, readonly=True, lock=False, readahead=False, meminit=False)
keys_cache_file = os.path.join(dataroot, '_keys_cache.p')
logger = logging.getLogger('base')
if os.path.isfile(keys_cache_file):
logger.info('Read lmdb keys from cache: {}'.format(keys_cache_file))
keys = pickle.load(open(keys_cache_file, "rb"))
else:
with env.begin(write=False) as txn:
logger.info('Creating lmdb keys cache: {}'.format(keys_cache_file))
keys = [key.decode('ascii') for key, _ in txn.cursor()]
pickle.dump(keys, open(keys_cache_file, 'wb'))
paths = sorted([key for key in keys if not key.endswith('.meta')])
return env, paths
def get_image_paths(data_type, dataroot):
'''get image path list
support lmdb or image files'''
env, paths = None, None
if dataroot is not None:
if data_type == 'lmdb':
env, paths = _get_paths_from_lmdb(dataroot)
elif data_type == 'img':
paths = sorted(_get_paths_from_images(dataroot))
else:
raise NotImplementedError('data_type [{:s}] is not recognized.'.format(data_type))
return env, paths
###################### read images ######################
def _read_lmdb_img(env, path):
with env.begin(write=False) as txn:
buf = txn.get(path.encode('ascii'))
buf_meta = txn.get((path + '.meta').encode('ascii')).decode('ascii')
img_flat = np.frombuffer(buf, dtype=np.uint8)
H, W, C = [int(s) for s in buf_meta.split(',')]
img = img_flat.reshape(H, W, C)
return img
def read_img(env, path, out_nc=3, fix_channels=True):
'''
Reads image using cv2 (rawpy if dng) or from lmdb by default
(can also use using PIL instead of cv2)
Arguments:
out_nc: Desired number of channels
fix_channels: changes the images to the desired number of channels
Output:
Numpy uint8, HWC, BGR, [0,255] by default
'''
img = None
if env is None: # img
if(path[-3:].lower() == 'dng'): # if image is a DNG
import rawpy
with rawpy.imread(path) as raw:
img = raw.postprocess()
        elif(path[-3:].lower() == 'npy'): # if image is a NPY numpy array
with open(path, 'rb') as f:
img = np.load(f)
else: # else, if image can be read by cv2
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
#TODO: add variable detecting if cv2 is not available and try PIL instead
# elif: # using PIL instead of OpenCV
# img = Image.open(path).convert('RGB')
# else: # For other images unrecognized by cv2
# import matplotlib.pyplot as plt
# img = (255*plt.imread(path)[:,:,:3]).astype('uint8')
else:
img = _read_lmdb_img(env, path)
# if not img:
# raise ValueError(f"Failed to read image: {path}")
if fix_channels:
img = fix_img_channels(img, out_nc)
return img
def fix_img_channels(img, out_nc):
'''
fix image channels to the expected number
'''
# if image has only 2 dimensions, add "channel" dimension (1)
if img.ndim == 2:
#img = img[..., np.newaxis] #alt
#img = np.expand_dims(img, axis=2)
img = np.tile(np.expand_dims(img, axis=2), (1, 1, 3))
# special case: properly remove alpha channel
if out_nc == 3 and img.shape[2] == 4:
img = bgra2rgb(img)
# remove all extra channels
elif img.shape[2] > out_nc:
img = img[:, :, :out_nc]
# if alpha is expected, add solid alpha channel
elif img.shape[2] == 3 and out_nc == 4:
img = np.dstack((img, np.full(img.shape[:-1], 255, dtype=np.uint8)))
return img
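def _fix_img_channels_demo():
    # Illustrative sketch (not part of the original module): a 2D grayscale
    # array is expanded to 3 channels, and a solid alpha plane is appended
    # when 4 output channels are requested.
    gray = np.zeros((4, 4), dtype=np.uint8)
    assert fix_img_channels(gray, 3).shape == (4, 4, 3)
    assert fix_img_channels(gray, 4).shape == (4, 4, 4)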
####################
# image processing
# process on numpy image
####################
def bgra2rgb(img):
'''
cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) has an issue removing the alpha channel,
this gets rid of wrong transparent colors that can harm training
'''
if img.shape[2] == 4:
#b, g, r, a = cv2.split((img*255).astype(np.uint8))
b, g, r, a = cv2.split((img.astype(np.uint8)))
b = cv2.bitwise_and(b, b, mask=a)
g = cv2.bitwise_and(g, g, mask=a)
r = cv2.bitwise_and(r, r, mask=a)
#return cv2.merge([b, g, r]).astype(np.float32)/255.
return cv2.merge([b, g, r])
return img
def channel_convert(in_c, tar_type, img_list):
# conversion among BGR, gray and y
# Note: OpenCV uses inverted channels BGR, instead of RGB.
# If images are loaded with something other than OpenCV,
# check that the channels are in the correct order and use
# the alternative conversion functions.
#if in_c == 4 and tar_type == 'RGB-A': # BGRA to BGR, remove alpha channel
#return [cv2.cvtColor(img, cv2.COLOR_BGRA2BGR) for img in img_list]
#return [bgra2rgb(img) for img in img_list]
if in_c == 3 and tar_type == 'gray': # BGR to gray
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
elif in_c == 3 and tar_type == 'RGB-LAB': #RGB to LAB
return [cv2.cvtColor(img, cv2.COLOR_BGR2LAB) for img in img_list]
    elif in_c == 3 and tar_type == 'LAB-RGB': #LAB to RGB
return [cv2.cvtColor(img, cv2.COLOR_LAB2BGR) for img in img_list]
elif in_c == 3 and tar_type == 'y': # BGR to y
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
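def _ycbcr_range_demo():
    # Illustrative sketch (not part of the original module): the Matlab-style
    # conversion above uses the studio-swing range, so for uint8 inputs pure
    # white maps to Y=235 and pure black to Y=16.
    white = np.full((1, 1, 3), 255, dtype=np.uint8)
    black = np.zeros((1, 1, 3), dtype=np.uint8)
    assert rgb2ycbcr(white, only_y=True)[0, 0] == 235
    assert rgb2ycbcr(black, only_y=True)[0, 0] == 16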
def bgr2ycbcr(img, only_y=True, separate=False):
'''bgr version of matlab rgb2ycbcr
Python opencv library (cv2) cv2.COLOR_BGR2YCrCb has
different parameters with MATLAB color convertion.
only_y: only return Y channel
separate: if true, will returng the channels as
separate images
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# convert
if only_y:
rlt = np.dot(img_ , [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(img_ , [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
if separate:
rlt = rlt.astype(in_img_type)
# y, cb, cr
return rlt[:, :, 0], rlt[:, :, 1], rlt[:, :, 2]
else:
return rlt.astype(in_img_type)
'''
def ycbcr2rgb_(img, only_y=True):
"""same as matlab ycbcr2rgb
(Note: this implementation is the original from BasicSR, but
appears to be for ycrcb, like cv2)
Input:
uint8, [0, 255]
float, [0, 1]
"""
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
# original (for ycrcb):
rlt = np.matmul(img_ , [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
#alternative conversion:
# xform = np.array([[1, 0, 1.402], [1, -0.34414, -.71414], [1, 1.772, 0]])
# img_[:, :, [1, 2]] -= 128
# rlt = img_.dot(xform.T)
np.putmask(rlt, rlt > 255, 255)
np.putmask(rlt, rlt < 0, 0)
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
def ycbcr2rgb(img, only_y=True):
'''
bgr version of matlab ycbcr2rgb
Python opencv library (cv2) cv2.COLOR_YCrCb2BGR has
different parameters to MATLAB color convertion.
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
img_ = img.astype(np.float32)
if in_img_type != np.uint8:
img_ *= 255.
# to make ycrcb like cv2
# rlt = rlt[:, :, (0, 2, 1)]
# convert
mat = np.array([[24.966, 128.553, 65.481],[112, -74.203, -37.797], [-18.214, -93.786, 112.0]])
mat = np.linalg.inv(mat.T) * 255
offset = np.array([[[16, 128, 128]]])
rlt = np.dot((img_ - offset), mat)
rlt = np.clip(rlt, 0, 255)
## rlt = np.rint(rlt).astype('uint8')
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
'''
#TODO: TMP RGB version, to check (PIL)
def rgb2ycbcr(img_rgb):
## the range of img_rgb should be (0, 1)
img_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] + 16 / 255.0
img_cb = -0.148 * img_rgb[:, :, 0] - 0.291 * img_rgb[:, :, 1] + 0.439 * img_rgb[:, :, 2] + 128 / 255.0
img_cr = 0.439 * img_rgb[:, :, 0] - 0.368 * img_rgb[:, :, 1] - 0.071 * img_rgb[:, :, 2] + 128 / 255.0
return img_y, img_cb, img_cr
#TODO: TMP RGB version, to check (PIL)
def ycbcr2rgb(img_ycbcr):
## the range of img_ycbcr should be (0, 1)
img_r = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 1.596 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_g = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) - 0.392 * (img_ycbcr[:, :, 1] - 128 / 255.0) - 0.813 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_b = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 2.017 * (img_ycbcr[:, :, 1] - 128 / 255.0)
img_r = img_r[:, :, np.newaxis]
img_g = img_g[:, :, np.newaxis]
img_b = img_b[:, :, np.newaxis]
img_rgb = np.concatenate((img_r, img_g, img_b), 2)
return img_rgb
'''
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
#TODO: this should probably be elsewhere (augmentations.py)
def augment(img_list, hflip=True, rot=True):
# horizontal flip OR rotate
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
#rot90n = rot and random.random() < 0.5
def _augment(img):
if hflip: img = np.flip(img, axis=1) #img[:, ::-1, :]
if vflip: img = np.flip(img, axis=0) #img[::-1, :, :]
#if rot90: img = img.transpose(1, 0, 2)
if rot90: img = np.rot90(img, 1) #90 degrees # In PIL: img.transpose(Image.ROTATE_90)
#if rot90n: img = np.rot90(img, -1) #-90 degrees
return img
return [_augment(img) for img in img_list]
####################
# Normalization functions
####################
#TODO: Could also automatically detect the possible range with min and max, like in def ssim()
def denorm(x, min_max=(-1.0, 1.0)):
'''
Denormalize from [-1,1] range to [0,1]
formula: xi' = (xi - mu)/sigma
Example: "out = (x + 1.0) / 2.0" for denorm
range (-1,1) to (0,1)
for use with proper act in Generator output (ie. tanh)
'''
out = (x - min_max[0]) / (min_max[1] - min_max[0])
if isinstance(x, torch.Tensor):
return out.clamp(0, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, 0, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
def norm(x):
#Normalize (z-norm) from [0,1] range to [-1,1]
out = (x - 0.5) * 2.0
if isinstance(x, torch.Tensor):
return out.clamp(-1, 1)
elif isinstance(x, np.ndarray):
return np.clip(out, -1, 1)
else:
raise TypeError("Got unexpected object type, expected torch.Tensor or \
np.ndarray")
####################
# np and tensor conversions
####################
#2tensor
def np2tensor(img, bgr2rgb=True, data_range=1., normalize=False, change_range=True, add_batch=True):
"""
Converts a numpy image array into a Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added
"""
if not isinstance(img, np.ndarray): #images expected to be uint8 -> 255
raise TypeError("Got unexpected object type, expected np.ndarray")
#check how many channels the image has, then condition, like in my BasicSR. ie. RGB, RGBA, Gray
#if bgr2rgb:
#img = img[:, :, [2, 1, 0]] #BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
if np.issubdtype(img.dtype, np.integer):
info = np.iinfo
elif np.issubdtype(img.dtype, np.floating):
info = np.finfo
img = img*data_range/info(img.dtype).max #uint8 = /255
img = torch.from_numpy(np.ascontiguousarray(np.transpose(img, (2, 0, 1)))).float() #"HWC to CHW" and "numpy to tensor"
if bgr2rgb:
if img.shape[0] == 3: #RGB
#BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img = bgr_to_rgb(img)
elif img.shape[0] == 4: #RGBA
#BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.)
img = bgra_to_rgba(img)
if add_batch:
img.unsqueeze_(0) # Add fake batch dimension = 1 . squeeze() will remove the dimensions of size 1
if normalize:
img = norm(img)
return img
#2np
def tensor2np(img, rgb2bgr=True, remove_batch=True, data_range=255,
denormalize=False, change_range=True, imtype=np.uint8):
"""
Converts a Tensor array into a numpy image array.
Parameters:
img (tensor): the input image tensor array
4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
imtype (type): the desired type of the converted numpy array (np.uint8
default)
Output:
img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
"""
if not isinstance(img, torch.Tensor):
raise TypeError("Got unexpected object type, expected torch.Tensor")
n_dim = img.dim()
#TODO: Check: could denormalize here in tensor form instead, but end result is the same
img = img.float().cpu()
if n_dim == 4 or n_dim == 3:
#if n_dim == 4, has to convert to 3 dimensions, either removing batch or by creating a grid
if n_dim == 4 and remove_batch:
if img.shape[0] > 1:
# leave only the first image in the batch
img = img[0,...]
else:
# remove a fake batch dimension
img = img.squeeze()
# squeeze removes batch and channel of grayscale images (dimensions = 1)
if len(img.shape) < 3:
#add back the lost channel dimension
img = img.unsqueeze(dim=0)
# convert images in batch (BCHW) to a grid of all images (C B*H B*W)
else:
n_img = len(img)
img = make_grid(img, nrow=int(math.sqrt(n_img)), normalize=False)
if img.shape[0] == 3 and rgb2bgr: #RGB
#RGB to BGR -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgb_to_bgr(img).numpy()
elif img.shape[0] == 4 and rgb2bgr: #RGBA
#RGBA to BGRA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = rgba_to_bgra(img).numpy()
else:
img_np = img.numpy()
img_np = np.transpose(img_np, (1, 2, 0)) # "CHW to HWC" -> # HWC, BGR
elif n_dim == 2:
img_np = img.numpy()
else:
raise TypeError(
'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
#if rgb2bgr:
#img_np = img_np[[2, 1, 0], :, :] #RGB to BGR -> in numpy, if using OpenCV, else not needed. Only if image has colors.
#TODO: Check: could denormalize in the begining in tensor form instead
if denormalize:
img_np = denorm(img_np) #denormalize if needed
if change_range:
img_np = np.clip(data_range*img_np,0,data_range).round() #clip to the data_range
    # Important. Unlike matlab, numpy.uint8() WILL NOT round by default.
#has to be in range (0,255) before changing to np.uint8, else np.float32
return img_np.astype(imtype)
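def _np_tensor_roundtrip_demo():
    # Illustrative sketch (not part of the original module): a uint8 HWC BGR
    # numpy image becomes a [0,1] CHW RGB tensor with a batch dimension, and
    # tensor2np converts it back to uint8 HWC BGR.
    img = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)
    t = np2tensor(img, bgr2rgb=True, add_batch=True)
    assert t.shape == (1, 3, 8, 8)
    back = tensor2np(t, rgb2bgr=True, remove_batch=True)
    assert back.shape == (8, 8, 3) and back.dtype == np.uint8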
####################
# Prepare Images
####################
# https://github.com/sunreef/BlindSR/blob/master/src/image_utils.py
def patchify_tensor(features, patch_size, overlap=10):
batch_size, channels, height, width = features.size()
effective_patch_size = patch_size - overlap
n_patches_height = (height // effective_patch_size)
n_patches_width = (width // effective_patch_size)
if n_patches_height * effective_patch_size < height:
n_patches_height += 1
if n_patches_width * effective_patch_size < width:
n_patches_width += 1
patches = []
for b in range(batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, height - patch_size)
patch_start_width = min(w * effective_patch_size, width - patch_size)
patches.append(features[b:b+1, :,
patch_start_height: patch_start_height + patch_size,
patch_start_width: patch_start_width + patch_size])
return torch.cat(patches, 0)
def recompose_tensor(patches, full_height, full_width, overlap=10):
batch_size, channels, patch_size, _ = patches.size()
effective_patch_size = patch_size - overlap
n_patches_height = (full_height // effective_patch_size)
n_patches_width = (full_width // effective_patch_size)
if n_patches_height * effective_patch_size < full_height:
n_patches_height += 1
if n_patches_width * effective_patch_size < full_width:
n_patches_width += 1
n_patches = n_patches_height * n_patches_width
if batch_size % n_patches != 0:
print("Error: The number of patches provided to the recompose function does not match the number of patches in each image.")
final_batch_size = batch_size // n_patches
blending_in = torch.linspace(0.1, 1.0, overlap)
blending_out = torch.linspace(1.0, 0.1, overlap)
middle_part = torch.ones(patch_size - 2 * overlap)
blending_profile = torch.cat([blending_in, middle_part, blending_out], 0)
horizontal_blending = blending_profile[None].repeat(patch_size, 1)
vertical_blending = blending_profile[:, None].repeat(1, patch_size)
blending_patch = horizontal_blending * vertical_blending
blending_image = torch.zeros(1, channels, full_height, full_width)
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
blending_image[0, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += blending_patch[None]
recomposed_tensor = torch.zeros(final_batch_size, channels, full_height, full_width)
if patches.is_cuda:
blending_patch = blending_patch.cuda()
blending_image = blending_image.cuda()
recomposed_tensor = recomposed_tensor.cuda()
patch_index = 0
for b in range(final_batch_size):
for h in range(n_patches_height):
for w in range(n_patches_width):
patch_start_height = min(h * effective_patch_size, full_height - patch_size)
patch_start_width = min(w * effective_patch_size, full_width - patch_size)
recomposed_tensor[b, :, patch_start_height: patch_start_height + patch_size, patch_start_width: patch_start_width + patch_size] += patches[patch_index] * blending_patch
patch_index += 1
recomposed_tensor /= blending_image
return recomposed_tensor
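def _patchify_roundtrip_demo():
    # Illustrative sketch (not part of the original module): splitting a feature
    # map into overlapping patches and blending them back with recompose_tensor
    # preserves the original tensor shape.
    feats = torch.rand(1, 3, 100, 100)
    patches = patchify_tensor(feats, patch_size=40, overlap=10)
    merged = recompose_tensor(patches, full_height=100, full_width=100, overlap=10)
    assert merged.shape == feats.shape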
#TODO: imresize could be an independent file (imresize.py)
####################
# Matlab imresize
####################
# These next functions are all interpolation methods. x is the distance from the left pixel center
def cubic(x):
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
def box(x):
return ((-0.5 <= x) & (x < 0.5)) * 1.0
def linear(x):
return (x + 1) * ((-1 <= x) & (x < 0)) + (1 - x) * ((0 <= x) & (x <= 1))
def lanczos2(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/2) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 2) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 2))
def lanczos3(x):
return (((torch.sin(math.pi*x) * torch.sin(math.pi*x/3) + torch.finfo(torch.float32).eps) /
((math.pi**2 * x**2 / 3) + torch.finfo(torch.float32).eps))
* (torch.abs(x) < 3))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
# Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply kernel
if (scale < 1) and (antialiasing):
weights = scale * kernel(distance_to_center * scale)
else:
weights = kernel(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
def imresize(img, scale, antialiasing=True, interpolation=None):
# The scale should be the same for H and W
# input: img: CHW RGB [0,1]
# output: CHW RGB [0,1] w/o round
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[0, i, :] = img_aug[0, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[1, i, :] = img_aug[1, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
out_1[2, i, :] = img_aug[2, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[0, :, i] = out_1_aug[0, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[1, :, i] = out_1_aug[1, :, idx:idx + kernel_width].mv(weights_W[i])
out_2[2, :, i] = out_1_aug[2, :, idx:idx + kernel_width].mv(weights_W[i])
return out_2
def imresize_np(img, scale, antialiasing=True, interpolation=None):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC BGR [0,1]
# output: HWC BGR [0,1] w/o round
change_range = False
if img.max() > 1:
img_type = img.dtype
if np.issubdtype(img_type, np.integer):
info = np.iinfo
elif np.issubdtype(img_type, np.floating):
info = np.finfo
img = img/info(img_type).max
change_range = True
img = torch.from_numpy(img)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
# Choose interpolation method, each method has the matching kernel size
kernel, kernel_width = {
"cubic": (cubic, 4.0),
"lanczos2": (lanczos2, 4.0),
"lanczos3": (lanczos3, 6.0),
"box": (box, 1.0),
"linear": (linear, 2.0),
None: (cubic, 4.0) # set default interpolation method as cubic
}.get(interpolation)
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
out_1[i, :, 0] = img_aug[idx:idx + kernel_width, :, 0].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 1] = img_aug[idx:idx + kernel_width, :, 1].transpose(0, 1).mv(weights_H[i])
out_1[i, :, 2] = img_aug[idx:idx + kernel_width, :, 2].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
out_2[:, i, 0] = out_1_aug[:, idx:idx + kernel_width, 0].mv(weights_W[i])
out_2[:, i, 1] = out_1_aug[:, idx:idx + kernel_width, 1].mv(weights_W[i])
out_2[:, i, 2] = out_1_aug[:, idx:idx + kernel_width, 2].mv(weights_W[i])
out_2 = out_2.numpy().clip(0,1)
if change_range:
out_2 = out_2*info(img_type).max #uint8 = 255
out_2 = out_2.astype(img_type)
return out_2
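def _imresize_np_demo():
    # Illustrative sketch (not part of the original module): downscale a uint8
    # HWC BGR image by 1/2 with the Matlab-style resize defined above; dtype
    # and channel count are preserved.
    img = np.full((64, 64, 3), 128, dtype=np.uint8)
    small = imresize_np(img, 1 / 2, antialiasing=True)
    assert small.shape == (32, 32, 3) and small.dtype == np.uint8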
if __name__ == '__main__':
# test imresize function
# read images
img = cv2.imread('test.png')
img = img * 1.0 / 255
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
# imresize
scale = 1 / 4
import time
total_time = 0
for i in range(10):
start_time = time.time()
rlt = imresize(img, scale, antialiasing=True)
use_time = time.time() - start_time
total_time += use_time
print('average time: {}'.format(total_time / 10))
import torchvision.utils
torchvision.utils.save_image(
(rlt * 255).round() / 255, 'rlt.png', nrow=1, padding=0, normalize=False)
| 38.010067 | 184 | 0.613872 | ["Apache-2.0"] | f74066357/Image_Inpainting | mmedit/models/inpaintors/vic/common.py | 33,981 | Python |
# -*- coding: utf-8 -*-
from numpy import NaN as npNaN
from pandas import DataFrame, Series
# from pandas_ta.overlap.ma import ma
from .ma import ma
from pandas_ta.utils import get_offset, verify_series
def hilo(high, low, close, high_length=None, low_length=None, mamode=None, offset=None, **kwargs):
"""Indicator: Gann HiLo (HiLo)"""
# Validate Arguments
high = verify_series(high)
low = verify_series(low)
close = verify_series(close)
high_length = int(high_length) if high_length and high_length > 0 else 13
low_length = int(low_length) if low_length and low_length > 0 else 21
mamode = mamode.lower() if isinstance(mamode, str) else "sma"
offset = get_offset(offset)
# Calculate Result
m = close.size
hilo = Series(npNaN, index=close.index)
long = Series(npNaN, index=close.index)
short = Series(npNaN, index=close.index)
high_ma = ma(mamode, high, length=high_length)
low_ma = ma(mamode, low, length=low_length)
for i in range(1, m):
if close.iloc[i] > high_ma.iloc[i - 1]:
hilo.iloc[i] = long.iloc[i] = low_ma.iloc[i]
elif close.iloc[i] < low_ma.iloc[i - 1]:
hilo.iloc[i] = short.iloc[i] = high_ma.iloc[i]
else:
hilo.iloc[i] = hilo.iloc[i - 1]
long.iloc[i] = short.iloc[i] = hilo.iloc[i - 1]
# Offset
if offset != 0:
hilo = hilo.shift(offset)
long = long.shift(offset)
short = short.shift(offset)
# Handle fills
if "fillna" in kwargs:
hilo.fillna(kwargs["fillna"], inplace=True)
long.fillna(kwargs["fillna"], inplace=True)
short.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
hilo.fillna(method=kwargs["fill_method"], inplace=True)
long.fillna(method=kwargs["fill_method"], inplace=True)
short.fillna(method=kwargs["fill_method"], inplace=True)
# Name & Category
_props = f"_{high_length}_{low_length}"
data = {f"HILO{_props}": hilo, f"HILOl{_props}": long, f"HILOs{_props}": short}
df = DataFrame(data, index=close.index)
df.name = f"HILO{_props}"
df.category = "overlap"
return df
hilo.__doc__ = \
"""Gann HiLo Activator(HiLo)
The Gann High Low Activator Indicator was created by Robert Krausz in a 1998
issue of Stocks & Commodities Magazine. It is a moving average based trend
indicator consisting of two different simple moving averages.
The indicator tracks both curves (of the highs and the lows). The close of the
bar defines which of the two gets plotted.
Increasing high_length and decreasing low_length better for short trades,
vice versa for long positions.
Sources:
https://www.sierrachart.com/index.php?page=doc/StudiesReference.php&ID=447&Name=Gann_HiLo_Activator
https://www.tradingtechnologies.com/help/x-study/technical-indicator-definitions/simple-moving-average-sma/
https://www.tradingview.com/script/XNQSLIYb-Gann-High-Low/
Calculation:
Default Inputs:
high_length=13, low_length=21, mamode="sma"
EMA = Exponential Moving Average
HMA = Hull Moving Average
SMA = Simple Moving Average # Default
if "ema":
high_ma = EMA(high, high_length)
low_ma = EMA(low, low_length)
elif "hma":
high_ma = HMA(high, high_length)
low_ma = HMA(low, low_length)
else: # "sma"
high_ma = SMA(high, high_length)
low_ma = SMA(low, low_length)
# Similar to Supertrend MA selection
hilo = Series(npNaN, index=close.index)
for i in range(1, m):
if close.iloc[i] > high_ma.iloc[i - 1]:
hilo.iloc[i] = low_ma.iloc[i]
elif close.iloc[i] < low_ma.iloc[i - 1]:
hilo.iloc[i] = high_ma.iloc[i]
else:
hilo.iloc[i] = hilo.iloc[i - 1]
Args:
high (pd.Series): Series of 'high's
low (pd.Series): Series of 'low's
close (pd.Series): Series of 'close's
high_length (int): It's period. Default: 13
low_length (int): It's period. Default: 21
    mamode (str): Options: 'sma', 'ema' or 'hma'. Default: 'sma'
offset (int): How many periods to offset the result. Default: 0
Kwargs:
adjust (bool): Default: True
presma (bool, optional): If True, uses SMA for initial value.
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.DataFrame: HILO (line), HILOl (long), HILOs (short) columns.
"""
| 34.734375 | 111 | 0.65857 | ["MIT"] | MyBourse/pandas-ta | pandas_ta/overlap/hilo.py | 4,446 | Python |
import github
import pandas as pd
def get_issues(repo_addr):
g = github.Github()
repo = g.get_repo(repo_addr)
return repo.get_issues()
def fetch_issue_activity(repo_addr):
g = github.Github()
issues = g.get_repo(repo_addr).get_issues(state="all")
events = []
for issue in issues:
if issue.pull_request is not None:
continue
events.append((issue.created_at, 1))
if issue.state == "closed":
events.append((issue.closed_at, -1))
df = pd.DataFrame(events, columns=["date", "action"])
df.sort_values("date", inplace=True)
df["open"] = df["action"].cumsum()
df["total_events"] = abs(df["action"]).cumsum()
df["closed"] = (df["total_events"] - df["open"]) // 2
return df
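if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): anonymous
    # GitHub API access is heavily rate-limited, and "octocat/Hello-World" is
    # only a placeholder repository slug.
    activity = fetch_issue_activity("octocat/Hello-World")
    print(activity[["date", "open", "closed"]].tail())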
| 24.15625 | 58 | 0.614489 | ["MIT"] | autt/gathering-leto | src/data/data/__init__.py | 773 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
if __name__ == '__main__':
goods = []
while True:
command = input(">>> ").lower()
if command == 'exit':
break
elif command == 'add':
            name = input("Product name: ")
            shop = input("Shop name: ")
            price = float(input("Price: "))
good = {
'name': name,
'shop': shop,
'price': price,
}
goods.append(good)
            # Sort the list if necessary.
if len(goods) > 1:
goods.sort(key=lambda item: item.get('shop', ''))
elif command == 'list':
line = '+-{}-+-{}-+-{}-+-{}-+'.format(
'-' * 4,
'-' * 30,
'-' * 20,
'-' * 8
)
print(line)
print(
'| {:^4} | {:^30} | {:^20} | {:^8} |'.format(
"№",
"Название",
"Магазин",
"Цена"
)
)
print(line)
for idx, good in enumerate(goods, 1):
print(
'| {:>4} | {:<30} | {:<20} | {:>8} |'.format(
idx,
good.get('name', ''),
good.get('shop', ''),
good.get('price', 0)
)
)
print(line)
elif command.startswith('select '):
parts = command.split(' ', maxsplit=1)
shopName = parts[1]
count = 0
for good in goods:
if shopName == good.get('shop', shopName):
count += 1
print(
'{:>4}: {}'.format(count, good.get('name', ''))
)
if count == 0:
print("Такого магазина не существует либо нет товаров.")
elif command == 'help':
print("Список команд:\n")
print("add - добавить товар;")
print("list - вывести список товаров;")
print("select <имя магазина> - запросить товары магазина;")
print("help - отобразить справку;")
print("exit - завершить работу с программой.")
else:
print(f"Неизвестная команда {command}", file=sys.stderr) | 29.011765 | 72 | 0.37794 | [
"MIT"
] | surai5a/laba_2_6 | pythonProject/ind.py | 2,745 | Python |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 08:32:13 2019
@author: Thiago
"""
import numpy as np
import pylab as pl
#%%
# Simulation of a random variable
def va_estoque():
p=np.array([0.1, 0.2, 0.6, 0.1])
x=np.random.rand()
if 0 < x <= p[0]:
return 1
elif p[0] < x <= p[0]+p[1]:
return 2
elif p[0]+p[1] < x <= p[0]+p[1]+p[2]:
return 3
elif p[0]+p[1]+p[2] < x <= 1.0:
return 4
v = [va_estoque() for i in range(100000)]
pl.hist(v,)
pl.show()
#%%
# inventory simulation
M, T, estoque, lucro = 3, 3, 10, 0
R = 10000
for i in range(R):
Y=va_estoque()
lucro += 20*min(estoque, Y)
    estoque = max(0, estoque-Y)
lucro -= 5*estoque
if estoque<M:
estoque += T
lucro -= 10*T
lucro /= R
print(M, T, lucro, estoque)
#%%
# Ehrenfest urn simulation
N, s = 100, []
for j in range(1000):
v = [True for i in range(N)]
for i in range(1000):
k=np.random.choice(N)
v[k] = not v[k]
x = sum(v) / N
s.append(x)
pl.hist(s)
#%%
# Law of large numbers
np.random.seed(0)
S = [1, 2, 3, 4, 5, 6]
n_vals = np.logspace(1, 5, num=200)
s=[]
for val in n_vals:
np.random.seed(0)
n = int(val)
x = np.random.choice(S,n)
p=sum(x==3)/n
s.append([n,p])
s=np.array(s)
pl.semilogx(s[:,1])
pl.axhline(1./len(S),c='r')
#%%
# ergodic processes
#%%
'''
s = 3000
for n in [1,2,3,5,10,50,100,200,400,1000]:
z=np.zeros(s)
for k in range(n):
x = np.random.uniform(-1, 1, s)
z+=x
x = z/np.sqrt(n)
pl.figure(n)
sns.distplot(y, bins=12, rug=True)
pl.title('N = ' + str())
'''
| 15.099099 | 42 | 0.50716 | ["MIT"] | thiago9864/introducao_modelagem | aulas/05-06/variaveis_aleatorias.py | 1,683 | Python |
"""Test GeoTIFF as process output."""
import numpy as np
import numpy.ma as ma
import os
import pytest
import rasterio
from rasterio.io import MemoryFile
from rio_cogeo.cogeo import cog_validate
import shutil
from tilematrix import Bounds
import warnings
import mapchete
from mapchete.errors import MapcheteConfigError
from mapchete.io import path_exists
from mapchete.formats.default import gtiff
from mapchete.tile import BufferedTilePyramid
def test_output_data(mp_tmpdir):
"""Check GeoTIFF as output data."""
output_params = dict(
grid="geodetic",
format="GeoTIFF",
path=mp_tmpdir,
pixelbuffer=0,
metatiling=1,
bands=1,
dtype="int16",
delimiters=dict(
bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
effective_bounds=Bounds(-180.439453125, -90.0, 180.439453125, 90.0),
zoom=[5],
process_bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
),
)
output = gtiff.OutputDataWriter(output_params)
assert output.path == mp_tmpdir
assert output.file_extension == ".tif"
tp = BufferedTilePyramid("geodetic")
tile = tp.tile(5, 5, 5)
# get_path
assert output.get_path(tile) == os.path.join(*[mp_tmpdir, "5", "5", "5" + ".tif"])
# prepare_path
try:
temp_dir = os.path.join(*[mp_tmpdir, "5", "5"])
output.prepare_path(tile)
assert os.path.isdir(temp_dir)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
# profile
assert isinstance(output.profile(tile), dict)
# write
try:
data = np.ones((1,) + tile.shape) * 128
output.write(tile, data)
# tiles_exist
assert output.tiles_exist(tile)
# read
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.any()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
# read empty
try:
data = output.read(tile)
assert isinstance(data, np.ndarray)
assert data[0].mask.all()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
# empty
try:
empty = output.empty(tile)
assert isinstance(empty, ma.MaskedArray)
assert not empty.any()
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
# deflate with predictor
try:
# with pytest.deprecated_call():
output_params.update(compress="deflate", predictor=2)
output = gtiff.OutputDataWriter(output_params)
assert output.profile(tile)["compress"] == "deflate"
assert output.profile(tile)["predictor"] == 2
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
# using deprecated "compression" property
try:
with pytest.deprecated_call():
output_params.update(compression="deflate", predictor=2)
output = gtiff.OutputDataWriter(output_params)
assert output.profile(tile)["compress"] == "deflate"
assert output.profile(tile)["predictor"] == 2
finally:
shutil.rmtree(mp_tmpdir, ignore_errors=True)
def test_for_web(client, mp_tmpdir):
"""Send GTiff via flask."""
tile_base_url = "/wmts_simple/1.0.0/cleantopo_br/default/WGS84/"
for url in ["/"]:
response = client.get(url)
assert response.status_code == 200
for url in [
tile_base_url + "5/30/62.tif",
tile_base_url + "5/30/63.tif",
tile_base_url + "5/31/62.tif",
tile_base_url + "5/31/63.tif",
]:
response = client.get(url)
assert response.status_code == 200
img = response.data
with warnings.catch_warnings():
warnings.simplefilter("ignore")
with MemoryFile(img) as memfile:
with memfile.open() as dataset:
assert dataset.read().any()
def test_input_data(mp_tmpdir, cleantopo_br):
"""Check GeoTIFF proces output as input data."""
with mapchete.open(cleantopo_br.path) as mp:
tp = BufferedTilePyramid("geodetic")
# TODO tile with existing but empty data
tile = tp.tile(5, 5, 5)
output_params = dict(
grid="geodetic",
format="GeoTIFF",
path=mp_tmpdir,
pixelbuffer=0,
metatiling=1,
bands=2,
dtype="int16",
delimiters=dict(
bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
effective_bounds=Bounds(-180.439453125, -90.0, 180.439453125, 90.0),
zoom=[5],
process_bounds=Bounds(-180.0, -90.0, 180.0, 90.0),
),
)
output = gtiff.OutputDataWriter(output_params)
with output.open(tile, mp) as input_tile:
for data in [
input_tile.read(),
input_tile.read(1),
input_tile.read([1]),
# TODO assert valid indexes are passed input_tile.read([1, 2])
]:
assert isinstance(data, ma.masked_array)
assert input_tile.is_empty()
# open without resampling
with output.open(tile, mp) as input_tile:
pass
def test_write_geotiff_tags(mp_tmpdir, cleantopo_br, write_rasterfile_tags_py):
"""Pass on metadata tags from user process to rasterio."""
conf = dict(**cleantopo_br.dict)
conf.update(process=write_rasterfile_tags_py)
with mapchete.open(conf) as mp:
for tile in mp.get_process_tiles():
data, tags = mp.execute(tile)
assert data.any()
assert isinstance(tags, dict)
mp.write(process_tile=tile, data=(data, tags))
# read data
out_path = mp.config.output.get_path(tile)
with rasterio.open(out_path) as src:
assert "filewide_tag" in src.tags()
assert src.tags()["filewide_tag"] == "value"
assert "band_tag" in src.tags(1)
assert src.tags(1)["band_tag"] == "True"
@pytest.mark.remote
def test_s3_write_output_data(gtiff_s3, s3_example_tile, mp_s3_tmpdir):
"""Write and read output."""
with mapchete.open(gtiff_s3.dict) as mp:
process_tile = mp.config.process_pyramid.tile(*s3_example_tile)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(tile=process_tile.id)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
def test_output_single_gtiff(output_single_gtiff):
tile_id = (5, 3, 7)
with mapchete.open(output_single_gtiff.path) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert os.path.isfile(mp.config.output.path)
# error on existing file
with pytest.raises(MapcheteConfigError):
mapchete.open(output_single_gtiff.path)
# overwrite existing file
with mapchete.open(output_single_gtiff.path, mode="overwrite") as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(tile=process_tile.id)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
assert mp.config.output.tiles_exist(
output_tile=mp.config.output_pyramid.intersecting(process_tile)[0]
)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
def test_output_single_gtiff_errors(output_single_gtiff):
# single gtiff does not work on multiple zoom levels
with pytest.raises(ValueError):
mapchete.open(dict(output_single_gtiff.dict, zoom_levels=[5, 6]))
# provide either process_tile or output_tile
with mapchete.open(output_single_gtiff.path) as mp:
tile = mp.config.process_pyramid.tile(5, 3, 7)
with pytest.raises(ValueError):
mp.config.output.tiles_exist(process_tile=tile, output_tile=tile)
def test_output_single_gtiff_pixelbuffer(output_single_gtiff):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(output_single_gtiff.dict["output"], pixelbuffer=5),
),
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(tile=process_tile.id)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
def test_output_single_gtiff_compression(output_single_gtiff):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(output_single_gtiff.dict["output"], compress="deflate"),
),
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
assert "compress" in mp.config.output.profile()
assert mp.config.output.profile()["compress"] == "deflate"
mp.batch_process(tile=process_tile.id)
with rasterio.open(mp.config.output.path) as src:
assert src.profile["compress"] == "deflate"
def test_output_single_gtiff_overviews(output_single_gtiff):
# overwrite existing file
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(
output_single_gtiff.dict["output"],
overviews=True,
overviews_resampling="bilinear",
),
),
) as mp:
tile_id = (5, 3, 7)
process_tile = mp.config.process_pyramid.tile(*tile_id)
mp.batch_process(tile=process_tile.id)
with rasterio.open(mp.config.output.path) as src:
assert src.overviews(1)
assert src.tags().get("OVR_RESAMPLING_ALG").lower() == "bilinear"
for o in [1, 2, 4, 8]:
a = src.read(
masked=True, out_shape=(1, int(src.height / o), int(src.width / o))
)
assert not a.mask.all()
@pytest.mark.remote
def test_output_single_gtiff_s3(output_single_gtiff, mp_s3_tmpdir):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(
output_single_gtiff.dict["output"],
path=os.path.join(mp_s3_tmpdir, "temp.tif"),
),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
@pytest.mark.remote
def test_output_single_gtiff_s3_tempfile(output_single_gtiff, mp_s3_tmpdir):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff.dict,
output=dict(
output_single_gtiff.dict["output"],
path=os.path.join(mp_s3_tmpdir, "temp.tif"),
in_memory=False,
),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
def test_output_single_gtiff_cog(output_single_gtiff_cog):
tile_id = (5, 3, 7)
with mapchete.open(output_single_gtiff_cog.dict) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
assert cog_validate(mp.config.output.path, strict=True)
def test_output_single_gtiff_cog_tempfile(output_single_gtiff_cog):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff_cog.dict,
output=dict(output_single_gtiff_cog.dict["output"], in_memory=False),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
assert cog_validate(mp.config.output.path, strict=True)
@pytest.mark.remote
def test_output_single_gtiff_cog_s3(output_single_gtiff_cog, mp_s3_tmpdir):
tile_id = (5, 3, 7)
with mapchete.open(
dict(
output_single_gtiff_cog.dict,
output=dict(
output_single_gtiff_cog.dict["output"],
path=os.path.join(mp_s3_tmpdir, "cog.tif"),
),
)
) as mp:
process_tile = mp.config.process_pyramid.tile(*tile_id)
# basic functions
assert mp.config.output.profile()
assert mp.config.output.empty(process_tile).mask.all()
assert mp.config.output.get_path(process_tile)
# check if tile exists
assert not mp.config.output.tiles_exist(process_tile)
# write
mp.batch_process(multi=2)
# check if tile exists
assert mp.config.output.tiles_exist(process_tile)
# read again, this time with data
data = mp.config.output.read(process_tile)
assert isinstance(data, np.ndarray)
assert not data[0].mask.all()
# write empty array
data = ma.masked_array(
data=np.ones(process_tile.shape),
mask=np.ones(process_tile.shape),
)
mp.config.output.write(process_tile, data)
assert path_exists(mp.config.output.path)
assert cog_validate(mp.config.output.path, strict=True)
| 36.396761 | 86 | 0.628699 | ["MIT"] | Scartography/mapchete | test/test_formats_geotiff.py | 17,980 | Python |
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import re
import pep8
"""
Guidelines for writing new hacking checks
- Use only for Manila specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range M3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the M3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to manila/tests/test_hacking.py
"""
UNDERSCORE_IMPORT_FILES = []
log_translation = re.compile(
r"(.)*LOG\.(audit|error|info|critical|exception)\(\s*('|\")")
log_translation_LC = re.compile(
r"(.)*LOG\.(critical)\(\s*(_\(|'|\")")
log_translation_LE = re.compile(
r"(.)*LOG\.(error|exception)\(\s*(_\(|'|\")")
log_translation_LI = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
translated_log = re.compile(
    r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
    r"\(\s*_\(\s*('|\")")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
oslo_namespace_imports = re.compile(r"from[\s]*oslo[.](.*)")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
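# The pattern above flags constructs such as dict([(k, v) for k, v in seq])
# and dict((k, v) for k, v in seq); rule M336 below asks for a dict
# comprehension, e.g. {k: v for k, v in seq}, instead.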
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
CHECK_DESC = 'No check message specified'
def __init__(self, tree, filename):
"""This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
message = message or self.CHECK_DESC
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
M319
"""
if logical_line.startswith("LOG.debug(_("):
yield(0, "M319 Don't translate debug level logs")
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test and tempest
# directories.
if ("manila/tests" in filename or "manila_tempest_tests" in filename or
"contrib/tempest" in filename):
return
if pep8.noqa(physical_line):
return
msg = "M327: LOG.critical messages require translations `_LC()`!"
if log_translation_LC.match(logical_line):
yield (0, msg)
msg = ("M328: LOG.error and LOG.exception messages require translations "
"`_LE()`!")
if log_translation_LE.match(logical_line):
yield (0, msg)
msg = "M329: LOG.info messages require translations `_LI()`!"
if log_translation_LI.match(logical_line):
yield (0, msg)
msg = "M330: LOG.warning messages require translations `_LW()`!"
if log_translation_LW.match(logical_line):
yield (0, msg)
msg = "M331: Log messages require translations!"
if log_translation.match(logical_line):
yield (0, msg)
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit test to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line) or
string_translation.match(logical_line)):
yield(0, "M323: Found use of _() without explicit import of _ !")
class CheckForStrExc(BaseASTChecker):
"""Checks for the use of str() on an exception.
This currently only handles the case where str() is used in
the scope of an exception handler. If the exception is passed
into a function, returned from an assertRaises, or used on an
exception created in the same scope, this does not catch it.
"""
CHECK_DESC = ('M325 str() cannot be used on an exception. '
'Remove or use six.text_type()')
def __init__(self, tree, filename):
super(CheckForStrExc, self).__init__(tree, filename)
self.name = []
self.already_checked = []
def visit_TryExcept(self, node):
for handler in node.handlers:
if handler.name:
self.name.append(handler.name.id)
super(CheckForStrExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
super(CheckForStrExc, self).generic_visit(node)
def visit_Call(self, node):
if self._check_call_names(node, ['str']):
if node not in self.already_checked:
self.already_checked.append(node)
if isinstance(node.args[0], ast.Name):
if node.args[0].id in self.name:
self.add_error(node.args[0])
super(CheckForStrExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
"""Checks for the use of concatenation on a translated string.
Translations should not be concatenated with other strings, but
should instead include the string being added to the translated
string to give the translators the most information.
"""
CHECK_DESC = ('M326 Translated messages cannot be concatenated. '
'String should be included in translated message.')
TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']
def visit_BinOp(self, node):
if isinstance(node.op, ast.Add):
if self._check_call_names(node.left, self.TRANS_FUNC):
self.add_error(node.left)
elif self._check_call_names(node.right, self.TRANS_FUNC):
self.add_error(node.right)
super(CheckForTransAdd, self).generic_visit(node)
def check_oslo_namespace_imports(logical_line, physical_line, filename):
if pep8.noqa(physical_line):
return
if re.match(oslo_namespace_imports, logical_line):
msg = ("M333: '%s' must be used instead of '%s'.") % (
logical_line.replace('oslo.', 'oslo_'),
logical_line)
yield(0, msg)
def dict_constructor_with_list_copy(logical_line):
msg = ("M336: Must use a dict comprehension instead of a dict constructor"
" with a sequence of key-value pairs."
)
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
def factory(register):
register(validate_log_translations)
register(check_explicit_underscore_import)
register(no_translate_debug_logs)
register(CheckForStrExc)
register(CheckForTransAdd)
register(check_oslo_namespace_imports)
register(dict_constructor_with_list_copy)
| 36.551587 | 78 | 0.670828 | ["Apache-2.0"] | scality/manila | manila/hacking/checks.py | 9,211 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la volonté TenirGouvernail"""
import re
from secondaires.navigation.equipage.ordres.tenir_gouvernail import \
TenirGouvernail as OrdreTenirGouvernail
from secondaires.navigation.equipage.ordres.long_deplacer import LongDeplacer
from secondaires.navigation.equipage.volonte import Volonte
class TenirGouvernail(Volonte):
"""Classe représentant une volonté.
Cette volonté choisit un matelot pour tenir le gouvernail
du navire.
"""
cle = "tenir_gouvernail"
ordre_court = re.compile(r"^tg$", re.I)
ordre_long = re.compile(r"^tenir\s+gouvernail?$", re.I)
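    # The short form matches the command "tg"; the long form matches
    # e.g. "tenir gouvernail" (both case-insensitive).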
def choisir_matelots(self, exception=None):
"""Retourne le matelot le plus apte à accomplir la volonté."""
proches = []
matelots = self.navire.equipage.get_matelots_libres(exception)
graph = self.navire.graph
gouvernail = self.navire.gouvernail
if gouvernail is None or gouvernail.tenu is not None:
return None
for matelot in matelots:
origine = matelot.salle.mnemonic
destination = gouvernail.parent.mnemonic
if origine == destination:
proches.append((matelot, [], gouvernail))
else:
chemin = graph.get((origine, destination))
if chemin:
proches.append((matelot, chemin, gouvernail))
proches = sorted([couple for couple in proches],
key=lambda couple: len(couple[1]))
if proches:
return proches[0]
return None
def executer(self, sequence):
"""Exécute la volonté."""
if sequence is None:
self.terminer()
return
matelot, sorties, gouvernail = sequence
navire = self.navire
ordres = []
if sorties:
aller = LongDeplacer(matelot, navire, *sorties)
ordres.append(aller)
tenir = OrdreTenirGouvernail(matelot, navire)
ordres.append(tenir)
self.ajouter_ordres(matelot, ordres)
def crier_ordres(self, personnage):
"""On fait crier l'ordre au personnage."""
msg = "{} s'écrie : un homme à la barre !".format(
personnage.distinction_audible)
self.navire.envoyer(msg)
@classmethod
def extraire_arguments(cls, navire):
"""Extrait les arguments de la volonté."""
return ()
| 37.857143 | 79 | 0.686289 | ["BSD-3-Clause"] | stormi/tsunami | src/secondaires/navigation/equipage/volontes/tenir_gouvernail.py | 3,986 | Python |
#https://blog.csdn.net/orangefly0214/article/details/81387077
import MultiTemplate
from MultiTemplate import TaskTemplate
# https://blog.csdn.net/u013812710/article/details/72886491
# https://blog.csdn.net/ismr_m/article/details/53100896
#https://blog.csdn.net/bcfdsagbfcisbg/article/details/78134172
import kubernetes
import os
import influxdb
import time
import yaml
def check_path(name):
train_dir = os.path.join('/tfdata/k8snfs/', name)
print(train_dir)
if not os.path.exists(train_dir):
os.makedirs(train_dir)
return train_dir
def check_ns(name):
kubernetes.config.load_kube_config()
v1 = kubernetes.client.CoreV1Api()
# v1.create_namespace()
exist_ns = v1.list_namespace()
exist_ns_name = []
for i in exist_ns.items:
exist_ns_name.append(i.metadata.name)
if name in exist_ns_name:
return True
else:
return False
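# Example: check_path('vgg-1-1') ensures /tfdata/k8snfs/vgg-1-1 exists and
# returns that path, while check_ns('vgg-1-1') reports whether a namespace of
# that name already exists (each task below uses its own name as namespace).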
class SubTask():
def __init__(self,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag):
self.template_id = template_id
self.ps_replicas = ps_replicas
self.worker_replicas = worker_replicas
self.training_step = training_step
self.interval = interval
self.batch_size = batch_size
self.task_id = task_id
self.tag = tag
self.rtimes = rtimes
self.influx_client = influxdb.InfluxDBClient(host='192.168.128.10',port=8086,username='admin',password='admin',database="NODEMESSAGE")
self.node_list = ['k8s-master','k8s-worker0','k8s-worker2','k8sworker1','k8s-worker3','k8s-worker4','k8s-worker5']
#self.node_list = ['k8s-master','k8s-worker0','k8s-worker2','k8sworker1']
self.node_cpu = {}
self.node_cpu['k8s-master'] = 32000
self.node_cpu['k8s-worker0'] = 24000
self.node_cpu['k8s-worker2'] = 24000
self.node_cpu['k8sworker1'] = 16000
self.node_cpu['k8s-worker3'] = 24000
self.node_cpu['k8s-worker4'] = 16000
self.node_cpu['k8s-worker5'] = 24000
self.node_memory = {}
self.node_memory['k8s-master'] = float(251*1024)
self.node_memory['k8s-worker0'] = float(94*1024)
self.node_memory['k8s-worker2'] = float(94*1024)
self.node_memory['k8sworker1'] = float(125*1024)
self.node_memory['k8s-worker3'] = float(94 * 1024)
self.node_memory['k8s-worker4'] = float(125 * 1024)
self.node_memory['k8s-worker5'] = float(94 * 1024)
self.args = ['--training_step='+str(self.training_step),'--batch_size='+str(self.batch_size),'--interval='+str(self.interval),'--task_id='+str(self.task_id),'--rtimes='+str(self.rtimes),"--tag="+self.tag]
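        # For illustration (hypothetical values): training_step=100, batch_size=32,
        # interval=10, task_id=3, rtimes=1, tag="ms" yields
        # self.args == ['--training_step=100', '--batch_size=32', '--interval=10',
        #               '--task_id=3', '--rtimes=1', '--tag=ms'];
        # subclasses append their model-specific flags in make_args().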
class VGGTask(SubTask):
def __init__(self,v1,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag,channel1,channel2,channel3,channel4,channel5,num_layer1,num_layer2,num_layer3,num_layer4,num_layer5):
SubTask.__init__(self,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag)
self.channel1 = channel1
self.channel2 = channel2
self.channel3 = channel3
self.channel4 = channel4
self.channel5 = channel5
self.num_layer1 = num_layer1
self.num_layer2 = num_layer2
self.num_layer3 = num_layer3
self.num_layer4 = num_layer4
self.num_layer5 = num_layer5
self.num_layers = num_layer1+num_layer2+num_layer3+num_layer4+num_layer5+3
self.template = TaskTemplate.VGG
self.v1 = v1
self.name = 'vgg-'+str(self.task_id)+'-'+str(self.rtimes)
def get_node_list(self):
node_list = [i.metadata.name for i in self.v1.list_node().items]
return node_list
def make_args(self):
self.args.append('--channel1='+str(self.channel1))
self.args.append('--channel2='+str(self.channel2))
self.args.append('--channel3='+str(self.channel3))
self.args.append('--channel4='+str(self.channel4))
self.args.append('--channel5='+str(self.channel5))
self.args.append('--num_layer1='+str(self.num_layer1))
self.args.append('--num_layer2='+str(self.num_layer2))
self.args.append('--num_layer3='+str(self.num_layer3))
self.args.append('--num_layer4='+str(self.num_layer4))
self.args.append('--num_layer5='+str(self.num_layer5))
self.args.append('--num_layers='+str(self.num_layers))
def create_tf(self):
name = 'vgg-'+str(self.task_id)+'-'+str(self.rtimes)
ns_body = TaskTemplate.NS
ns_body['metadata']['name'] = name
if not check_ns(name):
self.v1.create_namespace(ns_body)
train_dir = check_path(name)
time.sleep(12)
result = self.influx_client.query("select * from "+"NODEMESSAGE"+" group by nodes order by desc limit 3")
node_list = self.get_node_list()
result_keys = result.keys()
nodes = [i[-1]['nodes'] for i in result_keys]
node_mg = [list(result[i]) for i in result_keys]
cpu_base = {}
memory_base = {}
point_base = {}
point_base_list = []
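        # Score each node from the last three monitoring samples: CPU and memory
        # usage are normalised by the node's capacity, combined as 0.6*cpu + 0.4*memory,
        # and the sorted scores drive the 'wokpro' priority labels (lower score ->
        # lower index), while 'woksch' marks whether the node is schedulable at all.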
for i in range(len(node_mg)):
cpu_base[nodes[i]] = 0
memory_base[nodes[i]] = 0
point_base[nodes[i]] = 0.0
for j in range(len(node_mg[0])):
cpu_base[nodes[i]] += node_mg[i][j]['cpu']
memory_base[nodes[i]] += node_mg[i][j]['memory']
cpu_base[nodes[i]] = (cpu_base[nodes[i]] / len(node_mg[0]))/self.node_cpu[nodes[i]]
memory_base[nodes[i]] = (memory_base[nodes[i]] / len(node_mg[0])) / self.node_memory[nodes[i]]
tmp = cpu_base[nodes[i]]*0.6+memory_base[nodes[i]]*0.4
point_base[nodes[i]] = tmp
point_base_list.append(tmp)
list.sort(point_base_list)
for key in nodes:
command = 'kubectl label nodes '+key+' woksch-'
os.system(command)
command2 = 'kubectl label nodes '+key+' wokpro-'
os.system(command2)
nod_prori = point_base_list.index(point_base[key])
priori = ' wokpro=%d' % nod_prori
command3 = 'kubectl label nodes '+key+priori
os.system(command3)
if cpu_base[key] <= 0.57 and memory_base[key] <= 0.6:
command = 'kubectl label nodes '+key+' woksch=true'
os.system(command)
else:
command = 'kubectl label nodes ' + key + ' woksch=false'
os.system(command)
self.template['metadata']['name'] = name
self.template['metadata']['namespace'] = name
self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.make_args()
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['args'] = self.args[:]
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['args'] = self.args[:]
log_dir = '/tfdata/tfcnn/expjob/'
# f = open(log_dir+str(name)+'.yaml', "w")
f = open(log_dir + str(name) + '.yaml', "w")
yaml.dump(self.template, f)
f.close()
response = os.system('kubectl create -f '+log_dir+str(name)+'.yaml')
if response == 0:
            print('create task success')
else:
print("Error code:"+str(response))
def delete_tf(self):
name = 'vgg-'+str(self.task_id)+'-'+str(self.rtimes)
log_dir = '/tfdata/tfcnn/expjob/'
response = os.system('kubectl delete -f ' + log_dir + str(name) + '.yaml')
if response == 0:
            print('delete task success')
else:
print("Error code:" + str(response))
self.v1.delete_namespace(name=name)
class RESTask(SubTask):
def __init__(self,v1,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag,bottle,layer1,layer2,layer3,layer4,channel1,channel2,channel3,channel4):
SubTask.__init__(self,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag)
self.channel1 = channel1
self.channel2 = channel2
self.channel3 = channel3
self.channel4 = channel4
self.bottle = bottle
self.layer1 = layer1
self.layer2 = layer2
self.layer3 = layer3
self.layer4 = layer4
self.name = 'res-'+str(self.task_id)+'-'+str(self.rtimes)
if self.bottle == 1:
self.num_layers = 3*(layer1+layer4+layer3+layer2)+2
else:
self.num_layers = 2 * (layer1 + layer4 + layer3 + layer2) + 2
self.template = TaskTemplate.RES
self.v1 = v1
def get_node_list(self):
node_list = [i.metadata.name for i in self.v1.list_node().items]
return node_list
def make_args(self):
self.args.append('--bottle=' + str(self.bottle))
self.args.append('--channel1='+str(self.channel1))
self.args.append('--channel2='+str(self.channel2))
self.args.append('--channel3='+str(self.channel3))
self.args.append('--channel4='+str(self.channel4))
self.args.append('--layer1='+str(self.layer1))
self.args.append('--layer2='+str(self.layer2))
self.args.append('--layer3='+str(self.layer3))
self.args.append('--layer4='+str(self.layer4))
def create_tf(self):
name = 'res-'+str(self.task_id)+'-'+str(self.rtimes)
ns_body = TaskTemplate.NS
ns_body['metadata']['name'] = name
if not check_ns(name):
self.v1.create_namespace(ns_body)
train_dir = check_path(name)
time.sleep(12)
result = self.influx_client.query("select * from " + "NODEMESSAGE" + " group by nodes order by desc limit 3")
node_list = self.get_node_list()
result_keys = result.keys()
nodes = [i[-1]['nodes'] for i in result_keys]
node_mg = [list(result[i]) for i in result_keys]
cpu_base = {}
memory_base = {}
point_base = {}
point_base_list = []
for i in range(len(node_mg)):
cpu_base[nodes[i]] = 0
memory_base[nodes[i]] = 0
point_base[nodes[i]] = 0.0
for j in range(len(node_mg[0])):
cpu_base[nodes[i]] += node_mg[i][j]['cpu']
memory_base[nodes[i]] += node_mg[i][j]['memory']
cpu_base[nodes[i]] = (cpu_base[nodes[i]] / len(node_mg[0])) / self.node_cpu[nodes[i]]
memory_base[nodes[i]] = (memory_base[nodes[i]] / len(node_mg[0])) / self.node_memory[nodes[i]]
tmp = cpu_base[nodes[i]] * 0.6 + memory_base[nodes[i]] * 0.4
point_base[nodes[i]] = tmp
point_base_list.append(tmp)
list.sort(point_base_list)
for key in nodes:
command = 'kubectl label nodes ' + key + ' woksch-'
os.system(command)
command2 = 'kubectl label nodes ' + key + ' wokpro-'
os.system(command2)
nod_prori = point_base_list.index(point_base[key])
priori = ' wokpro=%d' % nod_prori
command3 = 'kubectl label nodes ' + key + priori
os.system(command3)
if cpu_base[key] <= 0.6 and memory_base[key] <= 0.6:
command = 'kubectl label nodes ' + key + ' woksch=true'
os.system(command)
else:
command = 'kubectl label nodes ' + key + ' woksch=false'
os.system(command)
self.template['metadata']['name'] = name
self.template['metadata']['namespace'] = name
self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.make_args()
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['args'] = self.args[:]
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['args'] = self.args[:]
log_dir = '/tfdata/tfcnn/expjob/'
f = open(log_dir+str(name)+'.yaml', "w")
yaml.dump(self.template, f)
f.close()
response = os.system('kubectl create -f '+log_dir+str(name)+'.yaml')
if response == 0:
            print('create task success')
else:
print("Error code:"+str(response))
def delete_tf(self):
name = 'res-'+str(self.task_id)+'-'+str(self.rtimes)
log_dir = '/tfdata/tfcnn/expjob/'
response = os.system('kubectl delete -f ' + log_dir + str(name) + '.yaml')
if response == 0:
            print('delete task success')
else:
print("Error code:" + str(response))
self.v1.delete_namespace(name=name)
class RETask(SubTask):
def __init__(self,v1,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag,stack,channel1,channel2,channel3,channel4):
SubTask.__init__(self,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag)
self.channel1 = channel1
self.channel2 = channel2
self.channel3 = channel3
self.channel4 = channel4
self.stack = stack
self.num_layers = 6*self.stack+2
self.template = TaskTemplate.RE
self.name = 're-'+str(self.task_id)+'-'+str(self.rtimes)
self.v1 = v1
def get_node_list(self):
node_list = [i.metadata.name for i in self.v1.list_node().items]
return node_list
def make_args(self):
self.args.append('--stack='+str(self.stack))
self.args.append('--channel1='+str(self.channel1))
self.args.append('--channel2='+str(self.channel2))
self.args.append('--channel3='+str(self.channel3))
self.args.append('--channel4='+str(self.channel4))
def create_tf(self):
name = 're-'+str(self.task_id)+'-'+str(self.rtimes)
ns_body = TaskTemplate.NS
ns_body['metadata']['name'] = name
if not check_ns(name):
self.v1.create_namespace(ns_body)
train_dir = check_path(name)
time.sleep(12)
result = self.influx_client.query("select * from " + "NODEMESSAGE" + " group by nodes order by desc limit 3")
node_list = self.get_node_list()
result_keys = result.keys()
nodes = [i[-1]['nodes'] for i in result_keys]
node_mg = [list(result[i]) for i in result_keys]
cpu_base = {}
memory_base = {}
point_base = {}
point_base_list = []
for i in range(len(node_mg)):
cpu_base[nodes[i]] = 0
memory_base[nodes[i]] = 0
point_base[nodes[i]] = 0.0
for j in range(len(node_mg[0])):
cpu_base[nodes[i]] += node_mg[i][j]['cpu']
memory_base[nodes[i]] += node_mg[i][j]['memory']
cpu_base[nodes[i]] = (cpu_base[nodes[i]] / len(node_mg[0])) / self.node_cpu[nodes[i]]
memory_base[nodes[i]] = (memory_base[nodes[i]] / len(node_mg[0])) / self.node_memory[nodes[i]]
tmp = cpu_base[nodes[i]] * 0.6 + memory_base[nodes[i]] * 0.4
point_base[nodes[i]] = tmp
point_base_list.append(tmp)
list.sort(point_base_list)
for key in nodes:
command = 'kubectl label nodes ' + key + ' woksch-'
os.system(command)
command2 = 'kubectl label nodes ' + key + ' wokpro-'
os.system(command2)
nod_prori = point_base_list.index(point_base[key])
priori = ' wokpro=%d' % nod_prori
command3 = 'kubectl label nodes ' + key + priori
os.system(command3)
if cpu_base[key] <= 0.6 and memory_base[key] <= 0.6:
command = 'kubectl label nodes ' + key + ' woksch=true'
os.system(command)
else:
command = 'kubectl label nodes ' + key + ' woksch=false'
os.system(command)
self.template['metadata']['name'] = name
self.template['metadata']['namespace'] = name
self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.make_args()
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['args'] = self.args[:]
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['args'] = self.args[:]
log_dir = '/tfdata/tfcnn/expjob/'
f = open(log_dir+str(name)+'.yaml', "w")
yaml.dump(self.template, f)
f.close()
response = os.system('kubectl create -f '+log_dir+str(name)+'.yaml')
if response == 0:
            print('create task success')
else:
print("Error code:"+str(response))
def delete_tf(self):
name = 're-'+str(self.task_id)+'-'+str(self.rtimes)
log_dir = '/tfdata/tfcnn/expjob/'
response = os.system('kubectl delete -f ' + log_dir + str(name) + '.yaml')
if response == 0:
            print('delete task success')
else:
print("Error code:" + str(response))
self.v1.delete_namespace(name=name)
class XCETask(SubTask):
def __init__(self,v1,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag,repeat,channel1,channel2,channel3,channel4,channel5,channel6,channel7,channel8):
SubTask.__init__(self,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag)
self.channel1 = channel1
self.channel2 = channel2
self.channel3 = channel3
self.channel4 = channel4
self.channel5 = channel5
self.channel6 = channel6
self.channel7 = channel7
self.channel8 = channel8
self.repeat = repeat
self.template = TaskTemplate.XCEPTION
self.v1 = v1
self.name = 'xception-'+str(self.task_id)+'-'+str(self.rtimes)
def get_node_list(self):
node_list = [i.metadata.name for i in self.v1.list_node().items]
return node_list
def make_args(self):
self.args.append('--repeat='+str(self.repeat))
self.args.append('--channel1='+str(self.channel1))
self.args.append('--channel2='+str(self.channel2))
self.args.append('--channel3='+str(self.channel3))
self.args.append('--channel4='+str(self.channel4))
self.args.append('--channel5=' + str(self.channel5))
self.args.append('--channel6=' + str(self.channel6))
self.args.append('--channel7=' + str(self.channel7))
self.args.append('--channel8=' + str(self.channel8))
def create_tf(self):
name = 'xception-'+str(self.task_id)+'-'+str(self.rtimes)
ns_body = TaskTemplate.NS
ns_body['metadata']['name'] = name
if not check_ns(name):
self.v1.create_namespace(ns_body)
train_dir = check_path(name)
time.sleep(12)
result = self.influx_client.query("select * from " + "NODEMESSAGE" + " group by nodes order by desc limit 3")
node_list = self.get_node_list()
result_keys = result.keys()
nodes = [i[-1]['nodes'] for i in result_keys]
node_mg = [list(result[i]) for i in result_keys]
cpu_base = {}
memory_base = {}
point_base = {}
point_base_list = []
for i in range(len(node_mg)):
cpu_base[nodes[i]] = 0
memory_base[nodes[i]] = 0
point_base[nodes[i]] = 0.0
for j in range(len(node_mg[0])):
cpu_base[nodes[i]] += node_mg[i][j]['cpu']
memory_base[nodes[i]] += node_mg[i][j]['memory']
cpu_base[nodes[i]] = (cpu_base[nodes[i]] / len(node_mg[0])) / self.node_cpu[nodes[i]]
memory_base[nodes[i]] = (memory_base[nodes[i]] / len(node_mg[0])) / self.node_memory[nodes[i]]
tmp = cpu_base[nodes[i]] * 0.6 + memory_base[nodes[i]] * 0.4
point_base[nodes[i]] = tmp
point_base_list.append(tmp)
list.sort(point_base_list)
for key in nodes:
command = 'kubectl label nodes ' + key + ' woksch-'
os.system(command)
command2 = 'kubectl label nodes ' + key + ' wokpro-'
os.system(command2)
nod_prori = point_base_list.index(point_base[key])
priori = ' wokpro=%d' % nod_prori
command3 = 'kubectl label nodes ' + key + priori
os.system(command3)
if cpu_base[key] <= 0.6 and memory_base[key] <= 0.6:
command = 'kubectl label nodes ' + key + ' woksch=true'
os.system(command)
else:
command = 'kubectl label nodes ' + key + ' woksch=false'
os.system(command)
self.template['metadata']['name'] = name
self.template['metadata']['namespace'] = name
self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.make_args()
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['args'] = self.args[:]
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['args'] = self.args[:]
log_dir = '/tfdata/tfcnn/expjob/'
f = open(log_dir+str(name)+'.yaml', "w")
yaml.dump(self.template, f)
f.close()
response = os.system('kubectl create -f '+log_dir+str(name)+'.yaml')
if response == 0:
            print('create task success')
else:
print("Error code:"+str(response))
def delete_tf(self):
name = 'xception-'+str(self.task_id)+'-'+str(self.rtimes)
log_dir = '/tfdata/tfcnn/expjob/'
response = os.system('kubectl delete -f ' + log_dir + str(name) + '.yaml')
if response == 0:
            print('delete task success')
else:
print("Error code:" + str(response))
self.v1.delete_namespace(name=name)
class DENTask(SubTask):
def __init__(self,v1,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag,L,k,BC):
SubTask.__init__(self,template_id,ps_replicas,worker_replicas,training_step,batch_size,interval,task_id,rtimes,tag)
self.L = L
self.k = k
self.BC = BC
self.template = TaskTemplate.DEN
self.v1 = v1
self.name = 'den-'+str(self.task_id)+'-'+str(self.rtimes)
def get_node_list(self):
node_list = [i.metadata.name for i in self.v1.list_node().items]
return node_list
def make_args(self):
self.args.append('--L='+str(self.L))
self.args.append('--k='+str(self.k))
self.args.append('--BC='+str(self.BC))
def create_tf(self):
name = 'den-'+str(self.task_id)+'-'+str(self.rtimes)
ns_body = TaskTemplate.NS
ns_body['metadata']['name'] = name
if not check_ns(name):
self.v1.create_namespace(ns_body)
train_dir = check_path(name)
time.sleep(12)
result = self.influx_client.query("select * from " + "NODEMESSAGE" + " group by nodes order by desc limit 3")
node_list = self.get_node_list()
result_keys = result.keys()
nodes = [i[-1]['nodes'] for i in result_keys]
node_mg = [list(result[i]) for i in result_keys]
cpu_base = {}
memory_base = {}
point_base = {}
point_base_list = []
for i in range(len(node_mg)):
cpu_base[nodes[i]] = 0
memory_base[nodes[i]] = 0
point_base[nodes[i]] = 0.0
for j in range(len(node_mg[0])):
cpu_base[nodes[i]] += node_mg[i][j]['cpu']
memory_base[nodes[i]] += node_mg[i][j]['memory']
cpu_base[nodes[i]] = (cpu_base[nodes[i]] / len(node_mg[0])) / self.node_cpu[nodes[i]]
memory_base[nodes[i]] = (memory_base[nodes[i]] / len(node_mg[0])) / self.node_memory[nodes[i]]
tmp = cpu_base[nodes[i]] * 0.6 + memory_base[nodes[i]] * 0.4
point_base[nodes[i]] = tmp
point_base_list.append(tmp)
list.sort(point_base_list)
for key in nodes:
command = 'kubectl label nodes ' + key + ' woksch-'
os.system(command)
command2 = 'kubectl label nodes ' + key + ' wokpro-'
os.system(command2)
nod_prori = point_base_list.index(point_base[key])
priori = ' wokpro=%d' % nod_prori
command3 = 'kubectl label nodes ' + key + priori
os.system(command3)
if cpu_base[key] <= 0.6 and memory_base[key] <= 0.6:
command = 'kubectl label nodes ' + key + ' woksch=true'
os.system(command)
else:
command = 'kubectl label nodes ' + key + ' woksch=false'
os.system(command)
self.template['metadata']['name'] = name
self.template['metadata']['namespace'] = name
self.template['spec']['tfReplicaSpecs']['PS']['replicas'] = self.ps_replicas
self.template['spec']['tfReplicaSpecs']['Worker']['replicas'] = self.worker_replicas
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['volumes'][0]['hostPath']['path'] = train_dir
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['volumeMounts'][0]['name'] = name
self.make_args()
self.template['spec']['tfReplicaSpecs']['PS']['template']['spec']['containers'][0]['args'] = self.args[:]
self.template['spec']['tfReplicaSpecs']['Worker']['template']['spec']['containers'][0]['args'] = self.args[:]
log_dir = '/tfdata/tfcnn/expjob/'
f = open(log_dir+str(name)+'.yaml', "w")
yaml.dump(self.template, f)
f.close()
response = os.system('kubectl create -f '+log_dir+str(name)+'.yaml')
if response == 0:
            print('create task success')
else:
print("Error code:"+str(response))
def delete_tf(self):
name = 'den-'+str(self.task_id)+'-'+str(self.rtimes)
log_dir = '/tfdata/tfcnn/expjob/'
response = os.system('kubectl delete -f ' + log_dir + str(name) + '.yaml')
if response == 0:
            print('delete task success')
else:
print("Error code:" + str(response))
self.v1.delete_namespace(name=name)
if __name__ == '__main__':
kubernetes.config.load_kube_config()
v1 = kubernetes.client.CoreV1Api()
# v1.create_namespace()
v1.list_namespace()
check_path('ceshi')
# vgg = VGGTask(1,2,4,80,1.0,2,1,"ms",32,64,128,256,512,2,3,3,4,4)
# vgg.create_tf()
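    # Hypothetical end-to-end usage (all argument values are illustrative only):
    # vgg = VGGTask(v1, template_id=1, ps_replicas=2, worker_replicas=4,
    #               training_step=80, batch_size=32, interval=10, task_id=1,
    #               rtimes=1, tag="ms", channel1=32, channel2=64, channel3=128,
    #               channel4=256, channel5=512, num_layer1=2, num_layer2=3,
    #               num_layer3=3, num_layer4=4, num_layer5=4)
    # vgg.create_tf()   # renders the TFJob YAML under /tfdata/tfcnn/expjob/ and submits it
    # vgg.delete_tf()   # removes the TFJob and deletes its namespace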
| 46.155488 | 219 | 0.594986 | ["Apache-2.0"] | qore-dl/qore-dl-code | experiment code/CPU Experiments Code/task_submit_save.py | 30,278 | Python |
# -*- coding: utf-8 -*-
from datetime import datetime
import time
import unittest
from webapp2_caffeine.cache import CacheContainer
from webapp2_caffeine.cache import flush
class DummyCache(CacheContainer):
key = 'dummy_cache'
@property
def fresh_value(self):
return datetime.now()
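# DummyCache is the minimal concrete container used by the tests below: a
# subclass only needs a cache key plus a fresh_value property, which the base
# CacheContainer leaves unimplemented (see test_fresh_value).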
class CacheContainerTest(unittest.TestCase):
def setUp(self):
flush()
def tearDown(self):
flush()
def test_fresh_value(self):
container = CacheContainer()
with self.assertRaises(NotImplementedError):
container.fresh_value
def test_set(self):
container = CacheContainer()
with self.assertRaises(ValueError):
container.set('my value')
container = DummyCache()
value, expiration = container.set('my value')
self.assertEqual(value, 'my value')
self.assertTrue(21000 < expiration - time.time() < 21600)
self.assertEqual(container.get(), 'my value')
def test_get(self):
container = DummyCache()
self.assertEqual(container.get(), None)
container.set('my value', 1000)
self.assertEqual(container.get(), None)
container.set('my value')
self.assertEqual(container.get(), 'my value')
def test_delete(self):
container = DummyCache()
container.set('my value')
container.delete()
self.assertEqual(container.get(), None)
def test_update(self):
container = DummyCache()
container.update()
self.assertTrue(container.get())
def test_value(self):
container = DummyCache()
old_value = container.value
self.assertTrue(old_value)
        self.assertEqual(container.value, old_value)
| 26.119403 | 65 | 0.641143 | ["Apache-2.0"] | gvigneron/webapp2_caffeine | tests/test_cache.py | 1,750 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Ttm Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.blocktools import create_block, create_coinbase, get_masternode_payment
from test_framework.mininode import P2PDataStore, network_thread_start
from test_framework.messages import CTxOut, FromHex, CCbTx, CTransaction, ToHex
from test_framework.script import CScript
from test_framework.test_framework import TtmTestFramework
from test_framework.util import assert_equal, get_bip9_status, hex_str_to_bytes
'''
feature_block_reward_reallocation.py
Checks block reward reallocation correctness
'''
class BlockRewardReallocationTest(TtmTestFramework):
def set_test_params(self):
self.set_ttm_test_params(2, 1, fast_dip3_enforcement=True)
self.set_ttm_dip8_activation(450)
# 536870912 == 0x20000000, i.e. not signalling for anything
def create_test_block(self, version=536870912):
self.bump_mocktime(5)
bt = self.nodes[0].getblocktemplate()
tip = int(bt['previousblockhash'], 16)
nextheight = bt['height']
coinbase = create_coinbase(nextheight)
coinbase.nVersion = 3
coinbase.nType = 5 # CbTx
coinbase.vout[0].nValue = bt['coinbasevalue']
for mn in bt['masternode']:
coinbase.vout.append(CTxOut(mn['amount'], CScript(hex_str_to_bytes(mn['script']))))
coinbase.vout[0].nValue -= mn['amount']
cbtx = FromHex(CCbTx(), bt['coinbase_payload'])
coinbase.vExtraPayload = cbtx.serialize()
coinbase.rehash()
coinbase.calc_sha256()
block = create_block(tip, coinbase, self.mocktime)
block.nVersion = version
# Add quorum commitments from template
for tx in bt['transactions']:
tx2 = FromHex(CTransaction(), tx['data'])
if tx2.nType == 6:
block.vtx.append(tx2)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
return block
def signal(self, num_blocks, expected_lockin):
self.log.info("Signal with %d/500 blocks" % (num_blocks))
# create and send non-signalling blocks
for i in range(500 - num_blocks):
test_block = self.create_test_block()
self.nodes[0].submitblock(ToHex(test_block))
# generate at most 10 signaling blocks at a time
if num_blocks > 0:
for i in range((num_blocks - 1) // 10):
self.bump_mocktime(10)
self.nodes[0].generate(10)
self.nodes[0].generate((num_blocks - 1) % 10)
assert_equal(get_bip9_status(self.nodes[0], 'realloc')['status'], 'started')
self.nodes[0].generate(1)
if expected_lockin:
assert_equal(get_bip9_status(self.nodes[0], 'realloc')['status'], 'locked_in')
else:
assert_equal(get_bip9_status(self.nodes[0], 'realloc')['status'], 'started')
def threshold(self, attempt):
threshold_calc = 400 - attempt * attempt
if threshold_calc < 300:
return 300
return threshold_calc
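    # Worked values: threshold(0..10) = 400, 399, 396, 391, 384, 375, 364, 351,
    # 336, 319, 300; from attempt 11 onwards 400 - attempt**2 drops below 300,
    # so the floor of 300 applies.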
def run_test(self):
self.log.info("Wait for DIP3 to activate")
while get_bip9_status(self.nodes[0], 'dip0003')['status'] != 'active':
self.bump_mocktime(10)
self.nodes[0].generate(10)
self.nodes[0].add_p2p_connection(P2PDataStore())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
self.log.info("Mine all but one remaining block in the window")
bi = self.nodes[0].getblockchaininfo()
for i in range(498 - bi['blocks']):
self.bump_mocktime(1)
self.nodes[0].generate(1)
self.log.info("Initial state is DEFINED")
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 498)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'defined')
self.log.info("Advance from DEFINED to STARTED at height = 499")
self.nodes[0].generate(1)
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 499)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'started')
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(0))
self.signal(399, False) # 1 block short
self.log.info("Still STARTED but new threshold should be lower at height = 999")
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 999)
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(1))
self.signal(398, False) # 1 block short again
self.log.info("Still STARTED but new threshold should be even lower at height = 1499")
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 1499)
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(2))
pre_locked_in_blockhash = bi['bestblockhash']
self.signal(396, True) # just enough to lock in
self.log.info("Advanced to LOCKED_IN at height = 1999")
for i in range(49):
self.bump_mocktime(10)
self.nodes[0].generate(10)
self.nodes[0].generate(9)
self.log.info("Still LOCKED_IN at height = 2498")
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 2498)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'locked_in')
self.log.info("Advance from LOCKED_IN to ACTIVE at height = 2499")
self.nodes[0].generate(1) # activation
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 2499)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'active')
assert_equal(bi['bip9_softforks']['realloc']['since'], 2500)
self.log.info("Reward split should stay ~50/50 before the first superblock after activation")
# This applies even if reallocation was activated right at superblock height like it does here
bt = self.nodes[0].getblocktemplate()
assert_equal(bt['height'], 2500)
assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
self.nodes[0].generate(9)
bt = self.nodes[0].getblocktemplate()
assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
assert_equal(bt['coinbasevalue'], 13748571607)
assert_equal(bt['masternode'][0]['amount'], 6874285801) # 0.4999999998
self.log.info("Reallocation should kick-in with the superblock mined at height = 2010")
for period in range(19): # there will be 19 adjustments, 3 superblocks long each
for i in range(3):
self.bump_mocktime(10)
self.nodes[0].generate(10)
bt = self.nodes[0].getblocktemplate()
assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
self.log.info("Reward split should reach ~60/40 after reallocation is done")
assert_equal(bt['coinbasevalue'], 10221599170)
assert_equal(bt['masternode'][0]['amount'], 6132959502) # 0.6
self.log.info("Reward split should stay ~60/40 after reallocation is done")
for period in range(10): # check 10 next superblocks
self.bump_mocktime(10)
self.nodes[0].generate(10)
bt = self.nodes[0].getblocktemplate()
assert_equal(bt['masternode'][0]['amount'], get_masternode_payment(bt['height'], bt['coinbasevalue'], 2500))
assert_equal(bt['coinbasevalue'], 9491484944)
assert_equal(bt['masternode'][0]['amount'], 5694890966) # 0.6
# make sure all nodes are still synced
self.sync_all()
self.log.info("Rollback the chain back to the STARTED state")
self.mocktime = self.nodes[0].getblock(pre_locked_in_blockhash, 1)['time']
for node in self.nodes:
node.invalidateblock(pre_locked_in_blockhash)
# create and send non-signalling block
test_block = self.create_test_block()
self.nodes[0].submitblock(ToHex(test_block))
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 1499)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'started')
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(2))
self.log.info("Check thresholds reach min level and stay there")
for i in range(8): # 7 to reach min level and 1 more to check it doesn't go lower than that
self.signal(0, False) # no need to signal
bi = self.nodes[0].getblockchaininfo()
assert_equal(bi['blocks'], 1999 + i * 500)
assert_equal(bi['bip9_softforks']['realloc']['status'], 'started')
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], self.threshold(i + 3))
assert_equal(bi['bip9_softforks']['realloc']['statistics']['threshold'], 300)
if __name__ == '__main__':
BlockRewardReallocationTest().main()
| 45.975369 | 124 | 0.645773 | ["MIT"] | mytitanium/Titanium-Core-1.0 | test/functional/feature_block_reward_reallocation.py | 9,333 | Python |
from unittest import mock
import pytest
from rest_framework.serializers import ValidationError
from drf_recaptcha.client import RecaptchaResponse
from drf_recaptcha.validators import ReCaptchaV2Validator, ReCaptchaV3Validator
@pytest.mark.parametrize(
("validator_class", "params"),
[
(ReCaptchaV2Validator, {}),
(ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
],
)
def test_recaptcha_validator_get_response_success(validator_class, params):
validator = validator_class(**params)
assert isinstance(validator.get_response("test_token"), RecaptchaResponse)
@pytest.mark.parametrize(
("validator_class", "params"),
[
(ReCaptchaV2Validator, {}),
(ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
],
)
def test_recaptcha_validator_get_response_fail(validator_class, params):
validator = validator_class(**params)
assert isinstance(validator.get_response("test_token"), RecaptchaResponse)
@pytest.mark.parametrize(
("validator_class", "params", "response"),
[
(ReCaptchaV2Validator, {}, RecaptchaResponse(is_valid=True)),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(
is_valid=True, extra_data={"score": 0.6, "action": "test_action"}
),
),
],
)
def test_recaptcha_validator_call_success(validator_class, params, response):
validator = validator_class(**params)
validator.get_response = mock.Mock(return_value=response)
try:
validator("test_token")
except ValidationError:
pytest.fail("Validation is not passed")
@pytest.mark.parametrize(
("validator_class", "params", "response", "error"),
[
(
ReCaptchaV2Validator,
{},
RecaptchaResponse(is_valid=False),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV2Validator,
{},
RecaptchaResponse(
is_valid=True, extra_data={"score": 0.6, "action": "test_action"}
),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_error')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=False),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=True),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_error')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=True, extra_data={"score": 0.3}),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(is_valid=True, extra_data={"score": 0.5}),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
(
ReCaptchaV3Validator,
{"action": "test_action", "required_score": 0.4},
RecaptchaResponse(
is_valid=True, extra_data={"score": 0.5, "action": "other_action"}
),
"[ErrorDetail(string='Error verifying reCAPTCHA, please try again.', code='captcha_invalid')]",
),
],
)
def test_recaptcha_validator_call_fail(validator_class, params, response, error):
validator = validator_class(**params)
validator.get_response = mock.Mock(return_value=response)
with pytest.raises(ValidationError) as exc_info:
validator("test_token")
assert str(exc_info.value) == error
@pytest.mark.parametrize(
("validator_class", "params"),
[
(ReCaptchaV2Validator, {}),
(ReCaptchaV3Validator, {"action": "test_action", "required_score": 0.4}),
],
)
def test_recaptcha_validator_set_context(validator_class, params, settings):
settings.DRF_RECAPTCHA_TESTING = True
validator = validator_class(**params)
assert validator.recaptcha_client_ip == ""
serializer_field = mock.Mock(
context={"request": mock.Mock(META={"HTTP_X_FORWARDED_FOR": "4.3.2.1"})}
)
validator("test_token", serializer_field)
assert validator.recaptcha_client_ip == "4.3.2.1"
| 34.630435 | 107 | 0.634442 | ["MIT"] | finhold72/recaptcha | tests/test_validator.py | 4,779 | Python |
import os
from datetime import datetime
import numpy as np
import xarray as xr
from pyoos.collectors.usgs.usgs_rest import UsgsRest
from pyoos.parsers.waterml import WaterML11ToPaegan
def get_usgs_data(station_id, start_date, end_date, parameter="00060", cache_dir=None):
"""Get river discharge data from the USGS REST web service.
See `U.S. Geological Survey Water Services
<https://waterservices.usgs.gov/>`_ (USGS)
Parameters
----------
station_id : str
The station id to get
start_date : str
String for start date in the format: 'YYYY-MM-dd', e.g. '1980-01-01'
end_date : str
        String for end date in the format: 'YYYY-MM-dd', e.g. '2018-12-31'
parameter : str
The parameter code to get, e.g. ('00060') discharge, cubic feet per second
cache_dir : str
Directory where files retrieved from the web service are cached.
If set to None then USGS_DATA_HOME env var will be used as cache directory.
Examples
--------
>>> from ewatercycle.observation.usgs import get_usgs_data
>>> data = get_usgs_data('03109500', '2000-01-01', '2000-12-31', cache_dir='.')
>>> data
<xarray.Dataset>
Dimensions: (time: 8032)
Coordinates:
* time (time) datetime64[ns] 2000-01-04T05:00:00 ... 2000-12-23T04:00:00
Data variables:
        streamflow (time) float32 8.296758 10.420501 ... 10.647034 11.694747
Attributes:
title: USGS Data from streamflow data
station: Little Beaver Creek near East Liverpool OH
stationid: 03109500
location: (40.6758974, -80.5406244)
""" # noqa: E501
if cache_dir is None:
cache_dir = os.environ["USGS_DATA_HOME"]
# Check if we have the netcdf data
netcdf = os.path.join(
cache_dir,
"USGS_"
+ station_id
+ "_"
+ parameter
+ "_"
+ start_date
+ "_"
+ end_date
+ ".nc",
)
if os.path.exists(netcdf):
return xr.open_dataset(netcdf)
# Download the data if needed
out = os.path.join(
cache_dir,
"USGS_"
+ station_id
+ "_"
+ parameter
+ "_"
+ start_date
+ "_"
+ end_date
+ ".wml",
)
if not os.path.exists(out):
collector = UsgsRest()
collector.filter(
start=datetime.strptime(start_date, "%Y-%m-%d"),
end=datetime.strptime(end_date, "%Y-%m-%d"),
variables=[parameter],
features=[station_id],
)
data = collector.raw()
with open(out, "w") as file:
file.write(data)
collector.clear()
else:
with open(out, "r") as file:
data = file.read()
# Convert the raw data to an xarray
data = WaterML11ToPaegan(data).feature
# We expect only 1 station
if len(data.elements) == 0:
raise ValueError("Data does not contain any station data")
else:
station = data.elements[0]
    # Unit conversion from cubic feet per second to cubic meters per second (1 m³ ≈ 35.315 ft³)
values = np.array(
[float(point.members[0]["value"]) / 35.315 for point in station.elements],
dtype=np.float32,
)
times = [point.time for point in station.elements]
attrs = {
"units": "cubic meters per second",
}
# Create the xarray dataset
ds = xr.Dataset(
{"streamflow": (["time"], values, attrs)}, coords={"time": times}
)
# Set some nice attributes
ds.attrs["title"] = "USGS Data from streamflow data"
ds.attrs["station"] = station.name
ds.attrs["stationid"] = station.get_uid()
ds.attrs["location"] = (station.location.y, station.location.x)
ds.to_netcdf(netcdf)
return ds
| 30.271318 | 89 | 0.576184 | ["Apache-2.0"] | cffbots/ewatercycle | src/ewatercycle/observation/usgs.py | 3,905 | Python |
#!/usr/bin/python
#
# Copyright 2018-2020 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: contact@polyaxon.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from polyaxon_sdk.api_client import ApiClient
from polyaxon_sdk.exceptions import ApiTypeError, ApiValueError # noqa: F401
class UsersV1Api(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_token(self, body, **kwargs): # noqa: E501
"""Create token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_token(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1Token body: Token body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.create_token_with_http_info(body, **kwargs) # noqa: E501
def create_token_with_http_info(self, body, **kwargs): # noqa: E501
"""Create token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_token_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1Token body: Token body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `create_token`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens",
"POST",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def delete_token(self, uuid, **kwargs): # noqa: E501
"""Delete token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_token(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str uuid: UUID of the namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.delete_token_with_http_info(uuid, **kwargs) # noqa: E501
def delete_token_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Delete token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_token_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str uuid: UUID of the namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["uuid"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and (
"uuid" not in local_var_params
or local_var_params["uuid"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `uuid` when calling `delete_token`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "uuid" in local_var_params:
path_params["uuid"] = local_var_params["uuid"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens/{uuid}",
"DELETE",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_token(self, uuid, **kwargs): # noqa: E501
"""Get token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_token(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str uuid: UUID of the namespace (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_token_with_http_info(uuid, **kwargs) # noqa: E501
def get_token_with_http_info(self, uuid, **kwargs): # noqa: E501
"""Get token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_token_with_http_info(uuid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
        :param str uuid: UUID of the namespace (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["uuid"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'uuid' is set
if self.api_client.client_side_validation and (
"uuid" not in local_var_params
or local_var_params["uuid"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `uuid` when calling `get_token`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "uuid" in local_var_params:
path_params["uuid"] = local_var_params["uuid"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens/{uuid}",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def get_user(self, **kwargs): # noqa: E501
"""Get current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1User
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.get_user_with_http_info(**kwargs) # noqa: E501
def get_user_with_http_info(self, **kwargs): # noqa: E501
"""Get current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = []
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1User", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def list_tokens(self, **kwargs): # noqa: E501
"""List tokens # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tokens(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query filter the search.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1ListTokenResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.list_tokens_with_http_info(**kwargs) # noqa: E501
def list_tokens_with_http_info(self, **kwargs): # noqa: E501
"""List tokens # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_tokens_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int offset: Pagination offset.
:param int limit: Limit size.
:param str sort: Sort to order the search.
        :param str query: Query filter the search.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1ListTokenResponse, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["offset", "limit", "sort", "query"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_tokens" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
collection_formats = {}
path_params = {}
query_params = []
if (
"offset" in local_var_params and local_var_params["offset"] is not None
): # noqa: E501
query_params.append(("offset", local_var_params["offset"])) # noqa: E501
if (
"limit" in local_var_params and local_var_params["limit"] is not None
): # noqa: E501
query_params.append(("limit", local_var_params["limit"])) # noqa: E501
if (
"sort" in local_var_params and local_var_params["sort"] is not None
): # noqa: E501
query_params.append(("sort", local_var_params["sort"])) # noqa: E501
if (
"query" in local_var_params and local_var_params["query"] is not None
): # noqa: E501
query_params.append(("query", local_var_params["query"])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens",
"GET",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1ListTokenResponse", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def patch_token(self, token_uuid, body, **kwargs): # noqa: E501
"""Patch token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_token(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.patch_token_with_http_info(token_uuid, body, **kwargs) # noqa: E501
def patch_token_with_http_info(self, token_uuid, body, **kwargs): # noqa: E501
"""Patch token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_token_with_http_info(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["token_uuid", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'token_uuid' is set
if self.api_client.client_side_validation and (
"token_uuid" not in local_var_params
or local_var_params["token_uuid"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `token_uuid` when calling `patch_token`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `patch_token`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "token_uuid" in local_var_params:
path_params["token.uuid"] = local_var_params["token_uuid"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens/{token.uuid}",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def patch_user(self, body, **kwargs): # noqa: E501
"""Patch current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_user(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1User
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.patch_user_with_http_info(body, **kwargs) # noqa: E501
def patch_user_with_http_info(self, body, **kwargs): # noqa: E501
"""Patch current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_user_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_user" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `patch_user`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users",
"PATCH",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1User", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_token(self, token_uuid, body, **kwargs): # noqa: E501
"""Update token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_token(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1Token
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.update_token_with_http_info(
token_uuid, body, **kwargs
) # noqa: E501
def update_token_with_http_info(self, token_uuid, body, **kwargs): # noqa: E501
"""Update token # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_token_with_http_info(token_uuid, body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str token_uuid: UUID (required)
:param V1Token body: Token body (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1Token, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["token_uuid", "body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_token" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'token_uuid' is set
if self.api_client.client_side_validation and (
"token_uuid" not in local_var_params
or local_var_params["token_uuid"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `token_uuid` when calling `update_token`"
) # noqa: E501
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `update_token`"
) # noqa: E501
collection_formats = {}
path_params = {}
if "token_uuid" in local_var_params:
path_params["token.uuid"] = local_var_params["token_uuid"] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users/tokens/{token.uuid}",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1Token", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
def update_user(self, body, **kwargs): # noqa: E501
"""Update current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1User
If the method is called asynchronously,
returns the request thread.
"""
kwargs["_return_http_data_only"] = True
return self.update_user_with_http_info(body, **kwargs) # noqa: E501
def update_user_with_http_info(self, body, **kwargs): # noqa: E501
"""Update current user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_user_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param V1User body: (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1User, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ["body"]
all_params.extend(
[
"async_req",
"_return_http_data_only",
"_preload_content",
"_request_timeout",
]
)
for key, val in six.iteritems(local_var_params["kwargs"]):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_user" % key
)
local_var_params[key] = val
del local_var_params["kwargs"]
# verify the required parameter 'body' is set
if self.api_client.client_side_validation and (
"body" not in local_var_params
or local_var_params["body"] is None # noqa: E501
): # noqa: E501
raise ApiValueError(
"Missing the required parameter `body` when calling `update_user`"
) # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if "body" in local_var_params:
body_params = local_var_params["body"]
# HTTP header `Accept`
header_params["Accept"] = self.api_client.select_header_accept(
["application/json"]
) # noqa: E501
# HTTP header `Content-Type`
header_params[
"Content-Type"
] = self.api_client.select_header_content_type( # noqa: E501
["application/json"]
) # noqa: E501
# Authentication setting
auth_settings = ["ApiKey"] # noqa: E501
return self.api_client.call_api(
"/api/v1/users",
"PUT",
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type="V1User", # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get("async_req"),
_return_http_data_only=local_var_params.get(
"_return_http_data_only"
), # noqa: E501
_preload_content=local_var_params.get("_preload_content", True),
_request_timeout=local_var_params.get("_request_timeout"),
collection_formats=collection_formats,
)
| 39.867399 | 89 | 0.569815 | ["Apache-2.0"] | deeplearning2012/polyaxon | sdks/python/http_client/v1/polyaxon_sdk/api/users_v1_api.py | 47,203 | Python |
#!/home/anitha/Track/virtual/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| 24.333333 | 42 | 0.780822 | ["Unlicense"] | Anitha987/Hood-Track | virtual/bin/django-admin.py | 146 | Python |
from model.contact import Contact
import random
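# Pick a random existing contact, delete it through the application, and verify
# that the database contact list shrank by exactly that contact (and still
# matches the UI list when check_ui is enabled).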
def test_delete_some_contact(app, db, check_ui):
if app.contacts.count() == 0:
app.contacts.create_new_contact(Contact(firstname="crab"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contacts.delete_contact_by_id(contact.id)
new_contacts = db.get_contact_list()
assert len(old_contacts) - 1 == app.contacts.count()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contacts.get_contacts_list(), key=Contact.id_or_max)
| 36 | 125 | 0.739198 | ["Apache-2.0"] | winsok/pythonlearning | test/test_contactdeletetest.py | 648 | Python |
import concurrent.futures
import datetime
import io
import logging
import os
import random
import time
import typing as t
import discord
import discord.ext.commands as commands
from PIL import Image, ImageDraw, ImageSequence, ImageFont
import bot.extensions as ext
from bot.consts import Colors
from bot.messaging.events import Events
log = logging.getLogger(__name__)
MAX_WALDO_GRID_SIZE = 100
CRAB_LINE_LENGTH = 58
CRAB_COMMAND_COOLDOWN = 3
def pillow_process(args, is_rave, lines_in_text, timestamp):
# Open crab.gif and add our font
with Image.open('bot/cogs/memes_cog/assets/crab.gif') as im:
fnt = ImageFont.truetype('bot/cogs/memes_cog/assets/LemonMilk.otf', 11)
# Draw text on each frame of the gif
# Gonna be honest I don't quite understand how it works but I got it from the Pillow docs/issues
frames = []
for frame in ImageSequence.Iterator(im):
d = ImageDraw.Draw(frame)
w, h = d.textsize(args, fnt)
# draws the text on to the frame. Tries to center horizontally and tries to go as close to the bottom as possible
d.text((im.size[0] / 2 - w / 2, im.size[1] - h - (5 * lines_in_text)), args, font=fnt, align='center',
stroke_width=bool(is_rave), stroke_fill=Colors.ClemsonOrange, spacing=6)
del d
b = io.BytesIO()
frame.save(b, format='GIF')
frame = Image.open(b)
frames.append(frame)
frames[0].save(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif', save_all=True, append_images=frames[1:])
class MemesCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@ext.command()
@ext.long_help(
'A fun command to generate a pseudo bubblewrap effect in discord'
)
@ext.short_help('Creates bubblewrap!')
@ext.example('bubblewrap')
async def bubblewrap(self, ctx):
msg = ''
for _ in range(0, 5):
for _ in range(0, 10):
msg += '||pop!|| '
msg += '\n'
await ctx.send(msg)
@commands.command()
@ext.long_help(
'A fun command to generate a wheres waldo effect in discord, see if you can find him first!'
'Optionally takes a size parameter to make it easier or harder'
)
@ext.short_help('Can you find him?')
@ext.example(('waldo', 'waldo 10'))
async def waldo(self, ctx, size=MAX_WALDO_GRID_SIZE):
"""
Play Where's Waldo!
Usage: <prefix>waldo [size = 100]
"""
random_start_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'X',
'Y', 'Z']
max_waldo_line_size = 6
new_line_waldo_chance = 10
msg = ''
count = 0
place = random.randint(0, size)
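        # One random cell hides the real WALDO; every other cell gets a random
        # non-W first letter, and rows wrap either randomly or once they get too long.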
for i in range(size + 1):
if i == place:
msg += '||`WALDO`|| '
count += 1
else:
helper = random.randint(0, len(random_start_letters) - 1)
letter = random_start_letters[helper]
msg += f'||`{letter}ALDO`|| '
count += 1
new_line = random.randint(0, 100)
if new_line < new_line_waldo_chance or count > max_waldo_line_size:
msg += '\n'
count = 0
await ctx.send(msg)
@ext.command()
@ext.chainable()
@ext.long_help(
'A fun command to spongebob meme text in discord'
)
@ext.short_help('sO yOu doNt KnOw wHat tHiS Is?')
@ext.example('spongebob hello world')
async def spongebob(self, ctx, *, args):
"""
Spongebob Text
"""
random.seed(time.time())
args = args.replace('"', "'")
result = ''
for i in args:
helper = random.randint(0, 100)
if helper > 60:
result += str(i).upper()
else:
result += str(i).lower()
await ctx.send(result)
@ext.command(aliases=['rave', '🦀'])
@commands.cooldown(1, CRAB_COMMAND_COOLDOWN, commands.BucketType.guild)
@ext.long_help(
'A fun command to generate a crab rave gif with specified text overlay'
)
@ext.short_help('Generates a crab rave gif')
@ext.chainable_input()
@ext.example('crab hello from crab world')
async def crab(self, ctx, is_rave: t.Optional[bool] = True, *, args='Bottom text\n is dead'):
"""
Create your own crab rave.
Usage: <prefix>crab [is_rave=True] [text=Bottom text\\n is dead]
Aliases: rave, 🦀
"""
# crab.gif dimensions - 352 by 200
        # Immediately grab the timestamp in case of multiple calls in a row
timestamp = datetime.datetime.utcnow().microsecond
wait_msg = await ctx.send('Generating your gif')
args = args.replace('\\', '')
# Add new lines for when the text would go out of bounds
lines_in_text = 1
while len(args) > (CRAB_LINE_LENGTH * lines_in_text):
newline_loc = CRAB_LINE_LENGTH * lines_in_text
# I didn't want to add a newline in the middle of a word
while not args[newline_loc].isspace():
newline_loc -= 1
if newline_loc == CRAB_LINE_LENGTH * (lines_in_text - 1):
newline_loc = CRAB_LINE_LENGTH * lines_in_text
break
args = f'{args[:newline_loc]} \n{args[newline_loc:]}'
lines_in_text += 1
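        # Render the gif in a separate process so the CPU-bound Pillow work
        # does not block the bot's event loop.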
loop = self.bot.loop
with concurrent.futures.ProcessPoolExecutor() as pool:
pil_args = (args, is_rave, lines_in_text, timestamp)
await loop.run_in_executor(pool, pillow_process, *pil_args)
# Attach, send, and delete created gif
attachment = discord.File(filename=f'out_{timestamp}.gif', fp=f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
msg = await ctx.send(file=attachment)
await self.bot.messenger.publish(Events.on_set_deletable, msg=msg, author=ctx.author)
await wait_msg.delete()
os.remove(f'bot/cogs/memes_cog/assets/out_{timestamp}.gif')
@ext.command(hidden=True, aliases=['ctray', 'trayforjay'])
async def cookouttray(self, ctx, input):
"""
For those who do finances with cookout trays, we proudly present the command for you
Simply type one of the following:
cookouttray
ctray
trayforjay
Followed by a monetary value such as (leave off the dollar sign):
20
100
3.14
To have it converted into cookout trays
Examples:
cookouttray 20
ctray 100
trayforjay 3.14
Clicking the link "Cash to Cookout Tray Converter" in the output will also take you to cookout's website
"""
money = round(float(input), 2)
output = money / 5
embed = discord.Embed(
title='Cash to Cookout Tray Converter',
description=f'{ctx.message.author.mention} ${money} is approximately {output} cookout trays',
url=f"https://www.fastfoodmenuprices.com/cookout-prices/",
color=Colors.ClemsonOrange)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(MemesCog(bot))
| 35.462963 | 147 | 0.565144 | ["MIT"] | Cavesprout/ClemBot | bot/cogs/memes_cog/memes_cog.py | 7,666 | Python |
#! /usr/bin/env python3
# -*- coding: utf8 -*-
# Virtual dancers that consumes real GigglePixel packets
#
# To use, start this up and then bring up a server broadcasting GigglePixel.
# When this receives a palette packet, the dancing pair (whose humble wearables
# are only capable of displaying one color at a time apiece) will light up
# to match the first two elements of the packet received. When an ID packet
# is received, they will shout their love of the sender.
PORT = 7016
import socket
import sys
from time import time
from x256 import x256
from udp import *
WHITE = '\033[0m'
RGB1 = None
RGB2 = None
banner = "Yay"
note = u'♪'
face = u'(・o・)'
# Print without newline
def p(s):
sys.stdout.write(s)
# Return a two-character string for the current arm pose (unpacked as left/right) and toggle it for next time
arm_phase = False
def arms():
global arm_phase
arm_phase = not arm_phase
if arm_phase:
return u'┏┛'
else:
return u'┗┓'
# Take an RGB value and return an ANSI escape sequence to show it in the terminal
def color(rgb):
if rgb is None:
return ""
ix = x256.from_rgb(*rgb)
return "\033[38;5;%dm" % ix
# Draw the dancers
def draw():
l, r = arms()
p (color(RGB1) + l + face + r + WHITE + ' ' + note + ' ')
l, r = arms()
p (color(RGB2) + l + face + r + WHITE + " -" + banner + "!")
p ("\n\033[1A") # Keep drawing over and over on the same line
def handle_packet(gp):
global banner
global RGB1
global RGB2
if gp is None: return
if gp.packet_type == "PALETTE":
entries = gp.payload["entries"]
if len(entries) < 1:
return
elif len(entries) == 1:
entries.extend(entries)
RGB1 = (entries[0]["red"], entries[0]["green"], entries[0]["blue"])
RGB2 = (entries[1]["red"], entries[1]["green"], entries[1]["blue"])
elif gp.packet_type == "ID":
banner = "We love " + gp.payload["name"]
next_dance = time()
listener = GigglePixelListener()
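# Main loop: redraw the dancers, ask the listener for a packet (giving it the
# time remaining before the next dance beat), and if nothing arrives in time,
# schedule the next beat one second out and toggle the arms.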
try:
while True:
draw()
now = time()
time_left = next_dance - now
gp = None
if time_left > 0:
gp = listener.get_packet(time_left)
handle_packet(gp)
if gp is None:
next_dance = time() + 1
arms() # Toggle arm positions
except KeyboardInterrupt:
print (WHITE)
| 24.195652 | 86 | 0.649146 | ["MIT"] | playasystems/hacks | python-lib/example-consumer.py | 2,240 | Python |
from __future__ import unicode_literals
import io
import os
import re
import sys
from botocore.awsrequest import AWSPreparedRequest
from moto.core.utils import (
amzn_request_id,
str_to_rfc_1123_datetime,
py2_strip_unicode_keys,
)
from urllib.parse import (
parse_qs,
parse_qsl,
urlparse,
unquote,
urlencode,
urlunparse,
)
import xmltodict
from moto.packages.httpretty.core import HTTPrettyRequest
from moto.core.responses import _TemplateEnvironmentMixin, ActionAuthenticatorMixin
from moto.core.utils import path_url
from moto.core import ACCOUNT_ID
from moto.settings import S3_IGNORE_SUBDOMAIN_BUCKETNAME
from moto.s3bucket_path.utils import (
bucket_name_from_url as bucketpath_bucket_name_from_url,
parse_key_name as bucketpath_parse_key_name,
is_delete_keys as bucketpath_is_delete_keys,
)
from .exceptions import (
BucketAlreadyExists,
BucketMustHaveLockeEnabled,
DuplicateTagKeys,
InvalidContentMD5,
InvalidContinuationToken,
S3ClientError,
MissingBucket,
MissingKey,
MissingVersion,
InvalidMaxPartArgument,
InvalidPartOrder,
MalformedXML,
MalformedACLError,
IllegalLocationConstraintException,
InvalidNotificationARN,
InvalidNotificationEvent,
ObjectNotInActiveTierError,
NoSystemTags,
PreconditionFailed,
InvalidRange,
LockNotEnabled,
)
from .models import (
s3_backend,
get_canned_acl,
FakeGrantee,
FakeGrant,
FakeAcl,
FakeKey,
)
from .utils import (
bucket_name_from_url,
clean_key_name,
metadata_from_headers,
parse_region_from_url,
)
from xml.dom import minidom
DEFAULT_REGION_NAME = "us-east-1"
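# Maps resource type (bucket / key / control), HTTP method and query-string marker
# to the S3 action name used when authenticating and authorizing the request.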
ACTION_MAP = {
"BUCKET": {
"HEAD": {"DEFAULT": "HeadBucket",},
"GET": {
"uploads": "ListBucketMultipartUploads",
"location": "GetBucketLocation",
"lifecycle": "GetLifecycleConfiguration",
"versioning": "GetBucketVersioning",
"policy": "GetBucketPolicy",
"website": "GetBucketWebsite",
"acl": "GetBucketAcl",
"tagging": "GetBucketTagging",
"logging": "GetBucketLogging",
"cors": "GetBucketCORS",
"notification": "GetBucketNotification",
"accelerate": "GetAccelerateConfiguration",
"versions": "ListBucketVersions",
"public_access_block": "GetPublicAccessBlock",
"DEFAULT": "ListBucket",
},
"PUT": {
"lifecycle": "PutLifecycleConfiguration",
"versioning": "PutBucketVersioning",
"policy": "PutBucketPolicy",
"website": "PutBucketWebsite",
"acl": "PutBucketAcl",
"tagging": "PutBucketTagging",
"logging": "PutBucketLogging",
"cors": "PutBucketCORS",
"notification": "PutBucketNotification",
"accelerate": "PutAccelerateConfiguration",
"public_access_block": "PutPublicAccessBlock",
"DEFAULT": "CreateBucket",
},
"DELETE": {
"lifecycle": "PutLifecycleConfiguration",
"policy": "DeleteBucketPolicy",
"website": "DeleteBucketWebsite",
"tagging": "PutBucketTagging",
"cors": "PutBucketCORS",
"public_access_block": "DeletePublicAccessBlock",
"DEFAULT": "DeleteBucket",
},
},
"KEY": {
"HEAD": {"DEFAULT": "HeadObject",},
"GET": {
"uploadId": "ListMultipartUploadParts",
"acl": "GetObjectAcl",
"tagging": "GetObjectTagging",
"versionId": "GetObjectVersion",
"DEFAULT": "GetObject",
},
"PUT": {
"acl": "PutObjectAcl",
"tagging": "PutObjectTagging",
"DEFAULT": "PutObject",
},
"DELETE": {
"uploadId": "AbortMultipartUpload",
"versionId": "DeleteObjectVersion",
"DEFAULT": "DeleteObject",
},
"POST": {
"uploads": "PutObject",
"restore": "RestoreObject",
"uploadId": "PutObject",
},
},
"CONTROL": {
"GET": {"publicAccessBlock": "GetPublicAccessBlock"},
"PUT": {"publicAccessBlock": "PutPublicAccessBlock"},
"DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"},
},
}
def parse_key_name(pth):
# strip the first '/' left by urlparse
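    # e.g. "/some/key" -> "some/key"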
return pth[1:] if pth.startswith("/") else pth
def is_delete_keys(request, path, bucket_name):
    # Golang sends a request as url/?delete= (treating it as a normal key=value, even if the value is empty)
# Python sends a request as url/?delete (treating it as a flag)
# https://github.com/spulec/moto/issues/2937
return (
path == "/?delete"
or path == "/?delete="
or (path == "/" and getattr(request, "query_string", "") == "delete")
)
class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
def __init__(self, backend):
super(ResponseObject, self).__init__()
self.backend = backend
self.method = ""
self.path = ""
self.data = {}
self.headers = {}
@property
def should_autoescape(self):
return True
def all_buckets(self):
self.data["Action"] = "ListAllMyBuckets"
self._authenticate_and_authorize_s3_action()
# No bucket specified. Listing all buckets
all_buckets = self.backend.list_buckets()
template = self.response_template(S3_ALL_BUCKETS)
return template.render(buckets=all_buckets)
def subdomain_based_buckets(self, request):
if S3_IGNORE_SUBDOMAIN_BUCKETNAME:
return False
host = request.headers.get("host", request.headers.get("Host"))
if not host:
host = urlparse(request.url).netloc
if (
not host
or host.startswith("localhost")
or host.startswith("localstack")
or re.match(r"^[^.]+$", host)
or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host)
):
# Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev),
# (3) local host names that do not contain a "." (e.g., Docker container host names), or
# (4) kubernetes host names
return False
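        # Bare IPv4 hosts (optionally with a port) can never be bucket subdomains,
        # so they are treated as path-based as well.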
match = re.match(r"^([^\[\]:]+)(:\d+)?$", host)
if match:
match = re.match(
r"((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}", match.groups()[0]
)
if match:
return False
match = re.match(r"^\[(.+)\](:\d+)?$", host)
if match:
match = re.match(
r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z",
match.groups()[0],
re.IGNORECASE,
)
if match:
return False
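        # Anything else: plain s3[.-]<region>.amazonaws.com hosts are path-based,
        # while <bucket>.s3.amazonaws.com style hosts are subdomain-based.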
path_based = host == "s3.amazonaws.com" or re.match(
r"s3[\.\-]([^.]*)\.amazonaws\.com", host
)
return not path_based
def is_delete_keys(self, request, path, bucket_name):
if self.subdomain_based_buckets(request):
return is_delete_keys(request, path, bucket_name)
else:
return bucketpath_is_delete_keys(request, path, bucket_name)
def parse_bucket_name_from_url(self, request, url):
if self.subdomain_based_buckets(request):
return bucket_name_from_url(url)
else:
return bucketpath_bucket_name_from_url(url)
def parse_key_name(self, request, url):
if self.subdomain_based_buckets(request):
return parse_key_name(url)
else:
return bucketpath_parse_key_name(url)
def ambiguous_response(self, request, full_url, headers):
# Depending on which calling format the client is using, we don't know
# if this is a bucket or key request so we have to check
if self.subdomain_based_buckets(request):
return self.key_or_control_response(request, full_url, headers)
else:
# Using path-based buckets
return self.bucket_response(request, full_url, headers)
@amzn_request_id
def bucket_response(self, request, full_url, headers):
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
try:
response = self._bucket_response(request, full_url, headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
return self._send_response(response)
@staticmethod
def _send_response(response):
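        # Normalize plain string responses and (status, headers, body) tuples,
        # always returning the body as bytes.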
if isinstance(response, str):
return 200, {}, response.encode("utf-8")
else:
status_code, headers, response_content = response
if not isinstance(response_content, bytes):
response_content = response_content.encode("utf-8")
return status_code, headers, response_content
def _bucket_response(self, request, full_url, headers):
querystring = self._get_querystring(full_url)
method = request.method
region_name = parse_region_from_url(full_url)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
if not bucket_name:
# If no bucket specified, list all buckets
return self.all_buckets()
self.data["BucketName"] = bucket_name
if hasattr(request, "body"):
# Boto
body = request.body
else:
# Flask server
body = request.data
if body is None:
body = b""
if isinstance(body, bytes):
body = body.decode("utf-8")
body = "{0}".format(body).encode("utf-8")
if method == "HEAD":
return self._bucket_response_head(bucket_name, querystring)
elif method == "GET":
return self._bucket_response_get(bucket_name, querystring)
elif method == "PUT":
return self._bucket_response_put(
request, body, region_name, bucket_name, querystring
)
elif method == "DELETE":
return self._bucket_response_delete(body, bucket_name, querystring)
elif method == "POST":
return self._bucket_response_post(request, body, bucket_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
@staticmethod
def _get_querystring(full_url):
parsed_url = urlparse(full_url)
querystring = parse_qs(parsed_url.query, keep_blank_values=True)
return querystring
def _bucket_response_head(self, bucket_name, querystring):
self._set_action("BUCKET", "HEAD", querystring)
self._authenticate_and_authorize_s3_action()
try:
self.backend.head_bucket(bucket_name)
except MissingBucket:
# Unless we do this, boto3 does not raise ClientError on
# HEAD (which the real API responds with), and instead
# raises NoSuchBucket, leading to inconsistency in
# error response between real and mocked responses.
return 404, {}, ""
return 200, {}, ""
def _bucket_response_get(self, bucket_name, querystring):
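        # GET on a bucket: serve the requested subresource (object-lock, uploads,
        # location, lifecycle, versioning, policy, website, acl, tagging, logging,
        # cors, notification, accelerate, publicAccessBlock, versions, encryption)
        # or fall through to ListObjects / ListObjectsV2 when none is present.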
self._set_action("BUCKET", "GET", querystring)
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
(
lock_enabled,
mode,
days,
years,
) = self.backend.get_object_lock_configuration(bucket_name)
template = self.response_template(S3_BUCKET_LOCK_CONFIGURATION)
return template.render(
lock_enabled=lock_enabled, mode=mode, days=days, years=years,
)
if "uploads" in querystring:
for unsup in ("delimiter", "max-uploads"):
if unsup in querystring:
raise NotImplementedError(
"Listing multipart uploads with {} has not been implemented yet.".format(
unsup
)
)
multiparts = list(self.backend.get_all_multiparts(bucket_name).values())
if "prefix" in querystring:
prefix = querystring.get("prefix", [None])[0]
multiparts = [
upload
for upload in multiparts
if upload.key_name.startswith(prefix)
]
template = self.response_template(S3_ALL_MULTIPARTS)
return template.render(bucket_name=bucket_name, uploads=multiparts)
elif "location" in querystring:
location = self.backend.get_bucket_location(bucket_name)
template = self.response_template(S3_BUCKET_LOCATION)
# us-east-1 is different - returns a None location
if location == DEFAULT_REGION_NAME:
location = None
return template.render(location=location)
elif "lifecycle" in querystring:
rules = self.backend.get_bucket_lifecycle(bucket_name)
if not rules:
template = self.response_template(S3_NO_LIFECYCLE)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_LIFECYCLE_CONFIGURATION)
return template.render(rules=rules)
elif "versioning" in querystring:
versioning = self.backend.get_bucket_versioning(bucket_name)
template = self.response_template(S3_BUCKET_GET_VERSIONING)
return template.render(status=versioning)
elif "policy" in querystring:
policy = self.backend.get_bucket_policy(bucket_name)
if not policy:
template = self.response_template(S3_NO_POLICY)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, policy
elif "website" in querystring:
website_configuration = self.backend.get_bucket_website_configuration(
bucket_name
)
if not website_configuration:
template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
return 200, {}, website_configuration
elif "acl" in querystring:
acl = self.backend.get_bucket_acl(bucket_name)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return template.render(acl=acl)
elif "tagging" in querystring:
tags = self.backend.get_bucket_tagging(bucket_name)["Tags"]
# "Special Error" if no tags:
if len(tags) == 0:
template = self.response_template(S3_NO_BUCKET_TAGGING)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return template.render(tags=tags)
elif "logging" in querystring:
logging = self.backend.get_bucket_logging(bucket_name)
if not logging:
template = self.response_template(S3_NO_LOGGING_CONFIG)
return 200, {}, template.render()
template = self.response_template(S3_LOGGING_CONFIG)
return 200, {}, template.render(logging=logging)
elif "cors" in querystring:
cors = self.backend.get_bucket_cors(bucket_name)
if len(cors) == 0:
template = self.response_template(S3_NO_CORS_CONFIG)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_BUCKET_CORS_RESPONSE)
return template.render(cors=cors)
elif "notification" in querystring:
notification_configuration = self.backend.get_bucket_notification_configuration(
bucket_name
)
if not notification_configuration:
return 200, {}, ""
template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG)
return template.render(config=notification_configuration)
elif "accelerate" in querystring:
bucket = self.backend.get_bucket(bucket_name)
if bucket.accelerate_configuration is None:
template = self.response_template(S3_BUCKET_ACCELERATE_NOT_SET)
return 200, {}, template.render()
template = self.response_template(S3_BUCKET_ACCELERATE)
return template.render(bucket=bucket)
elif "publicAccessBlock" in querystring:
public_block_config = self.backend.get_public_access_block(bucket_name)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return template.render(public_block_config=public_block_config)
elif "versions" in querystring:
delimiter = querystring.get("delimiter", [None])[0]
encoding_type = querystring.get("encoding-type", [None])[0]
key_marker = querystring.get("key-marker", [None])[0]
max_keys = querystring.get("max-keys", [None])[0]
prefix = querystring.get("prefix", [""])[0]
version_id_marker = querystring.get("version-id-marker", [None])[0]
bucket = self.backend.get_bucket(bucket_name)
(
versions,
common_prefixes,
delete_markers,
) = self.backend.list_object_versions(
bucket_name,
delimiter=delimiter,
encoding_type=encoding_type,
key_marker=key_marker,
max_keys=max_keys,
version_id_marker=version_id_marker,
prefix=prefix,
)
key_list = versions
template = self.response_template(S3_BUCKET_GET_VERSIONS)
return (
200,
{},
template.render(
common_prefixes=common_prefixes,
key_list=key_list,
delete_marker_list=delete_markers,
bucket=bucket,
prefix=prefix,
max_keys=1000,
delimiter=delimiter,
key_marker=key_marker,
is_truncated="false",
),
)
elif "encryption" in querystring:
encryption = self.backend.get_bucket_encryption(bucket_name)
if not encryption:
template = self.response_template(S3_NO_ENCRYPTION)
return 404, {}, template.render(bucket_name=bucket_name)
template = self.response_template(S3_ENCRYPTION_CONFIG)
return 200, {}, template.render(encryption=encryption)
elif querystring.get("list-type", [None])[0] == "2":
return 200, {}, self._handle_list_objects_v2(bucket_name, querystring)
bucket = self.backend.get_bucket(bucket_name)
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
marker = querystring.get("marker", [None])[0]
result_keys, result_folders = self.backend.list_objects(
bucket, prefix, delimiter
)
if marker:
result_keys = self._get_results_from_token(result_keys, marker)
result_keys, is_truncated, next_marker = self._truncate_result(
result_keys, max_keys
)
template = self.response_template(S3_BUCKET_GET_RESPONSE)
return (
200,
{},
template.render(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
result_keys=result_keys,
result_folders=result_folders,
is_truncated=is_truncated,
next_marker=next_marker,
max_keys=max_keys,
),
)
def _set_action(self, action_resource_type, method, querystring):
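        # Record the S3 action implied by the querystring subresource in
        # self.data["Action"], using ACTION_MAP and falling back to the
        # DEFAULT entry for this HTTP method.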
action_set = False
for action_in_querystring, action in ACTION_MAP[action_resource_type][
method
].items():
if action_in_querystring in querystring:
self.data["Action"] = action
action_set = True
if not action_set:
self.data["Action"] = ACTION_MAP[action_resource_type][method]["DEFAULT"]
def _handle_list_objects_v2(self, bucket_name, querystring):
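        # ListObjectsV2: supports continuation-token / start-after pagination,
        # optional delimiter grouping into CommonPrefixes, and fetch-owner.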
template = self.response_template(S3_BUCKET_GET_RESPONSE_V2)
bucket = self.backend.get_bucket(bucket_name)
continuation_token = querystring.get("continuation-token", [None])[0]
if continuation_token is not None and continuation_token == "":
raise InvalidContinuationToken()
prefix = querystring.get("prefix", [None])[0]
if prefix and isinstance(prefix, bytes):
prefix = prefix.decode("utf-8")
delimiter = querystring.get("delimiter", [None])[0]
all_keys = self.backend.list_objects_v2(bucket, prefix, delimiter)
fetch_owner = querystring.get("fetch-owner", [False])[0]
max_keys = int(querystring.get("max-keys", [1000])[0])
start_after = querystring.get("start-after", [None])[0]
if continuation_token or start_after:
limit = continuation_token or start_after
all_keys = self._get_results_from_token(all_keys, limit)
truncated_keys, is_truncated, next_continuation_token = self._truncate_result(
all_keys, max_keys
)
result_keys, result_folders = self._split_truncated_keys(truncated_keys)
key_count = len(result_keys) + len(result_folders)
return template.render(
bucket=bucket,
prefix=prefix or "",
delimiter=delimiter,
key_count=key_count,
result_keys=result_keys,
result_folders=result_folders,
fetch_owner=fetch_owner,
max_keys=max_keys,
is_truncated=is_truncated,
next_continuation_token=next_continuation_token,
start_after=None if continuation_token else start_after,
)
@staticmethod
def _split_truncated_keys(truncated_keys):
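        # Separate real object keys (FakeKey instances) from common prefixes
        # ("folders"), which are plain strings produced by delimiter grouping.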
result_keys = []
result_folders = []
for key in truncated_keys:
if isinstance(key, FakeKey):
result_keys.append(key)
else:
result_folders.append(key)
return result_keys, result_folders
def _get_results_from_token(self, result_keys, token):
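        # Skip entries up to and including the marker / continuation token so the
        # listing resumes strictly after the last key already returned.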
continuation_index = 0
for key in result_keys:
if (key.name if isinstance(key, FakeKey) else key) > token:
break
continuation_index += 1
return result_keys[continuation_index:]
def _truncate_result(self, result_keys, max_keys):
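        # Trim the result set to max_keys; when truncated, the last returned key
        # becomes the next continuation token (or next marker).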
if max_keys == 0:
result_keys = []
is_truncated = True
next_continuation_token = None
elif len(result_keys) > max_keys:
is_truncated = "true"
result_keys = result_keys[:max_keys]
item = result_keys[-1]
next_continuation_token = item.name if isinstance(item, FakeKey) else item
else:
is_truncated = "false"
next_continuation_token = None
return result_keys, is_truncated, next_continuation_token
def _body_contains_location_constraint(self, body):
if body:
try:
xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"]
return True
except KeyError:
pass
return False
def _create_bucket_configuration_is_empty(self, body):
if body:
try:
create_bucket_configuration = xmltodict.parse(body)[
"CreateBucketConfiguration"
]
del create_bucket_configuration["@xmlns"]
if len(create_bucket_configuration) == 0:
return True
except KeyError:
pass
return False
def _parse_pab_config(self, body):
parsed_xml = xmltodict.parse(body)
parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None)
# If Python 2, fix the unicode strings:
if sys.version_info[0] < 3:
parsed_xml = {
"PublicAccessBlockConfiguration": py2_strip_unicode_keys(
dict(parsed_xml["PublicAccessBlockConfiguration"])
)
}
return parsed_xml
def _bucket_response_put(
self, request, body, region_name, bucket_name, querystring
):
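        # PUT on a bucket: handle subresources (object-lock, versioning, lifecycle,
        # policy, acl, tagging, website, cors, logging, notification, accelerate,
        # publicAccessBlock, encryption) or, with no subresource, create the bucket
        # with us-east-1's special LocationConstraint handling.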
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
self._set_action("BUCKET", "PUT", querystring)
self._authenticate_and_authorize_s3_action()
if "object-lock" in querystring:
body_decoded = body.decode()
config = self._lock_config_from_xml(body_decoded)
if not self.backend.get_bucket(bucket_name).object_lock_enabled:
raise BucketMustHaveLockeEnabled
self.backend.put_object_lock_configuration(
bucket_name,
config.get("enabled"),
config.get("mode"),
config.get("days"),
config.get("years"),
)
return 200, {}, ""
if "versioning" in querystring:
ver = re.search("<Status>([A-Za-z]+)</Status>", body.decode())
if ver:
self.backend.set_bucket_versioning(bucket_name, ver.group(1))
template = self.response_template(S3_BUCKET_VERSIONING)
return template.render(bucket_versioning_status=ver.group(1))
else:
return 404, {}, ""
elif "lifecycle" in querystring:
rules = xmltodict.parse(body)["LifecycleConfiguration"]["Rule"]
if not isinstance(rules, list):
                # If there is only one rule, xmltodict returns just the item
rules = [rules]
self.backend.put_bucket_lifecycle(bucket_name, rules)
return ""
elif "policy" in querystring:
self.backend.put_bucket_policy(bucket_name, body)
return "True"
elif "acl" in querystring:
# Headers are first. If not set, then look at the body (consistent with the documentation):
acls = self._acl_from_headers(request.headers)
if not acls:
acls = self._acl_from_xml(body)
self.backend.put_bucket_acl(bucket_name, acls)
return ""
elif "tagging" in querystring:
tagging = self._bucket_tagging_from_xml(body)
self.backend.put_bucket_tagging(bucket_name, tagging)
return ""
elif "website" in querystring:
self.backend.set_bucket_website_configuration(bucket_name, body)
return ""
elif "cors" in querystring:
try:
self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body))
return ""
except KeyError:
raise MalformedXML()
elif "logging" in querystring:
try:
self.backend.put_bucket_logging(
bucket_name, self._logging_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
elif "notification" in querystring:
try:
self.backend.put_bucket_notification_configuration(
bucket_name, self._notification_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "accelerate" in querystring:
try:
accelerate_status = self._accelerate_config_from_xml(body)
self.backend.put_bucket_accelerate_configuration(
bucket_name, accelerate_status
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
elif "publicAccessBlock" in querystring:
pab_config = self._parse_pab_config(body)
self.backend.put_bucket_public_access_block(
bucket_name, pab_config["PublicAccessBlockConfiguration"]
)
return ""
elif "encryption" in querystring:
try:
self.backend.put_bucket_encryption(
bucket_name, self._encryption_config_from_xml(body)
)
return ""
except KeyError:
raise MalformedXML()
except Exception as e:
raise e
else:
# us-east-1, the default AWS region behaves a bit differently
# - you should not use it as a location constraint --> it fails
# - querying the location constraint returns None
# - LocationConstraint has to be specified if outside us-east-1
if (
region_name != DEFAULT_REGION_NAME
and not self._body_contains_location_constraint(body)
):
raise IllegalLocationConstraintException()
if body:
if self._create_bucket_configuration_is_empty(body):
raise MalformedXML()
try:
forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][
"LocationConstraint"
]
if forced_region == DEFAULT_REGION_NAME:
raise S3ClientError(
"InvalidLocationConstraint",
"The specified location-constraint is not valid",
)
else:
region_name = forced_region
except KeyError:
pass
try:
new_bucket = self.backend.create_bucket(bucket_name, region_name)
except BucketAlreadyExists:
new_bucket = self.backend.get_bucket(bucket_name)
if (
new_bucket.region_name == DEFAULT_REGION_NAME
and region_name == DEFAULT_REGION_NAME
):
# us-east-1 has different behavior - creating a bucket there is an idempotent operation
pass
else:
template = self.response_template(S3_DUPLICATE_BUCKET_ERROR)
return 409, {}, template.render(bucket_name=bucket_name)
if "x-amz-acl" in request.headers:
# TODO: Support the XML-based ACL format
self.backend.put_bucket_acl(
bucket_name, self._acl_from_headers(request.headers)
)
if (
request.headers.get("x-amz-bucket-object-lock-enabled", "").lower()
== "true"
):
new_bucket.object_lock_enabled = True
new_bucket.versioning_status = "Enabled"
template = self.response_template(S3_BUCKET_CREATE_RESPONSE)
return 200, {}, template.render(bucket=new_bucket)
def _bucket_response_delete(self, body, bucket_name, querystring):
self._set_action("BUCKET", "DELETE", querystring)
self._authenticate_and_authorize_s3_action()
if "policy" in querystring:
self.backend.delete_bucket_policy(bucket_name, body)
return 204, {}, ""
elif "tagging" in querystring:
self.backend.delete_bucket_tagging(bucket_name)
return 204, {}, ""
elif "website" in querystring:
self.backend.delete_bucket_website(bucket_name)
return 204, {}, ""
elif "cors" in querystring:
self.backend.delete_bucket_cors(bucket_name)
return 204, {}, ""
elif "lifecycle" in querystring:
self.backend.delete_bucket_lifecycle(bucket_name)
return 204, {}, ""
elif "publicAccessBlock" in querystring:
self.backend.delete_public_access_block(bucket_name)
return 204, {}, ""
elif "encryption" in querystring:
self.backend.delete_bucket_encryption(bucket_name)
return 204, {}, ""
removed_bucket = self.backend.delete_bucket(bucket_name)
if removed_bucket:
# Bucket exists
template = self.response_template(S3_DELETE_BUCKET_SUCCESS)
return 204, {}, template.render(bucket=removed_bucket)
else:
# Tried to delete a bucket that still has keys
template = self.response_template(S3_DELETE_BUCKET_WITH_ITEMS_ERROR)
return 409, {}, template.render(bucket=removed_bucket)
def _bucket_response_post(self, request, body, bucket_name):
response_headers = {}
if not request.headers.get("Content-Length"):
return 411, {}, "Content-Length required"
path = self._get_path(request)
if self.is_delete_keys(request, path, bucket_name):
self.data["Action"] = "DeleteObject"
self._authenticate_and_authorize_s3_action()
return self._bucket_response_delete_keys(request, body, bucket_name)
self.data["Action"] = "PutObject"
self._authenticate_and_authorize_s3_action()
# POST to bucket-url should create file from form
if hasattr(request, "form"):
# Not HTTPretty
form = request.form
else:
# HTTPretty, build new form object
body = body.decode()
form = dict(parse_qsl(body))
key = form["key"]
if "file" in form:
f = form["file"]
else:
fobj = request.files["file"]
f = fobj.stream.read()
key = key.replace("${filename}", os.path.basename(fobj.filename))
if "success_action_redirect" in form:
redirect = form["success_action_redirect"]
parts = urlparse(redirect)
queryargs = parse_qs(parts.query)
queryargs["key"] = key
queryargs["bucket"] = bucket_name
redirect_queryargs = urlencode(queryargs, doseq=True)
newparts = (
parts.scheme,
parts.netloc,
parts.path,
parts.params,
redirect_queryargs,
parts.fragment,
)
fixed_redirect = urlunparse(newparts)
response_headers["Location"] = fixed_redirect
if "success_action_status" in form:
status_code = form["success_action_status"]
elif "success_action_redirect" in form:
status_code = 303
else:
status_code = 204
new_key = self.backend.put_object(bucket_name, key, f)
if form.get("acl"):
acl = get_canned_acl(form.get("acl"))
new_key.set_acl(acl)
# Metadata
metadata = metadata_from_headers(form)
new_key.set_metadata(metadata)
return status_code, response_headers, ""
@staticmethod
def _get_path(request):
if isinstance(request, HTTPrettyRequest):
path = request.path
else:
path = (
request.full_path
if hasattr(request, "full_path")
else path_url(request.url)
)
return path
def _bucket_response_delete_keys(self, request, body, bucket_name):
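        # Bulk delete (POST /?delete): parse the <Delete> document and remove each
        # listed object, rendering the per-key results into the response template.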
template = self.response_template(S3_DELETE_KEYS_RESPONSE)
body_dict = xmltodict.parse(body, strip_whitespace=False)
objects = body_dict["Delete"].get("Object", [])
if not isinstance(objects, list):
# We expect a list of objects, but when there is a single <Object> node xmltodict does not
# return a list.
objects = [objects]
if len(objects) == 0:
raise MalformedXML()
deleted_objects = self.backend.delete_objects(bucket_name, objects)
error_names = []
return (
200,
{},
template.render(deleted=deleted_objects, delete_errors=error_names),
)
def _handle_range_header(self, request, headers, response_content):
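        # Serve a single HTTP Range request ("bytes=a-b" or suffix "bytes=-n"):
        # returns 206 with a Content-Range header, or raises InvalidRange when the
        # requested start lies beyond the end of the object.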
response_headers = {}
length = len(response_content)
last = length - 1
_, rspec = request.headers.get("range").split("=")
if "," in rspec:
raise NotImplementedError("Multiple range specifiers not supported")
def toint(i):
return int(i) if i else None
begin, end = map(toint, rspec.split("-"))
if begin is not None: # byte range
end = last if end is None else min(end, last)
elif end is not None: # suffix byte range
begin = length - min(end, length)
end = last
else:
return 400, response_headers, ""
if begin < 0 or end > last or begin > min(end, last):
raise InvalidRange(
actual_size=str(length), range_requested=request.headers.get("range")
)
response_headers["content-range"] = "bytes {0}-{1}/{2}".format(
begin, end, length
)
content = response_content[begin : end + 1]
response_headers["content-length"] = len(content)
return 206, response_headers, content
def _handle_v4_chunk_signatures(self, body, content_length):
body_io = io.BytesIO(body)
new_body = bytearray(content_length)
pos = 0
line = body_io.readline()
while line:
# https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
# str(hex(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
chunk_size = int(line[: line.find(b";")].decode("utf8"), 16)
new_body[pos : pos + chunk_size] = body_io.read(chunk_size)
pos = pos + chunk_size
body_io.read(2) # skip trailing \r\n
line = body_io.readline()
return bytes(new_body)
@amzn_request_id
def key_or_control_response(self, request, full_url, headers):
# Key and Control are lumped in because splitting out the regex is too much of a pain :/
self.method = request.method
self.path = self._get_path(request)
self.headers = request.headers
if "host" not in self.headers:
self.headers["host"] = urlparse(full_url).netloc
response_headers = {}
try:
# Is this an S3 control response?
if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url:
response = self._control_response(request, full_url, headers)
else:
response = self._key_response(request, full_url, self.headers)
except S3ClientError as s3error:
response = s3error.code, {}, s3error.description
if isinstance(response, str):
status_code = 200
response_content = response
else:
status_code, response_headers, response_content = response
if (
status_code == 200
and "range" in request.headers
and request.headers["range"] != ""
):
try:
return self._handle_range_header(
request, response_headers, response_content
)
except S3ClientError as s3error:
return s3error.code, {}, s3error.description
return status_code, response_headers, response_content
def _control_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if body is None:
body = b""
if method == "GET":
return self._control_response_get(request, query, headers)
elif method == "PUT":
return self._control_response_put(request, body, query, headers)
elif method == "DELETE":
return self._control_response_delete(request, query, headers)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _control_response_get(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "GET", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
public_block_config = self.backend.get_account_public_access_block(
headers["x-amz-account-id"]
)
template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION)
return (
200,
response_headers,
template.render(public_block_config=public_block_config),
)
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_put(self, request, body, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "PUT", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
pab_config = self._parse_pab_config(body)
self.backend.put_account_public_access_block(
headers["x-amz-account-id"],
pab_config["PublicAccessBlockConfiguration"],
)
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _control_response_delete(self, request, query, headers):
action = self.path.split("?")[0].split("/")[
-1
] # Gets the action out of the URL sans query params.
self._set_action("CONTROL", "DELETE", action)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if "publicAccessBlock" in action:
self.backend.delete_account_public_access_block(headers["x-amz-account-id"])
return 200, response_headers, ""
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(action)
)
def _key_response(self, request, full_url, headers):
parsed_url = urlparse(full_url)
query = parse_qs(parsed_url.query, keep_blank_values=True)
method = request.method
key_name = self.parse_key_name(request, parsed_url.path)
bucket_name = self.parse_bucket_name_from_url(request, full_url)
# Because we patch the requests library the boto/boto3 API
# requests go through this method but so do
# `requests.get("https://bucket-name.s3.amazonaws.com/file-name")`
# Here we deny public access to private files by checking the
# ACL and checking for the mere presence of an Authorization
# header.
if "Authorization" not in request.headers:
if hasattr(request, "url"):
signed_url = "Signature=" in request.url
elif hasattr(request, "requestline"):
signed_url = "Signature=" in request.path
key = self.backend.get_object(bucket_name, key_name)
if key:
if not key.acl.public_read and not signed_url:
return 403, {}, ""
elif signed_url:
# coming in from requests.get(s3.generate_presigned_url())
if self._invalid_headers(request.url, dict(request.headers)):
return 403, {}, S3_INVALID_PRESIGNED_PARAMETERS
if hasattr(request, "body"):
# Boto
body = request.body
if hasattr(body, "read"):
body = body.read()
else:
# Flask server
body = request.data
if not body:
# when the data is being passed as a file
if request.files:
for _, value in request.files.items():
body = value.stream.read()
elif hasattr(request, "form"):
# Body comes through as part of the form, if no content-type is set on the PUT-request
# form = ImmutableMultiDict([('some data 123 321', '')])
form = request.form
for k, _ in form.items():
body = k
if body is None:
body = b""
if (
request.headers.get("x-amz-content-sha256", None)
== "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
):
body = self._handle_v4_chunk_signatures(
body, int(request.headers["x-amz-decoded-content-length"])
)
if method == "GET":
return self._key_response_get(
bucket_name, query, key_name, headers=request.headers
)
elif method == "PUT":
return self._key_response_put(
request, body, bucket_name, query, key_name, headers
)
elif method == "HEAD":
return self._key_response_head(
bucket_name, query, key_name, headers=request.headers
)
elif method == "DELETE":
return self._key_response_delete(headers, bucket_name, query, key_name)
elif method == "POST":
return self._key_response_post(request, body, bucket_name, query, key_name)
else:
raise NotImplementedError(
"Method {0} has not been implemented in the S3 backend yet".format(
method
)
)
def _key_response_get(self, bucket_name, query, key_name, headers):
self._set_action("KEY", "GET", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId"):
upload_id = query["uploadId"][0]
# 0 <= PartNumberMarker <= 2,147,483,647
part_number_marker = int(query.get("part-number-marker", [0])[0])
if not (0 <= part_number_marker <= 2147483647):
raise InvalidMaxPartArgument("part-number-marker", 0, 2147483647)
# 0 <= MaxParts <= 2,147,483,647 (default is 1,000)
max_parts = int(query.get("max-parts", [1000])[0])
if not (0 <= max_parts <= 2147483647):
raise InvalidMaxPartArgument("max-parts", 0, 2147483647)
parts = self.backend.list_parts(
bucket_name,
upload_id,
part_number_marker=part_number_marker,
max_parts=max_parts,
)
next_part_number_marker = parts[-1].name + 1 if parts else 0
is_truncated = parts and self.backend.is_truncated(
bucket_name, upload_id, next_part_number_marker
)
template = self.response_template(S3_MULTIPART_LIST_RESPONSE)
return (
200,
response_headers,
template.render(
bucket_name=bucket_name,
key_name=key_name,
upload_id=upload_id,
is_truncated=str(is_truncated).lower(),
max_parts=max_parts,
next_part_number_marker=next_part_number_marker,
parts=parts,
part_number_marker=part_number_marker,
),
)
version_id = query.get("versionId", [None])[0]
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
if key is None and version_id is None:
raise MissingKey(key_name)
elif key is None:
raise MissingVersion()
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
raise PreconditionFailed("If-Unmodified-Since")
if if_match and key.etag not in [if_match, '"{0}"'.format(if_match)]:
raise PreconditionFailed("If-Match")
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
if "acl" in query:
acl = s3_backend.get_object_acl(key)
template = self.response_template(S3_OBJECT_ACL_RESPONSE)
return 200, response_headers, template.render(acl=acl)
if "tagging" in query:
tags = self.backend.get_object_tagging(key)["Tags"]
template = self.response_template(S3_OBJECT_TAGGING_RESPONSE)
return 200, response_headers, template.render(tags=tags)
if "legal-hold" in query:
legal_hold = self.backend.get_object_legal_hold(key)
template = self.response_template(S3_OBJECT_LEGAL_HOLD)
return 200, response_headers, template.render(legal_hold=legal_hold)
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
return 200, response_headers, key.value
def _key_response_put(self, request, body, bucket_name, query, key_name, headers):
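        # PUT on a key: multipart UploadPart / UploadPartCopy, then the retention,
        # legal-hold, acl and tagging subresources, then server-side CopyObject,
        # and finally a plain PutObject (with streaming-append handling).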
self._set_action("KEY", "PUT", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
if query.get("uploadId") and query.get("partNumber"):
upload_id = query["uploadId"][0]
part_number = int(query["partNumber"][0])
if "x-amz-copy-source" in request.headers:
src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/")
src_bucket, src_key = src.split("/", 1)
src_key, src_version_id = (
src_key.split("?versionId=")
if "?versionId=" in src_key
else (src_key, None)
)
src_range = request.headers.get("x-amz-copy-source-range", "").split(
"bytes="
)[-1]
try:
start_byte, end_byte = src_range.split("-")
start_byte, end_byte = int(start_byte), int(end_byte)
except ValueError:
start_byte, end_byte = None, None
if self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
):
key = self.backend.copy_part(
bucket_name,
upload_id,
part_number,
src_bucket,
src_key,
src_version_id,
start_byte,
end_byte,
)
else:
return 404, response_headers, ""
template = self.response_template(S3_MULTIPART_UPLOAD_RESPONSE)
response = template.render(part=key)
else:
key = self.backend.upload_part(
bucket_name, upload_id, part_number, body
)
response = ""
response_headers.update(key.response_dict)
return 200, response_headers, response
storage_class = request.headers.get("x-amz-storage-class", "STANDARD")
encryption = request.headers.get("x-amz-server-side-encryption", None)
kms_key_id = request.headers.get(
"x-amz-server-side-encryption-aws-kms-key-id", None
)
bucket_key_enabled = request.headers.get(
"x-amz-server-side-encryption-bucket-key-enabled", None
)
if bucket_key_enabled is not None:
bucket_key_enabled = str(bucket_key_enabled).lower()
bucket = self.backend.get_bucket(bucket_name)
lock_enabled = bucket.object_lock_enabled
lock_mode = request.headers.get("x-amz-object-lock-mode", None)
lock_until = request.headers.get("x-amz-object-lock-retain-until-date", None)
legal_hold = request.headers.get("x-amz-object-lock-legal-hold", "OFF")
if lock_mode or lock_until or legal_hold == "ON":
if not request.headers.get("Content-Md5"):
raise InvalidContentMD5
if not lock_enabled:
raise LockNotEnabled
elif lock_enabled and bucket.has_default_lock:
if not request.headers.get("Content-Md5"):
raise InvalidContentMD5
lock_until = bucket.default_retention()
lock_mode = bucket.default_lock_mode
acl = self._acl_from_headers(request.headers)
if acl is None:
acl = self.backend.get_bucket(bucket_name).acl
tagging = self._tagging_from_headers(request.headers)
if "retention" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
retention = self._mode_until_from_xml(body)
self.backend.put_object_retention(
bucket_name, key_name, version_id=version_id, retention=retention
)
return 200, response_headers, ""
if "legal-hold" in query:
if not lock_enabled:
raise LockNotEnabled
version_id = query.get("VersionId")
legal_hold_status = self._legal_hold_status_from_xml(body)
self.backend.put_object_legal_hold(
bucket_name, key_name, version_id, legal_hold_status
)
return 200, response_headers, ""
if "acl" in query:
self.backend.put_object_acl(bucket_name, key_name, acl)
return 200, response_headers, ""
if "tagging" in query:
if "versionId" in query:
version_id = query["versionId"][0]
else:
version_id = None
key = self.backend.get_object(bucket_name, key_name, version_id=version_id)
tagging = self._tagging_from_xml(body)
self.backend.set_key_tags(key, tagging, key_name)
return 200, response_headers, ""
if "x-amz-copy-source" in request.headers:
            # Copy key
            # The x-amz-copy-source header may be URL-quoted and may carry a
            # ?versionId=abc suffix, so unquote it first and then parse out the
            # source bucket, key and version id.
src_key = request.headers.get("x-amz-copy-source")
if isinstance(src_key, bytes):
src_key = src_key.decode("utf-8")
src_key_parsed = urlparse(src_key)
src_bucket, src_key = (
clean_key_name(src_key_parsed.path).lstrip("/").split("/", 1)
)
src_version_id = parse_qs(src_key_parsed.query).get("versionId", [None])[0]
key = self.backend.get_object(
src_bucket, src_key, version_id=src_version_id
)
if key is not None:
if key.storage_class in ["GLACIER", "DEEP_ARCHIVE"]:
if key.response_dict.get(
"x-amz-restore"
) is None or 'ongoing-request="true"' in key.response_dict.get(
"x-amz-restore"
):
raise ObjectNotInActiveTierError(key)
self.backend.copy_object(
src_bucket,
src_key,
bucket_name,
key_name,
storage=storage_class,
acl=acl,
src_version_id=src_version_id,
)
else:
return 404, response_headers, ""
new_key = self.backend.get_object(bucket_name, key_name)
mdirective = request.headers.get("x-amz-metadata-directive")
if mdirective is not None and mdirective == "REPLACE":
metadata = metadata_from_headers(request.headers)
new_key.set_metadata(metadata, replace=True)
tdirective = request.headers.get("x-amz-tagging-directive")
if tdirective == "REPLACE":
tagging = self._tagging_from_headers(request.headers)
self.backend.set_key_tags(new_key, tagging)
template = self.response_template(S3_OBJECT_COPY_RESPONSE)
response_headers.update(new_key.response_dict)
return 200, response_headers, template.render(key=new_key)
streaming_request = hasattr(request, "streaming") and request.streaming
closing_connection = headers.get("connection") == "close"
if closing_connection and streaming_request:
# Closing the connection of a streaming request. No more data
new_key = self.backend.get_object(bucket_name, key_name)
elif streaming_request:
# Streaming request, more data
new_key = self.backend.append_to_key(bucket_name, key_name, body)
else:
# Initial data
new_key = self.backend.put_object(
bucket_name,
key_name,
body,
storage=storage_class,
encryption=encryption,
kms_key_id=kms_key_id,
bucket_key_enabled=bucket_key_enabled,
lock_mode=lock_mode,
lock_legal_status=legal_hold,
lock_until=lock_until,
)
request.streaming = True
metadata = metadata_from_headers(request.headers)
metadata.update(metadata_from_headers(query))
new_key.set_metadata(metadata)
new_key.set_acl(acl)
new_key.website_redirect_location = request.headers.get(
"x-amz-website-redirect-location"
)
self.backend.set_key_tags(new_key, tagging)
response_headers.update(new_key.response_dict)
return 200, response_headers, ""
def _key_response_head(self, bucket_name, query, key_name, headers):
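        # HEAD on a key: honours versionId / partNumber and the conditional
        # If-(Un)Modified-Since / If-(None-)Match headers, answering 412 or 304
        # as appropriate and 404 when the key does not exist.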
self._set_action("KEY", "HEAD", query)
self._authenticate_and_authorize_s3_action()
response_headers = {}
version_id = query.get("versionId", [None])[0]
part_number = query.get("partNumber", [None])[0]
if part_number:
part_number = int(part_number)
if_modified_since = headers.get("If-Modified-Since", None)
if_match = headers.get("If-Match", None)
if_none_match = headers.get("If-None-Match", None)
if_unmodified_since = headers.get("If-Unmodified-Since", None)
key = self.backend.head_object(
bucket_name, key_name, version_id=version_id, part_number=part_number
)
if key:
response_headers.update(key.metadata)
response_headers.update(key.response_dict)
if if_unmodified_since:
if_unmodified_since = str_to_rfc_1123_datetime(if_unmodified_since)
if key.last_modified > if_unmodified_since:
return 412, response_headers, ""
if if_match and key.etag != if_match:
return 412, response_headers, ""
if if_modified_since:
if_modified_since = str_to_rfc_1123_datetime(if_modified_since)
if key.last_modified < if_modified_since:
return 304, response_headers, "Not Modified"
if if_none_match and key.etag == if_none_match:
return 304, response_headers, "Not Modified"
return 200, response_headers, ""
else:
return 404, response_headers, ""
def _lock_config_from_xml(self, xml):
response_dict = {"enabled": False, "mode": None, "days": None, "years": None}
parsed_xml = xmltodict.parse(xml)
enabled = (
parsed_xml["ObjectLockConfiguration"]["ObjectLockEnabled"] == "Enabled"
)
response_dict["enabled"] = enabled
default_retention = parsed_xml.get("ObjectLockConfiguration").get("Rule")
if default_retention:
default_retention = default_retention.get("DefaultRetention")
mode = default_retention["Mode"]
days = int(default_retention.get("Days", 0))
years = int(default_retention.get("Years", 0))
if days and years:
raise MalformedXML
response_dict["mode"] = mode
response_dict["days"] = days
response_dict["years"] = years
return response_dict
def _acl_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml.get("AccessControlPolicy"):
raise MalformedACLError()
        # The AccessControlPolicy must include an Owner element:
if not parsed_xml["AccessControlPolicy"].get("Owner"):
# TODO: Validate that the Owner is actually correct.
raise MalformedACLError()
# If empty, then no ACLs:
if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None:
return []
if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"):
raise MalformedACLError()
permissions = ["READ", "WRITE", "READ_ACP", "WRITE_ACP", "FULL_CONTROL"]
if not isinstance(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list
):
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = [
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]
]
grants = self._get_grants_from_xml(
parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"],
MalformedACLError,
permissions,
)
return FakeAcl(grants)
def _get_grants_from_xml(self, grant_list, exception_type, permissions):
grants = []
for grant in grant_list:
if grant.get("Permission", "") not in permissions:
raise exception_type()
if grant["Grantee"].get("@xsi:type", "") not in [
"CanonicalUser",
"AmazonCustomerByEmail",
"Group",
]:
raise exception_type()
# TODO: Verify that the proper grantee data is supplied based on the type.
grants.append(
FakeGrant(
[
FakeGrantee(
id=grant["Grantee"].get("ID", ""),
display_name=grant["Grantee"].get("DisplayName", ""),
uri=grant["Grantee"].get("URI", ""),
)
],
[grant["Permission"]],
)
)
return grants
def _acl_from_headers(self, headers):
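        # Prefer a canned ACL from the x-amz-acl header; otherwise build grants
        # from any x-amz-grant-<permission> headers, whose values are
        # comma-separated id=/uri= grantees. Returns None when no ACL headers
        # are present.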
canned_acl = headers.get("x-amz-acl", "")
if canned_acl:
return get_canned_acl(canned_acl)
grants = []
for header, value in headers.items():
header = header.lower()
if not header.startswith("x-amz-grant-"):
continue
permission = {
"read": "READ",
"write": "WRITE",
"read-acp": "READ_ACP",
"write-acp": "WRITE_ACP",
"full-control": "FULL_CONTROL",
}[header[len("x-amz-grant-") :]]
grantees = []
for key_and_value in value.split(","):
key, value = re.match(
'([^=]+)="?([^"]+)"?', key_and_value.strip()
).groups()
if key.lower() == "id":
grantees.append(FakeGrantee(id=value))
else:
grantees.append(FakeGrantee(uri=value))
grants.append(FakeGrant(grantees, [permission]))
if grants:
return FakeAcl(grants)
else:
return None
def _tagging_from_headers(self, headers):
tags = {}
if headers.get("x-amz-tagging"):
parsed_header = parse_qs(headers["x-amz-tagging"], keep_blank_values=True)
for tag in parsed_header.items():
tags[tag[0]] = tag[1][0]
return tags
def _tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml, force_list={"Tag": True})
tags = {}
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
tags[tag["Key"]] = tag["Value"]
return tags
def _bucket_tagging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
tags = {}
# Optional if no tags are being sent:
if parsed_xml["Tagging"].get("TagSet"):
# If there is only 1 tag, then it's not a list:
if not isinstance(parsed_xml["Tagging"]["TagSet"]["Tag"], list):
tags[parsed_xml["Tagging"]["TagSet"]["Tag"]["Key"]] = parsed_xml[
"Tagging"
]["TagSet"]["Tag"]["Value"]
else:
for tag in parsed_xml["Tagging"]["TagSet"]["Tag"]:
if tag["Key"] in tags:
raise DuplicateTagKeys()
tags[tag["Key"]] = tag["Value"]
# Verify that "aws:" is not in the tags. If so, then this is a problem:
for key, _ in tags.items():
if key.startswith("aws:"):
raise NoSystemTags()
return tags
def _cors_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if isinstance(parsed_xml["CORSConfiguration"]["CORSRule"], list):
return [cors for cors in parsed_xml["CORSConfiguration"]["CORSRule"]]
return [parsed_xml["CORSConfiguration"]["CORSRule"]]
def _mode_until_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
return (
parsed_xml["Retention"]["Mode"],
parsed_xml["Retention"]["RetainUntilDate"],
)
def _legal_hold_status_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
return parsed_xml["LegalHold"]["Status"]
def _encryption_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if (
not parsed_xml["ServerSideEncryptionConfiguration"].get("Rule")
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"].get(
"ApplyServerSideEncryptionByDefault"
)
or not parsed_xml["ServerSideEncryptionConfiguration"]["Rule"][
"ApplyServerSideEncryptionByDefault"
].get("SSEAlgorithm")
):
raise MalformedXML()
return [parsed_xml["ServerSideEncryptionConfiguration"]]
def _logging_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"):
return {}
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"):
raise MalformedXML()
if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"):
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = ""
# Get the ACLs:
if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"):
permissions = ["READ", "WRITE", "FULL_CONTROL"]
if not isinstance(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
list,
):
target_grants = self._get_grants_from_xml(
[
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
]["Grant"]
],
MalformedXML,
permissions,
)
else:
target_grants = self._get_grants_from_xml(
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"][
"Grant"
],
MalformedXML,
permissions,
)
parsed_xml["BucketLoggingStatus"]["LoggingEnabled"][
"TargetGrants"
] = target_grants
return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]
def _notification_config_from_xml(self, xml):
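        # Validate and normalise Topic/Queue/CloudFunction configurations: check
        # the ARN prefix, the event names, and any S3Key filter rules, wrapping
        # single entries into lists along the way.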
parsed_xml = xmltodict.parse(xml)
if not len(parsed_xml["NotificationConfiguration"]):
return {}
# The types of notifications, and their required fields (apparently lambda is categorized by the API as
# "CloudFunction"):
notification_fields = [
("Topic", "sns"),
("Queue", "sqs"),
("CloudFunction", "lambda"),
]
event_names = [
"s3:ReducedRedundancyLostObject",
"s3:ObjectCreated:*",
"s3:ObjectCreated:Put",
"s3:ObjectCreated:Post",
"s3:ObjectCreated:Copy",
"s3:ObjectCreated:CompleteMultipartUpload",
"s3:ObjectRemoved:*",
"s3:ObjectRemoved:Delete",
"s3:ObjectRemoved:DeleteMarkerCreated",
]
found_notifications = (
0 # Tripwire -- if this is not ever set, then there were no notifications
)
for name, arn_string in notification_fields:
# 1st verify that the proper notification configuration has been passed in (with an ARN that is close
# to being correct -- nothing too complex in the ARN logic):
the_notification = parsed_xml["NotificationConfiguration"].get(
"{}Configuration".format(name)
)
if the_notification:
found_notifications += 1
if not isinstance(the_notification, list):
the_notification = parsed_xml["NotificationConfiguration"][
"{}Configuration".format(name)
] = [the_notification]
for n in the_notification:
if not n[name].startswith("arn:aws:{}:".format(arn_string)):
raise InvalidNotificationARN()
# 2nd, verify that the Events list is correct:
assert n["Event"]
if not isinstance(n["Event"], list):
n["Event"] = [n["Event"]]
for event in n["Event"]:
if event not in event_names:
raise InvalidNotificationEvent()
# Parse out the filters:
if n.get("Filter"):
# Error if S3Key is blank:
if not n["Filter"]["S3Key"]:
raise KeyError()
if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list):
n["Filter"]["S3Key"]["FilterRule"] = [
n["Filter"]["S3Key"]["FilterRule"]
]
for filter_rule in n["Filter"]["S3Key"]["FilterRule"]:
assert filter_rule["Name"] in ["suffix", "prefix"]
assert filter_rule["Value"]
if not found_notifications:
return {}
return parsed_xml["NotificationConfiguration"]
def _accelerate_config_from_xml(self, xml):
parsed_xml = xmltodict.parse(xml)
config = parsed_xml["AccelerateConfiguration"]
return config["Status"]
def _key_response_delete(self, headers, bucket_name, query, key_name):
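        # DELETE on a key: abort a multipart upload, remove object tagging, or
        # delete the object (optionally a specific version, with optional
        # governance-retention bypass).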
self._set_action("KEY", "DELETE", query)
self._authenticate_and_authorize_s3_action()
if query.get("uploadId"):
upload_id = query["uploadId"][0]
self.backend.abort_multipart_upload(bucket_name, upload_id)
return 204, {}, ""
version_id = query.get("versionId", [None])[0]
if "tagging" in query:
self.backend.delete_object_tagging(
bucket_name, key_name, version_id=version_id
)
template = self.response_template(S3_DELETE_KEY_TAGGING_RESPONSE)
return 204, {}, template.render(version_id=version_id)
bypass = headers.get("X-Amz-Bypass-Governance-Retention")
success, response_meta = self.backend.delete_object(
bucket_name, key_name, version_id=version_id, bypass=bypass
)
response_headers = {}
if response_meta is not None:
for k in response_meta:
response_headers["x-amz-{}".format(k)] = response_meta[k]
return 204, response_headers, ""
def _complete_multipart_body(self, body):
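        # Parse the CompleteMultipartUpload XML, yielding (part number, ETag)
        # pairs and raising InvalidPartOrder when part numbers are not strictly
        # increasing.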
ps = minidom.parseString(body).getElementsByTagName("Part")
prev = 0
for p in ps:
pn = int(p.getElementsByTagName("PartNumber")[0].firstChild.wholeText)
if pn <= prev:
raise InvalidPartOrder()
yield (pn, p.getElementsByTagName("ETag")[0].firstChild.wholeText)
def _key_response_post(self, request, body, bucket_name, query, key_name):
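        # POST on a key: initiate a multipart upload (?uploads), complete one
        # (?uploadId=...), or restore an archived object (?restore).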
self._set_action("KEY", "POST", query)
self._authenticate_and_authorize_s3_action()
if body == b"" and "uploads" in query:
metadata = metadata_from_headers(request.headers)
storage_type = request.headers.get("x-amz-storage-class", "STANDARD")
multipart_id = self.backend.create_multipart_upload(
bucket_name, key_name, metadata, storage_type
)
template = self.response_template(S3_MULTIPART_INITIATE_RESPONSE)
response = template.render(
bucket_name=bucket_name, key_name=key_name, upload_id=multipart_id
)
return 200, {}, response
if query.get("uploadId"):
body = self._complete_multipart_body(body)
multipart_id = query["uploadId"][0]
multipart, value, etag = self.backend.complete_multipart_upload(
bucket_name, multipart_id, body
)
if value is None:
return 400, {}, ""
key = self.backend.put_object(
bucket_name,
multipart.key_name,
value,
storage=multipart.storage,
etag=etag,
multipart=multipart,
)
key.set_metadata(multipart.metadata)
template = self.response_template(S3_MULTIPART_COMPLETE_RESPONSE)
headers = {}
if key.version_id:
headers["x-amz-version-id"] = key.version_id
return (
200,
headers,
template.render(
bucket_name=bucket_name, key_name=key.name, etag=key.etag
),
)
elif "restore" in query:
es = minidom.parseString(body).getElementsByTagName("Days")
days = es[0].childNodes[0].wholeText
key = self.backend.get_object(bucket_name, key_name)
r = 202
if key.expiry_date is not None:
r = 200
key.restore(int(days))
return r, {}, ""
else:
raise NotImplementedError(
"Method POST had only been implemented for multipart uploads and restore operations, so far"
)
def _invalid_headers(self, url, headers):
"""
        Verify whether metadata passed as query parameters in the presigned URL matches the request headers
        :param url: .../file.txt&content-type=app%2Fjson&Signature=..
        :param headers: Content-Type=app/json
        :return: True if any URL-specified value is missing from the headers or differs from them (i.e. invalid), False otherwise
"""
metadata_to_check = {
"content-disposition": "Content-Disposition",
"content-encoding": "Content-Encoding",
"content-language": "Content-Language",
"content-length": "Content-Length",
"content-md5": "Content-MD5",
"content-type": "Content-Type",
}
for url_key, header_key in metadata_to_check.items():
metadata_in_url = re.search(url_key + "=(.+?)(&.+$|$)", url)
if metadata_in_url:
url_value = unquote(metadata_in_url.group(1))
if header_key not in headers or (url_value != headers[header_key]):
return True
return False
S3ResponseInstance = ResponseObject(s3_backend)
S3_ALL_BUCKETS = """<ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Owner>
<ID>bcaf1ffd86f41161ca5fb16fd081034f</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<Buckets>
{% for bucket in buckets %}
<Bucket>
<Name>{{ bucket.name }}</Name>
<CreationDate>{{ bucket.creation_date_ISO8601 }}</CreationDate>
</Bucket>
{% endfor %}
</Buckets>
</ListAllMyBucketsResult>"""
S3_BUCKET_GET_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_marker %}
<NextMarker>{{ next_marker }}</NextMarker>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_GET_RESPONSE_V2 = """<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
<MaxKeys>{{ max_keys }}</MaxKeys>
<KeyCount>{{ key_count }}</KeyCount>
{% if delimiter %}
<Delimiter>{{ delimiter }}</Delimiter>
{% endif %}
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% if next_continuation_token %}
<NextContinuationToken>{{ next_continuation_token }}</NextContinuationToken>
{% endif %}
{% if start_after %}
<StartAfter>{{ start_after }}</StartAfter>
{% endif %}
{% for key in result_keys %}
<Contents>
<Key>{{ key.name }}</Key>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
{% if fetch_owner %}
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
{% endif %}
</Contents>
{% endfor %}
{% if delimiter %}
{% for folder in result_folders %}
<CommonPrefixes>
<Prefix>{{ folder }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
</ListBucketResult>"""
S3_BUCKET_CREATE_RESPONSE = """<CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<CreateBucketResponse>
<Bucket>{{ bucket.name }}</Bucket>
</CreateBucketResponse>
</CreateBucketResponse>"""
S3_DELETE_BUCKET_SUCCESS = """<DeleteBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<DeleteBucketResponse>
<Code>204</Code>
<Description>No Content</Description>
</DeleteBucketResponse>
</DeleteBucketResponse>"""
S3_DELETE_BUCKET_WITH_ITEMS_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>BucketNotEmpty</Code>
<Message>The bucket you tried to delete is not empty</Message>
<BucketName>{{ bucket.name }}</BucketName>
<RequestId>asdfasdfsdafds</RequestId>
<HostId>sdfgdsfgdsfgdfsdsfgdfs</HostId>
</Error>"""
S3_BUCKET_LOCATION = """<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/">{% if location != None %}{{ location }}{% endif %}</LocationConstraint>"""
S3_BUCKET_LIFECYCLE_CONFIGURATION = """<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for rule in rules %}
<Rule>
<ID>{{ rule.id }}</ID>
{% if rule.filter %}
<Filter>
{% if rule.filter.prefix != None %}
<Prefix>{{ rule.filter.prefix }}</Prefix>
{% endif %}
{% if rule.filter.tag_key %}
<Tag>
<Key>{{ rule.filter.tag_key }}</Key>
<Value>{{ rule.filter.tag_value }}</Value>
</Tag>
{% endif %}
{% if rule.filter.and_filter %}
<And>
{% if rule.filter.and_filter.prefix != None %}
<Prefix>{{ rule.filter.and_filter.prefix }}</Prefix>
{% endif %}
{% for key, value in rule.filter.and_filter.tags.items() %}
<Tag>
<Key>{{ key }}</Key>
<Value>{{ value }}</Value>
</Tag>
{% endfor %}
</And>
{% endif %}
</Filter>
{% else %}
{% if rule.prefix != None %}
<Prefix>{{ rule.prefix }}</Prefix>
{% endif %}
{% endif %}
<Status>{{ rule.status }}</Status>
{% if rule.storage_class %}
<Transition>
{% if rule.transition_days %}
<Days>{{ rule.transition_days }}</Days>
{% endif %}
{% if rule.transition_date %}
<Date>{{ rule.transition_date }}</Date>
{% endif %}
<StorageClass>{{ rule.storage_class }}</StorageClass>
</Transition>
{% endif %}
{% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %}
<Expiration>
{% if rule.expiration_days %}
<Days>{{ rule.expiration_days }}</Days>
{% endif %}
{% if rule.expiration_date %}
<Date>{{ rule.expiration_date }}</Date>
{% endif %}
{% if rule.expired_object_delete_marker %}
<ExpiredObjectDeleteMarker>{{ rule.expired_object_delete_marker }}</ExpiredObjectDeleteMarker>
{% endif %}
</Expiration>
{% endif %}
{% if rule.nvt_noncurrent_days and rule.nvt_storage_class %}
<NoncurrentVersionTransition>
<NoncurrentDays>{{ rule.nvt_noncurrent_days }}</NoncurrentDays>
<StorageClass>{{ rule.nvt_storage_class }}</StorageClass>
</NoncurrentVersionTransition>
{% endif %}
{% if rule.nve_noncurrent_days %}
<NoncurrentVersionExpiration>
<NoncurrentDays>{{ rule.nve_noncurrent_days }}</NoncurrentDays>
</NoncurrentVersionExpiration>
{% endif %}
{% if rule.aimu_days %}
<AbortIncompleteMultipartUpload>
<DaysAfterInitiation>{{ rule.aimu_days }}</DaysAfterInitiation>
</AbortIncompleteMultipartUpload>
{% endif %}
</Rule>
{% endfor %}
</LifecycleConfiguration>
"""
S3_BUCKET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket_versioning_status }}</Status>
</VersioningConfiguration>
"""
S3_BUCKET_GET_VERSIONING = """<?xml version="1.0" encoding="UTF-8"?>
{% if status is none %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
{% else %}
<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ status }}</Status>
</VersioningConfiguration>
{% endif %}
"""
S3_BUCKET_GET_VERSIONS = """<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>{{ bucket.name }}</Name>
{% if prefix != None %}
<Prefix>{{ prefix }}</Prefix>
{% endif %}
{% if common_prefixes %}
{% for prefix in common_prefixes %}
<CommonPrefixes>
<Prefix>{{ prefix }}</Prefix>
</CommonPrefixes>
{% endfor %}
{% endif %}
<Delimiter>{{ delimiter }}</Delimiter>
<KeyMarker>{{ key_marker or "" }}</KeyMarker>
<MaxKeys>{{ max_keys }}</MaxKeys>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for key in key_list %}
<Version>
<Key>{{ key.name }}</Key>
<VersionId>{% if key.version_id is none %}null{% else %}{{ key.version_id }}{% endif %}</VersionId>
<IsLatest>{{ 'true' if key.is_latest else 'false' }}</IsLatest>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
<ETag>{{ key.etag }}</ETag>
<Size>{{ key.size }}</Size>
<StorageClass>{{ key.storage_class }}</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</Version>
{% endfor %}
{% for marker in delete_marker_list %}
<DeleteMarker>
<Key>{{ marker.name }}</Key>
<VersionId>{{ marker.version_id }}</VersionId>
<IsLatest>{{ 'true' if marker.is_latest else 'false' }}</IsLatest>
<LastModified>{{ marker.last_modified_ISO8601 }}</LastModified>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
</DeleteMarker>
{% endfor %}
</ListVersionsResult>
"""
S3_DELETE_KEYS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
{% for k, v in deleted %}
<Deleted>
<Key>{{k}}</Key>
{% if v %}<VersionId>{{v}}</VersionId>{% endif %}
</Deleted>
{% endfor %}
{% for k in delete_errors %}
<Error>
<Key>{{k}}</Key>
</Error>
{% endfor %}
</DeleteResult>"""
S3_DELETE_KEY_TAGGING_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<DeleteObjectTaggingResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<VersionId>{{version_id}}</VersionId>
</DeleteObjectTaggingResult>
"""
S3_OBJECT_ACL_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<AccessControlPolicy xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<AccessControlList>
{% for grant in acl.grants %}
<Grant>
{% for grantee in grant.grantees %}
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grantee.type }}">
{% if grantee.uri %}
<URI>{{ grantee.uri }}</URI>
{% endif %}
{% if grantee.id %}
<ID>{{ grantee.id }}</ID>
{% endif %}
{% if grantee.display_name %}
<DisplayName>{{ grantee.display_name }}</DisplayName>
{% endif %}
</Grantee>
{% endfor %}
{% for permission in grant.permissions %}
<Permission>{{ permission }}</Permission>
{% endfor %}
</Grant>
{% endfor %}
</AccessControlList>
</AccessControlPolicy>"""
S3_OBJECT_LEGAL_HOLD = """<?xml version="1.0" encoding="UTF-8"?>
<LegalHold>
<Status>{{ legal_hold }}</Status>
</LegalHold>
"""
S3_OBJECT_TAGGING_RESPONSE = """\
<?xml version="1.0" encoding="UTF-8"?>
<Tagging xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<TagSet>
{% for tag in tags %}
<Tag>
<Key>{{ tag.Key }}</Key>
<Value>{{ tag.Value }}</Value>
</Tag>
{% endfor %}
</TagSet>
</Tagging>"""
S3_BUCKET_CORS_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CORSConfiguration>
{% for cors in cors %}
<CORSRule>
{% for origin in cors.allowed_origins %}
<AllowedOrigin>{{ origin }}</AllowedOrigin>
{% endfor %}
{% for method in cors.allowed_methods %}
<AllowedMethod>{{ method }}</AllowedMethod>
{% endfor %}
{% if cors.allowed_headers is not none %}
{% for header in cors.allowed_headers %}
<AllowedHeader>{{ header }}</AllowedHeader>
{% endfor %}
{% endif %}
{% if cors.exposed_headers is not none %}
{% for header in cors.exposed_headers %}
<ExposedHeader>{{ header }}</ExposedHeader>
{% endfor %}
{% endif %}
{% if cors.max_age_seconds is not none %}
<MaxAgeSeconds>{{ cors.max_age_seconds }}</MaxAgeSeconds>
{% endif %}
</CORSRule>
{% endfor %}
</CORSConfiguration>
"""
S3_OBJECT_COPY_RESPONSE = """\
<CopyObjectResult xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<ETag>{{ key.etag }}</ETag>
<LastModified>{{ key.last_modified_ISO8601 }}</LastModified>
</CopyObjectResult>"""
S3_MULTIPART_INITIATE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<InitiateMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
</InitiateMultipartUploadResult>"""
S3_MULTIPART_UPLOAD_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CopyPartResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
</CopyPartResult>"""
S3_MULTIPART_LIST_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<UploadId>{{ upload_id }}</UploadId>
<StorageClass>STANDARD</StorageClass>
<Initiator>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<PartNumberMarker>{{ part_number_marker }}</PartNumberMarker>
<NextPartNumberMarker>{{ next_part_number_marker }}</NextPartNumberMarker>
<MaxParts>{{ max_parts }}</MaxParts>
<IsTruncated>{{ is_truncated }}</IsTruncated>
{% for part in parts %}
<Part>
<PartNumber>{{ part.name }}</PartNumber>
<LastModified>{{ part.last_modified_ISO8601 }}</LastModified>
<ETag>{{ part.etag }}</ETag>
<Size>{{ part.size }}</Size>
</Part>
{% endfor %}
</ListPartsResult>"""
S3_MULTIPART_COMPLETE_RESPONSE = """<?xml version="1.0" encoding="UTF-8"?>
<CompleteMultipartUploadResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Location>http://{{ bucket_name }}.s3.amazonaws.com/{{ key_name }}</Location>
<Bucket>{{ bucket_name }}</Bucket>
<Key>{{ key_name }}</Key>
<ETag>{{ etag }}</ETag>
</CompleteMultipartUploadResult>
"""
S3_ALL_MULTIPARTS = (
"""<?xml version="1.0" encoding="UTF-8"?>
<ListMultipartUploadsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Bucket>{{ bucket_name }}</Bucket>
<KeyMarker></KeyMarker>
<UploadIdMarker></UploadIdMarker>
<MaxUploads>1000</MaxUploads>
<IsTruncated>false</IsTruncated>
{% for upload in uploads %}
<Upload>
<Key>{{ upload.key_name }}</Key>
<UploadId>{{ upload.id }}</UploadId>
<Initiator>
<ID>arn:aws:iam::"""
+ ACCOUNT_ID
+ """:user/user1-11111a31-17b5-4fb7-9df5-b111111f13de</ID>
<DisplayName>user1-11111a31-17b5-4fb7-9df5-b111111f13de</DisplayName>
</Initiator>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>webfile</DisplayName>
</Owner>
<StorageClass>STANDARD</StorageClass>
<Initiated>2010-11-10T20:48:33.000Z</Initiated>
</Upload>
{% endfor %}
</ListMultipartUploadsResult>
"""
)
S3_NO_POLICY = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucketPolicy</Code>
<Message>The bucket policy does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_LIFECYCLE = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchLifecycleConfiguration</Code>
<Message>The lifecycle configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_TAGGING = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchTagSet</Code>
<Message>The TagSet does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_BUCKET_WEBSITE_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_INVALID_CORS_REQUEST = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchWebsiteConfiguration</Code>
<Message>The specified bucket does not have a website configuration</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_CORS_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchCORSConfiguration</Code>
<Message>The CORS configuration does not exist</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01">
<LoggingEnabled>
<TargetBucket>{{ logging["TargetBucket"] }}</TargetBucket>
<TargetPrefix>{{ logging["TargetPrefix"] }}</TargetPrefix>
{% if logging.get("TargetGrants") %}
<TargetGrants>
{% for grant in logging["TargetGrants"] %}
<Grant>
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:type="{{ grant.grantees[0].type }}">
{% if grant.grantees[0].uri %}
<URI>{{ grant.grantees[0].uri }}</URI>
{% endif %}
{% if grant.grantees[0].id %}
<ID>{{ grant.grantees[0].id }}</ID>
{% endif %}
{% if grant.grantees[0].display_name %}
<DisplayName>{{ grant.grantees[0].display_name }}</DisplayName>
{% endif %}
</Grantee>
<Permission>{{ grant.permissions[0] }}</Permission>
</Grant>
{% endfor %}
</TargetGrants>
{% endif %}
</LoggingEnabled>
</BucketLoggingStatus>
"""
S3_NO_LOGGING_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
"""
S3_ENCRYPTION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<ServerSideEncryptionConfiguration xmlns="http://doc.s3.amazonaws.com/2006-03-01">
{% for entry in encryption %}
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] }}</SSEAlgorithm>
{% if entry["Rule"]["ApplyServerSideEncryptionByDefault"].get("KMSMasterKeyID") %}
<KMSMasterKeyID>{{ entry["Rule"]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] }}</KMSMasterKeyID>
{% endif %}
</ApplyServerSideEncryptionByDefault>
<BucketKeyEnabled>{{ 'true' if entry["Rule"].get("BucketKeyEnabled") == 'true' else 'false' }}</BucketKeyEnabled>
</Rule>
{% endfor %}
</ServerSideEncryptionConfiguration>
"""
S3_INVALID_PRESIGNED_PARAMETERS = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>SignatureDoesNotMatch</Code>
<Message>The request signature we calculated does not match the signature you provided. Check your key and signing method.</Message>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_NO_ENCRYPTION = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>ServerSideEncryptionConfigurationNotFoundError</Code>
<Message>The server side encryption configuration was not found</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>0D68A23BB2E2215B</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
S3_GET_BUCKET_NOTIFICATION_CONFIG = """<?xml version="1.0" encoding="UTF-8"?>
<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{% for topic in config.topic %}
<TopicConfiguration>
<Id>{{ topic.id }}</Id>
<Topic>{{ topic.arn }}</Topic>
{% for event in topic.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if topic.filters %}
<Filter>
<S3Key>
{% for rule in topic.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</TopicConfiguration>
{% endfor %}
{% for queue in config.queue %}
<QueueConfiguration>
<Id>{{ queue.id }}</Id>
<Queue>{{ queue.arn }}</Queue>
{% for event in queue.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if queue.filters %}
<Filter>
<S3Key>
{% for rule in queue.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</QueueConfiguration>
{% endfor %}
{% for cf in config.cloud_function %}
<CloudFunctionConfiguration>
<Id>{{ cf.id }}</Id>
<CloudFunction>{{ cf.arn }}</CloudFunction>
{% for event in cf.events %}
<Event>{{ event }}</Event>
{% endfor %}
{% if cf.filters %}
<Filter>
<S3Key>
{% for rule in cf.filters["S3Key"]["FilterRule"] %}
<FilterRule>
<Name>{{ rule["Name"] }}</Name>
<Value>{{ rule["Value"] }}</Value>
</FilterRule>
{% endfor %}
</S3Key>
</Filter>
{% endif %}
</CloudFunctionConfiguration>
{% endfor %}
</NotificationConfiguration>
"""
S3_BUCKET_ACCELERATE = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Status>{{ bucket.accelerate_configuration }}</Status>
</AccelerateConfiguration>
"""
S3_BUCKET_ACCELERATE_NOT_SET = """
<AccelerateConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>
"""
S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION = """
<PublicAccessBlockConfiguration>
<BlockPublicAcls>{{public_block_config.block_public_acls}}</BlockPublicAcls>
<IgnorePublicAcls>{{public_block_config.ignore_public_acls}}</IgnorePublicAcls>
<BlockPublicPolicy>{{public_block_config.block_public_policy}}</BlockPublicPolicy>
<RestrictPublicBuckets>{{public_block_config.restrict_public_buckets}}</RestrictPublicBuckets>
</PublicAccessBlockConfiguration>
"""
S3_BUCKET_LOCK_CONFIGURATION = """
<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
{%if lock_enabled %}
<ObjectLockEnabled>Enabled</ObjectLockEnabled>
{% else %}
<ObjectLockEnabled>Disabled</ObjectLockEnabled>
{% endif %}
{% if mode %}
<Rule>
<DefaultRetention>
<Mode>{{mode}}</Mode>
<Days>{{days}}</Days>
<Years>{{years}}</Years>
</DefaultRetention>
</Rule>
{% endif %}
</ObjectLockConfiguration>
"""
S3_DUPLICATE_BUCKET_ERROR = """<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>BucketAlreadyOwnedByYou</Code>
<Message>Your previous request to create the named bucket succeeded and you already own it.</Message>
<BucketName>{{ bucket_name }}</BucketName>
<RequestId>44425877V1D0A2F9</RequestId>
<HostId>9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg=</HostId>
</Error>
"""
| 38.059041 | 168 | 0.58727 | [
"Apache-2.0"
] | nom3ad/moto | moto/s3/responses.py | 103,140 | Python |
# -*- coding: utf-8 -*-
#
# Python Github documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 3 23:23:15 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python Github'
copyright = u'2015, Nicolas Mendoza'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonGithubdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PythonGithub.tex', u'Python Github Documentation',
u'Nicolas Mendoza', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pythongithub', u'Python Github Documentation',
[u'Nicolas Mendoza'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PythonGithub', u'Python Github Documentation',
u'Nicolas Mendoza', 'PythonGithub', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 31.524164 | 79 | 0.71816 | [
"MIT"
] | nicchub/PythonGithub | docs/conf.py | 8,480 | Python |
from setuptools import setup
setup(
name='dst',
version='0.1.5',
author='Jeroen Janssens',
author_email='jeroen@jeroenjanssens.com',
packages=['dst'],
url='http://datasciencetoolbox.org',
license='BSD',
description='Data Science Toolbox -- Start doing data science in minutes.',
long_description=open('README.txt').read(),
install_requires=[
"ansible >= 1.5",
],
entry_points={
'console_scripts': ['dst = dst.dst:main']
},
classifiers=[ # https://pypi.python.org/pypi?:action=list_classifiers
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Topic :: Scientific/Engineering',
'Topic :: Utilities',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
'Programming Language :: Python :: 2.7',
'Programming Language :: Unix Shell',
],
)
| 34.742857 | 79 | 0.60773 | [
"BSD-2-Clause"
] | CleverProgrammer/data-science-toolbox | manager/setup.py | 1,216 | Python |
#!coding:utf8
#author:yqq
#date:2020/4/30 0030 17:11
#description:
import os
import pymysql
SQL_PASSWD = os.environ.get('SQL_PWD')
def open(host : str,usr : str, passwd : str,db_name : str):
conn = pymysql.connect(host=host, user=usr,
password=passwd, db=db_name,
charset='utf8', cursorclass=pymysql.cursors.DictCursor)
return conn
def close(conn):
conn.close()
def execute(conn,cmd):
cur = conn.cursor()
cur.execute(cmd)
conn.commit() #fixed bug by yqq 2019-05-01
return cur.fetchall()
def run(cmd, host, usr, passwd, db_name):
    # open() requires connection parameters, so run() must receive and pass
    # them along (the original version called open() with no arguments).
    conn = open(host=host, usr=usr, passwd=passwd, db_name=db_name)
    result = execute(conn, cmd)
    close(conn)
    return result
def get_column_values(conn,table_name,column_name):
cmd = "SELECT {0} FROM {1}".format(column_name,table_name)
return execute(conn,cmd)
def main():
host = '192.168.10.29'
usr = 'root'
passwd = 'eWFuZ3FpbmdxaW5n'
dbname = 'test_1'
conn = open(host=host, usr=usr, passwd=passwd, db_name=dbname )
print(get_column_values(conn,'t_test_student','name'))
close(conn)
if __name__ == "__main__":
main()
| 21.666667 | 71 | 0.643439 | [
"MIT"
] | songning4/QBlockChainNotes | Python3/Tornado/apps/pg/PG_Admin/lib/sql.py | 1,105 | Python |
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is borrowed from ros2/rosidl:
# https://github.com/ros2/rosidl/blob/master/rosidl_adapter/rosidl_adapter/resource/__init__.py
# Slight modifications were made, so proper paths to files are accepted.
from io import StringIO
import os
import sys
import em
def expand_template(template_name, data, output_file, encoding='utf-8'):
content = evaluate_template(template_name, data)
if output_file.exists():
existing_content = output_file.read_text(encoding=encoding)
if existing_content == content:
return
elif output_file.parent:
os.makedirs(str(output_file.parent), exist_ok=True)
output_file.write_text(content, encoding=encoding)
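# Added usage sketch (hypothetical file names, not part of the original
# module): expand_template() renders an empy template to disk but skips the
# write when the rendered content matches what is already there, so repeated
# generator runs leave file timestamps untouched, e.g.
#
#   from pathlib import Path
#   expand_template('service.h.em', {'name': 'Echo'}, Path('gen/service.h'))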
_interpreter = None
def evaluate_template(template_name, data):
global _interpreter
# create copy before manipulating
data = dict(data)
data['TEMPLATE'] = _evaluate_template
#template_path = os.path.join(os.path.dirname(__file__), template_name)
template_path = template_name
output = StringIO()
try:
_interpreter = em.Interpreter(
output=output,
options={
em.BUFFERED_OPT: True,
em.RAW_OPT: True,
})
with open(template_path, 'r') as h:
content = h.read()
_interpreter.invoke(
'beforeFile', name=template_name, file=h, locals=data)
_interpreter.string(content, template_path, locals=data)
_interpreter.invoke('afterFile')
return output.getvalue()
except Exception as e: # noqa: F841
print(
f"{e.__class__.__name__} processing template '{template_name}'",
file=sys.stderr)
raise
finally:
_interpreter.shutdown()
_interpreter = None
def _evaluate_template(template_name, **kwargs):
global _interpreter
#template_path = os.path.join(os.path.dirname(__file__), template_name)
template_path = template_name
with open(template_path, 'r') as h:
_interpreter.invoke(
'beforeInclude', name=template_path, file=h, locals=kwargs)
content = h.read()
try:
_interpreter.string(content, template_path, kwargs)
except Exception as e: # noqa: F841
print(
f"{e.__class__.__name__} processing template '{template_name}': "
f'{e}', file=sys.stderr)
sys.exit(1)
_interpreter.invoke('afterInclude') | 32.55914 | 95 | 0.679326 | [
"Apache-2.0"
] | Blutkoete/ecal | doc/extensions/empy_helpers/__init__.py | 3,028 | Python |
"""
This magical module will rewrite all public methods in the public interface
of the library so they can run the loop on their own if it's not already
running. This rewrite may not be desirable if the end user always uses the
methods they way they should be ran, but it's incredibly useful for quick
scripts and the runtime overhead is relatively low.
Some really common methods which are hardly used offer this ability by
default, such as ``.start()`` and ``.run_until_disconnected()`` (since
you may want to start, and then run until disconnected while using async
event handlers).
"""
import asyncio
import functools
import inspect
from . import connection
from .client.account import _TakeoutClient
from .client.telegramclient import TelegramClient
from .tl import types, functions, custom
from .tl.custom import (
Draft, Dialog, MessageButton, Forward, Button,
Message, InlineResult, Conversation
)
from .tl.custom.chatgetter import ChatGetter
from .tl.custom.sendergetter import SenderGetter
def _syncify_wrap(t, method_name):
method = getattr(t, method_name)
@functools.wraps(method)
def syncified(*args, **kwargs):
coro = method(*args, **kwargs)
loop = asyncio.get_event_loop()
if loop.is_running():
return coro
else:
return loop.run_until_complete(coro)
# Save an accessible reference to the original method
setattr(syncified, '__tl.sync', method)
setattr(t, method_name, syncified)
def syncify(*types):
"""
Converts all the methods in the given types (class definitions)
into synchronous, which return either the coroutine or the result
based on whether ``asyncio's`` event loop is running.
"""
# Our asynchronous generators all are `RequestIter`, which already
# provide a synchronous iterator variant, so we don't need to worry
# about asyncgenfunction's here.
for t in types:
for name in dir(t):
if not name.startswith('_') or name == '__call__':
if inspect.iscoroutinefunction(getattr(t, name)):
_syncify_wrap(t, name)
syncify(TelegramClient, _TakeoutClient, Draft, Dialog, MessageButton,
ChatGetter, SenderGetter, Forward, Message, InlineResult, Conversation)
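# Illustrative usage sketch (added; the session name and API credentials are
# placeholders, not values defined in this package). Outside a running event
# loop every syncified coroutine method drives the loop itself, while inside
# an ``async def`` the very same call must still be awaited:
#
#   from telethon import TelegramClient
#
#   client = TelegramClient('session', api_id=12345, api_hash='0123abcd')
#   client.start()                  # blocking here, no ``await`` needed
#   me = client.get_me()            # also blocking outside a running loop
#
#   async def main():
#       me = await client.get_me()  # inside a loop it stays a coroutine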
__all__ = [
'TelegramClient', 'Button',
'types', 'functions', 'custom', 'errors',
'events', 'utils', 'connection'
]
| 33.873239 | 79 | 0.710603 | [
"MIT"
] | SlavikMIPT/Telethon | telethon/sync.py | 2,405 | Python |
"""
Plotting code for nilearn
"""
# Original Authors: Chris Filo Gorgolewski, Gael Varoquaux
import os
import sys
import importlib
###############################################################################
# Make sure that we don't get DISPLAY problems when running without X on
# unices
def _set_mpl_backend():
# We are doing local imports here to avoid polluting our namespace
try:
import matplotlib
except ImportError:
if importlib.util.find_spec("pytest") is not None:
from .._utils.testing import skip_if_running_tests
# No need to fail when running tests
skip_if_running_tests('matplotlib not installed')
raise
else:
from ..version import (_import_module_with_version_check,
OPTIONAL_MATPLOTLIB_MIN_VERSION)
# When matplotlib was successfully imported we need to check
# that the version is greater that the minimum required one
_import_module_with_version_check('matplotlib',
OPTIONAL_MATPLOTLIB_MIN_VERSION)
current_backend = matplotlib.get_backend().lower()
if 'inline' in current_backend or 'nbagg' in current_backend:
return
# Set the backend to a non-interactive one for unices without X
# (see gh-2560)
if (sys.platform not in ('darwin', 'win32') and
'DISPLAY' not in os.environ):
matplotlib.use('Agg')
_set_mpl_backend()
###############################################################################
from . import cm
from .img_plotting import (
plot_img, plot_anat, plot_epi, plot_roi, plot_stat_map,
plot_glass_brain, plot_connectome, plot_connectome_strength,
plot_markers, plot_prob_atlas, plot_carpet, plot_img_comparison, show)
from .find_cuts import find_xyz_cut_coords, find_cut_slices, \
find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords
from .matrix_plotting import (plot_matrix, plot_contrast_matrix,
plot_design_matrix, plot_event)
from .html_surface import view_surf, view_img_on_surf
from .html_stat_map import view_img
from .html_connectome import view_connectome, view_markers
from .surf_plotting import (plot_surf, plot_surf_stat_map, plot_surf_roi,
plot_img_on_surf, plot_surf_contours)
__all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',
'plot_roi', 'plot_stat_map', 'plot_glass_brain',
'plot_markers', 'plot_connectome', 'plot_prob_atlas',
'find_xyz_cut_coords', 'find_cut_slices',
'plot_img_comparison',
'show', 'plot_matrix',
'plot_design_matrix', 'plot_contrast_matrix', 'plot_event',
'view_surf', 'view_img_on_surf',
'view_img', 'view_connectome', 'view_markers',
'find_parcellation_cut_coords',
'find_probabilistic_atlas_cut_coords',
'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi',
'plot_img_on_surf', 'plot_connectome_strength', 'plot_carpet',
'plot_surf_contours']
| 42.712329 | 79 | 0.645927 | [
"BSD-2-Clause"
] | AKSoo/nilearn | nilearn/plotting/__init__.py | 3,118 | Python |
import setuptools
import os
own_dir = os.path.abspath(os.path.dirname(__file__))
def requirements():
with open(os.path.join(own_dir, 'requirements.oci.txt')) as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith('#'):
continue
yield line
def modules():
return [
]
def version():
with open(os.path.join(own_dir, 'VERSION')) as f:
return f.read().strip()
setuptools.setup(
name='gardener-oci',
version=version(),
description='gardener OCI lib',
python_requires='>=3.9.*',
py_modules=modules(),
packages=['oci'],
package_data={
'ci':['version'],
},
install_requires=list(requirements()),
entry_points={
},
)
| 19.04878 | 66 | 0.583867 | [
"BSD-3-Clause"
] | MrBatschner/cc-utils | setup.oci.py | 781 | Python |
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.system_log import CONF_LOGGER
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant, Event
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.helpers.storage import Store
from .core import logger
from .core.gateway3 import Gateway3
from .core.helpers import DevicesRegistry
from .core.utils import DOMAIN, XiaomiGateway3Debug
from .core.xiaomi_cloud import MiCloud
_LOGGER = logging.getLogger(__name__)
DOMAINS = ['binary_sensor', 'climate', 'cover', 'light', 'remote', 'sensor',
'switch', 'alarm_control_panel']
CONF_DEVICES = 'devices'
CONF_ATTRIBUTES_TEMPLATE = 'attributes_template'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_DEVICES): {
cv.string: vol.Schema({
vol.Optional('occupancy_timeout'): cv.positive_int,
}, extra=vol.ALLOW_EXTRA),
},
CONF_LOGGER: logger.CONFIG_SCHEMA,
vol.Optional(CONF_ATTRIBUTES_TEMPLATE): cv.template
}, extra=vol.ALLOW_EXTRA),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistant, hass_config: dict):
config = hass_config.get(DOMAIN) or {}
if CONF_LOGGER in config:
logger.init(__name__, config[CONF_LOGGER], hass.config.config_dir)
info = await hass.helpers.system_info.async_get_system_info()
_LOGGER.debug(f"SysInfo: {info}")
# update global debug_mode for all gateways
if 'debug_mode' in config[CONF_LOGGER]:
setattr(Gateway3, 'debug_mode', config[CONF_LOGGER]['debug_mode'])
if CONF_DEVICES in config:
for k, v in config[CONF_DEVICES].items():
# AA:BB:CC:DD:EE:FF => aabbccddeeff
k = k.replace(':', '').lower()
DevicesRegistry.defaults[k] = v
hass.data[DOMAIN] = {
CONF_ATTRIBUTES_TEMPLATE: config.get(CONF_ATTRIBUTES_TEMPLATE)
}
await _handle_device_remove(hass)
# utils.migrate_unique_id(hass)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Support two kind of enties - MiCloud and Gateway."""
# entry for MiCloud login
if 'servers' in entry.data:
return await _setup_micloud_entry(hass, entry)
# migrate data (also after first setup) to options
if entry.data:
hass.config_entries.async_update_entry(entry, data={},
options=entry.data)
await _setup_logger(hass)
# add options handler
if not entry.update_listeners:
entry.add_update_listener(async_update_options)
hass.data[DOMAIN][entry.entry_id] = Gateway3(**entry.options)
hass.async_create_task(_setup_domains(hass, entry))
return True
async def async_update_options(hass: HomeAssistant, entry: ConfigEntry):
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
# check unload cloud integration
if entry.entry_id not in hass.data[DOMAIN]:
return
# remove all stats entities if disable stats
if not entry.options.get('stats'):
suffix = ('_gateway', '_zigbee', '_ble')
registry: EntityRegistry = hass.data['entity_registry']
remove = [
entity.entity_id
for entity in list(registry.entities.values())
if (entity.config_entry_id == entry.entry_id and
entity.unique_id.endswith(suffix))
]
for entity_id in remove:
registry.async_remove(entity_id)
gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
await gw.stop()
await asyncio.gather(*[
hass.config_entries.async_forward_entry_unload(entry, domain)
for domain in DOMAINS
])
return True
async def _setup_domains(hass: HomeAssistant, entry: ConfigEntry):
# init setup for each supported domains
await asyncio.gather(*[
hass.config_entries.async_forward_entry_setup(entry, domain)
for domain in DOMAINS
])
gw: Gateway3 = hass.data[DOMAIN][entry.entry_id]
gw.start()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gw.stop)
)
async def _setup_micloud_entry(hass: HomeAssistant, config_entry):
data: dict = config_entry.data.copy()
session = async_create_clientsession(hass)
hass.data[DOMAIN]['cloud'] = cloud = MiCloud(session, data['servers'])
if 'service_token' in data:
# load devices with saved MiCloud auth
cloud.auth = data
devices = await cloud.get_devices()
else:
devices = None
if devices is None:
_LOGGER.debug(f"Login to MiCloud for {config_entry.title}")
if await cloud.login(data['username'], data['password']):
# update MiCloud auth in .storage
data.update(cloud.auth)
hass.config_entries.async_update_entry(config_entry, data=data)
devices = await cloud.get_devices()
if devices is None:
_LOGGER.error("Can't load devices from MiCloud")
else:
_LOGGER.error("Can't login to MiCloud")
# load devices from or save to .storage
store = Store(hass, 1, f"{DOMAIN}/{data['username']}.json")
if devices is None:
_LOGGER.debug("Loading a list of devices from the .storage")
devices = await store.async_load()
else:
_LOGGER.debug(f"Loaded from MiCloud {len(devices)} devices")
await store.async_save(devices)
if devices is None:
_LOGGER.debug("No devices in .storage")
return False
# TODO: Think about a bunch of devices
if 'devices' not in hass.data[DOMAIN]:
hass.data[DOMAIN]['devices'] = devices
else:
hass.data[DOMAIN]['devices'] += devices
for device in devices:
# key - mac for BLE, and did for others
did = device['did'] if device['pid'] not in '6' else \
device['mac'].replace(':', '').lower()
DevicesRegistry.defaults.setdefault(did, {})
# don't override name if exists
DevicesRegistry.defaults[did].setdefault('device_name', device['name'])
return True
async def _handle_device_remove(hass: HomeAssistant):
"""Remove device from Hass and Mi Home if the device is renamed to
`delete`.
"""
async def device_registry_updated(event: Event):
if event.data['action'] != 'update':
return
registry = hass.data['device_registry']
hass_device = registry.async_get(event.data['device_id'])
# check empty identifiers
if not hass_device or not hass_device.identifiers:
return
# handle only our devices
for hass_did in hass_device.identifiers:
if hass_did[0] == DOMAIN and hass_device.name_by_user == 'delete':
break
else:
return
# remove from Mi Home
for gw in hass.data[DOMAIN].values():
if not isinstance(gw, Gateway3):
continue
gw_device = gw.get_device(hass_did[1])
if not gw_device:
continue
if gw_device['type'] == 'zigbee':
gw.debug(f"Remove device: {gw_device['did']}")
await gw.miio.send('remove_device', [gw_device['did']])
break
# remove from Hass
registry.async_remove_device(hass_device.id)
hass.bus.async_listen('device_registry_updated', device_registry_updated)
async def _setup_logger(hass: HomeAssistant):
    if not hasattr(_LOGGER, 'default_level'):
        # default level from Hass config
        _LOGGER.default_level = _LOGGER.level
entries = hass.config_entries.async_entries(DOMAIN)
web_logs = any(e.options.get('debug') for e in entries)
# only if global logging don't set
    if _LOGGER.default_level == logging.NOTSET:
# disable log to console
_LOGGER.propagate = web_logs is False
# set debug if any of integrations has debug
_LOGGER.setLevel(logging.DEBUG if web_logs else logging.NOTSET)
# if don't set handler yet
if web_logs:
# skip if already added
if any(isinstance(h, XiaomiGateway3Debug) for h in _LOGGER.handlers):
return
handler = XiaomiGateway3Debug(hass)
_LOGGER.addHandler(handler)
    if _LOGGER.default_level == logging.NOTSET:
info = await hass.helpers.system_info.async_get_system_info()
_LOGGER.debug(f"SysInfo: {info}")
| 32.758364 | 79 | 0.661825 | [
"Unlicense"
] | Gamma-Software/HomeAssistantConfig | custom_components/xiaomi_gateway3/__init__.py | 8,812 | Python |
import logging
from aiohttp.web import Application
from virtool.pg.base import Base
from virtool.startup import get_scheduler_from_app
logger = logging.getLogger(__name__)
async def shutdown_client(app: Application):
"""
Attempt to close the async HTTP client session.
:param app: The application object
"""
logger.info("Stopping HTTP client")
try:
await app["client"].close()
except KeyError:
pass
async def shutdown_dispatcher(app: Application):
"""
Attempt to close the app's `Dispatcher` object.
:param app: The application object
"""
logger.info("Stopping dispatcher")
try:
await app["dispatcher"].close()
except KeyError:
pass
async def shutdown_executors(app: Application):
"""
Attempt to close the `ThreadPoolExecutor` and `ProcessPoolExecutor`.
:param app: the application object
"""
try:
app["executor"].shutdown(wait=True)
except KeyError:
pass
try:
app["process_executor"].shutdown(wait=True)
except KeyError:
pass
async def shutdown_scheduler(app: Application):
"""
    Attempt to close the app's `aiojobs` scheduler.
:param app: The application object
"""
scheduler = get_scheduler_from_app(app)
await scheduler.close()
async def shutdown_redis(app: Application):
"""
Attempt to close the app's `redis` instance.
:param app: The application object
"""
logger.info("Closing Redis connection")
try:
app["redis"].close()
await app["redis"].wait_closed()
except KeyError:
pass
async def drop_fake_postgres(app: Application):
"""
Drop a fake PostgreSQL database if the instance was run with the ``--fake`` option.
:param app: the application object
"""
if app["config"].fake and "fake_" in app["config"].postgres_connection_string:
async with app["pg"].begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
logger.debug("Dropped fake PostgreSQL database.")
| 22.597826 | 87 | 0.658009 | [
"MIT"
] | KingBain/virtool | virtool/shutdown.py | 2,079 | Python |
# User class to hold a user's name and weight-tracking data
class User:
### Instance Variables ###
__userName = ""
__validUser = None
__data = []
__weights = []
__notes = []
__dates = []
__intWeights = []
__avgWeight = 0
__minWeight = 0
__maxWeight = 0
##########################
### Getters ###
def getUserName(self):
return self.__userName
def getData(self):
return self.__data
def getValidUser(self):
return self.__validUser
def getWeights(self):
return self.__weights
def getNotes(self):
return self.__notes
def getDates(self):
return self.__dates
def getAvgWeight(self):
return str(self.__avgWeight)
def getMinWeight(self):
return str(self.__minWeight)
def getMaxWeight(self):
return str(self.__maxWeight)
################
### Setters ###
def setUserName(self, name):
self.__userName = name
def setData(self, data):
self.__data = data
def setValidUser(self, valid):
self.__validUser = valid
def setWeights(self, weights):
self.__weights = weights
def setNotes(self, notes):
self.__notes = notes
def setDates(self, dates):
self.__dates = dates
################
def addData(self, data):
self.__data.append(data)
def addWeight(self, weight):
self.__weights.append(weight)
def addNote(self, note):
self.__notes.append(note)
def addDate(self, date):
self.__dates.append(date)
def calcAvg(self):
self.__avgWeight = int(sum(self.__intWeights)/len(self.__intWeights))
def calcMaxWeight(self):
self.__maxWeight = max(self.__intWeights)
    def calcMinWeight(self):
self.__minWeight = min(self.__intWeights)
def averageWeightDelta(self, weightData):
pass
def convertWeightList(self, weightData):
for i in range(len(weightData)):
weightData[i] = int(weightData[i])
self.__intWeights = weightData
| 21.071429 | 77 | 0.595157 | [
"MIT"
] | imjacksonchen/weightTracker | User.py | 2,065 | Python |
from unittest.mock import patch
from django.test import TestCase
from django_logic.state import State
from django_logic.transition import Transition
from tests.models import Invoice
def disable_invoice(invoice: Invoice, *args, **kwargs):
invoice.is_available = False
invoice.save()
def update_invoice(invoice, is_available, customer_received, *args, **kwargs):
invoice.is_available = is_available
invoice.customer_received = customer_received
invoice.save()
def enable_invoice(invoice: Invoice, *args, **kwargs):
invoice.is_available = True
invoice.save()
def fail_invoice(invoice: Invoice, *args, **kwargs):
raise Exception
def receive_invoice(invoice: Invoice, *args, **kwargs):
invoice.customer_received = True
invoice.save()
def debug_action(*args, **kwargs):
pass
class TransitionSideEffectsTestCase(TestCase):
def setUp(self) -> None:
self.invoice = Invoice.objects.create(status='draft')
def test_one_side_effect(self):
transition = Transition('test', sources=[], target='cancelled', side_effects=[disable_invoice])
self.assertTrue(self.invoice.is_available)
state = State(self.invoice, 'status')
transition.change_state(state)
self.assertEqual(self.invoice.status, transition.target)
self.assertFalse(self.invoice.is_available)
self.assertFalse(state.is_locked())
def test_many_side_effects(self):
transition = Transition('test', sources=[], target='cancelled',
side_effects=[disable_invoice, enable_invoice])
self.assertTrue(self.invoice.is_available)
state = State(self.invoice, 'status')
transition.change_state(state)
self.assertEqual(self.invoice.status, transition.target)
self.assertTrue(self.invoice.is_available)
self.assertFalse(state.is_locked())
def test_failure_during_side_effect(self):
transition = Transition('test', sources=[], target='cancelled',
side_effects=[disable_invoice, fail_invoice, enable_invoice])
self.assertTrue(self.invoice.is_available)
state = State(self.invoice, 'status')
transition.change_state(state)
self.assertEqual(self.invoice.status, 'draft')
self.assertFalse(self.invoice.is_available)
self.assertFalse(state.is_locked())
def test_failure_during_side_effect_with_failed_state(self):
transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
side_effects=[disable_invoice, fail_invoice, enable_invoice])
self.assertTrue(self.invoice.is_available)
state = State(self.invoice, 'status')
transition.change_state(state)
self.assertEqual(self.invoice.status, 'failed')
self.assertFalse(self.invoice.is_available)
self.assertFalse(state.is_locked())
def test_side_effect_with_parameters(self):
update_invoice(self.invoice, is_available=True, customer_received=True)
transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
side_effects=[update_invoice])
self.invoice.refresh_from_db()
self.assertTrue(self.invoice.is_available)
self.assertTrue(self.invoice.customer_received)
state = State(self.invoice, 'status')
transition.change_state(state, is_available=False, customer_received=False)
self.invoice.refresh_from_db()
self.assertFalse(self.invoice.is_available)
self.assertFalse(self.invoice.customer_received)
self.assertFalse(state.is_locked())
class TransitionCallbacksTestCase(TestCase):
def setUp(self) -> None:
self.invoice = Invoice.objects.create(status='draft')
def test_one_callback(self):
transition = Transition('test', sources=[], target='cancelled', callbacks=[disable_invoice])
self.assertTrue(self.invoice.is_available)
state = State(self.invoice, 'status')
transition.change_state(state)
self.assertEqual(self.invoice.status, transition.target)
self.assertFalse(self.invoice.is_available)
self.assertFalse(state.is_locked())
def test_many_callbacks(self):
transition = Transition('test', sources=[], target='cancelled',
callbacks=[disable_invoice, enable_invoice])
self.assertTrue(self.invoice.is_available)
state = State(self.invoice, 'status')
transition.change_state(state)
self.assertEqual(self.invoice.status, transition.target)
self.assertTrue(self.invoice.is_available)
self.assertFalse(state.is_locked())
def test_failure_during_callbacks(self):
transition = Transition('test', sources=[], target='cancelled',
callbacks=[disable_invoice, fail_invoice, enable_invoice])
self.assertTrue(self.invoice.is_available)
state = State(self.invoice, 'status')
transition.change_state(state)
self.assertEqual(self.invoice.status, 'cancelled')
self.assertFalse(self.invoice.is_available)
self.assertFalse(state.is_locked())
def test_failure_during_callbacks_with_failed_state(self):
transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
side_effects=[disable_invoice, fail_invoice, enable_invoice])
self.assertTrue(self.invoice.is_available)
state = State(self.invoice, 'status')
transition.change_state(state)
self.assertEqual(self.invoice.status, 'failed')
self.assertFalse(self.invoice.is_available)
self.assertFalse(state.is_locked())
def test_callbacks_with_parameters(self):
update_invoice(self.invoice, is_available=True, customer_received=True)
transition = Transition('test', sources=[], target='cancelled', failed_state='failed',
callbacks=[update_invoice])
self.invoice.refresh_from_db()
self.assertTrue(self.invoice.is_available)
self.assertTrue(self.invoice.customer_received)
state = State(self.invoice, 'status')
transition.change_state(state, is_available=False, customer_received=False)
self.invoice.refresh_from_db()
self.assertFalse(self.invoice.is_available)
self.assertFalse(self.invoice.customer_received)
self.assertFalse(state.is_locked())
class TransitionFailureCallbacksTestCase(TestCase):
def setUp(self) -> None:
self.invoice = Invoice.objects.create(status='draft')
def test_one_callback(self):
transition = Transition('test', sources=[], target='success', side_effects=[fail_invoice],
failure_callbacks=[disable_invoice], failed_state='failed')
self.assertTrue(self.invoice.is_available)
state = State(self.invoice, 'status')
transition.change_state(state)
self.assertEqual(self.invoice.status, 'failed')
self.assertFalse(self.invoice.is_available)
self.assertFalse(state.is_locked())
def test_many_callback(self):
transition = Transition('test', sources=[], target='success', side_effects=[fail_invoice],
failure_callbacks=[disable_invoice, receive_invoice], failed_state='failed')
self.assertTrue(self.invoice.is_available)
self.assertFalse(self.invoice.customer_received)
state = State(self.invoice, 'status')
transition.change_state(state)
self.assertEqual(self.invoice.status, 'failed')
self.assertFalse(self.invoice.is_available)
self.assertTrue(self.invoice.customer_received)
self.assertFalse(state.is_locked())
def test_callbacks_with_parameters(self):
update_invoice(self.invoice, is_available=True, customer_received=True)
transition = Transition('test', sources=[], target='success', failed_state='failed',
side_effects=[fail_invoice], failure_callbacks=[update_invoice])
self.invoice.refresh_from_db()
self.assertTrue(self.invoice.is_available)
self.assertTrue(self.invoice.customer_received)
state = State(self.invoice, 'status')
transition.change_state(state, is_available=False, customer_received=False)
self.invoice.refresh_from_db()
self.assertEqual(self.invoice.status, 'failed')
self.assertFalse(self.invoice.is_available)
self.assertFalse(self.invoice.customer_received)
self.assertFalse(state.is_locked())
@patch('tests.test_transition.debug_action')
def test_failure_callback_exception_passed(self, debug_mock):
update_invoice(self.invoice, is_available=True, customer_received=True)
transition = Transition('test', sources=[], target='success', failed_state='failed',
side_effects=[fail_invoice], failure_callbacks=[debug_action])
self.invoice.refresh_from_db()
state = State(self.invoice, 'status')
transition.change_state(state, foo="bar")
self.assertTrue(debug_mock.called)
self.assertEqual(debug_mock.call_count, 1)
call_args = debug_mock.call_args[0]
call_kwargs = debug_mock.call_args[1]
self.assertEqual(call_args, (self.invoice,))
self.assertEqual(len(call_kwargs), 2)
self.assertTrue(isinstance(call_kwargs['exception'], Exception))
self.assertEqual(call_kwargs['foo'], 'bar')
| 45.507109 | 108 | 0.687669 | [
"MIT"
] | khamenman/django-logic | tests/test_transition.py | 9,602 | Python |
# Generated by Django 3.2.4 on 2021-07-04 11:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tower_blocks_score', models.IntegerField(default=0)),
('bounce_score', models.IntegerField(default=0)),
('kill_birds_score', models.IntegerField(default=0)),
('snake_score', models.IntegerField(default=0)),
('last_updated', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 51.458333 | 266 | 0.631174 | [
"MIT"
] | ergauravsoni/final-year-backend | app/core/migrations/0001_initial.py | 2,470 | Python |
from simplecoremidi import send_midi
from time import sleep
def play_a_scale():
root_note = 60 # This is middle C
channel = 1 # This is MIDI channel 1
note_on_action = 0x90
major_steps = [2, 2, 1, 2, 2, 2, 1, 0]
velocity = 127
note = root_note
for step in major_steps:
send_midi((note_on_action | channel,
note,
velocity))
sleep(0.1)
send_midi((note_on_action | channel,
note,
0)) # A note-off is just a note-on with velocity 0
note += step
sleep(0.2)
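# Added note (not in the original example): each send_midi() call above emits
# a three-byte channel voice message -- the status byte (0x90 OR'd with the
# zero-based channel number, so 0x91 here), then the note number (middle C is
# 60) and the velocity (0-127). A velocity of 0 doubles as note-off, which is
# why the same status byte is reused to silence each note.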
if __name__=='__main__':
while True:
play_a_scale()
| 24.703704 | 70 | 0.554723 | [
"MIT"
] | Miselu/simplecoremidi | simplecoremidi/examples/play_a_scale.py | 667 | Python |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['MovingMedian'] , ['NoCycle'] , ['LSTM'] ); | 38.25 | 80 | 0.745098 | [
"BSD-3-Clause"
] | antoinecarme/pyaf | tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_MovingMedian_NoCycle_LSTM.py | 153 | Python |
# if works (almost) like in all other languages
# - indentation replaces { }  This applies to Python in general!
# - there is an elif instead of an else if
weight = 50 # kg
height = 190 # cm
bmi = weight / (height/100)**2
# bmi < 18.5 : underweight
# bmi > 25   : overweight
# otherwise  : normal weight
if bmi < 18.5:
    print("Underweight")
    print("Eat more!")
elif bmi > 25:
    print("Overweight")
else:
    print("Normal weight")
"Unlicense"
] | slogslog/Coding-Kurzgeschichten | Crashkurs Python/03_if.py | 449 | Python |
import sys
import os
import math
import shutil
import disk_sort
import struct
import operator
import logging
from decimal import Decimal
from fractions import Fraction
import numpy
from scipy.linalg import eig
import scipy.ndimage
import cProfile
import pstats
from osgeo import gdal, ogr
import pygeoprocessing.geoprocessing
logging.basicConfig(format='%(asctime)s %(name)-20s %(levelname)-8s \
%(message)s', level=logging.DEBUG, datefmt='%m/%d/%Y %H:%M:%S ')
LOGGER = logging.getLogger('invest_natcap.scenario_generator.scenario_generator')
def calculate_weights(arr, rounding=4):
PLACES = Decimal(10) ** -(rounding)
# get eigenvalues and vectors
evas, eves = eig(arr)
# get primary eigenvalue and vector
eva = max(evas)
eva_idx = evas.tolist().index(eva)
eve = eves.take((eva_idx,), axis=1)
# priority vector = normalized primary eigenvector
normalized = eve / sum(eve)
# turn into list of real part values
vector = [abs(e[0]) for e in normalized]
# return nice rounded Decimal values with labels
return [ Decimal( str(v) ).quantize(PLACES) for v in vector ]
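# Illustrative note (added, not in the original module): calculate_weights()
# returns the analytic-hierarchy-process style priority vector, i.e. the
# normalised principal eigenvector of a pairwise comparison matrix. For a
# consistent 2x2 matrix where the first cover is judged twice as important:
#
#   calculate_weights(numpy.array([[1.0, 2.0], [0.5, 1.0]]))
#   -> [Decimal('0.6667'), Decimal('0.3333')]
#
# The values are Decimals rounded to ``rounding`` places and sum to ~1.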
def calculate_priority(table_uri):
table = [line.strip().split(",") for line in open(table_uri).readlines()]
id_index = table[0].index("Id")
cover_id_list = [row[id_index] for row in table]
cover_id_list.pop(0)
cover_id_index_list = [table[0].index(cover_id) for cover_id in cover_id_list]
matrix = numpy.zeros((len(cover_id_list),len(cover_id_list)))
for row in range(len(cover_id_list)):
for col in range(row+1):
matrix[row][col] = float(table[row+1][cover_id_index_list[col]])
matrix[col][row] = 1 / matrix[row][col]
cover_id_list = [int(cover_id) for cover_id in cover_id_list]
return dict(zip(cover_id_list, calculate_weights(matrix, 4)))
def calculate_distance_raster_uri(dataset_in_uri, dataset_out_uri):
# Compute pixel distance
pygeoprocessing.geoprocessing.distance_transform_edt(dataset_in_uri, dataset_out_uri)
# Convert to meters
def pixel_to_meters_op(x):
x[x != nodata] *= cell_size
return x
cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(dataset_in_uri)
nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(dataset_out_uri)
tmp = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.vectorize_datasets(
[dataset_out_uri], \
pixel_to_meters_op, \
tmp, \
gdal.GDT_Float64, \
nodata, \
cell_size, \
'union', \
vectorize_op = False)
def identity_op(x):
return x
pygeoprocessing.geoprocessing.vectorize_datasets(
[tmp], \
identity_op, \
dataset_out_uri, \
gdal.GDT_Float64, \
nodata, \
cell_size, \
'union', \
vectorize_op = False)
# Compute raster stats so the raster is viewable in QGIS and Arc
pygeoprocessing.geoprocessing.calculate_raster_stats_uri(dataset_out_uri)
##def calculate_distance_raster_uri(dataset_in_uri, dataset_out_uri, cell_size = None, max_distance = None):
## if cell_size == None:
## cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(dataset_in_uri)
##
## memory_array = pygeoprocessing.geoprocessing.load_memory_mapped_array(dataset_in_uri, pygeoprocessing.geoprocessing.temporary_filename())
##
## memory_array = scipy.ndimage.morphology.distance_transform_edt(memory_array) * cell_size
##
## nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(dataset_in_uri)
##
#### if max_distance != None:
#### memory_array[memory_array > max_distance] = nodata
##
## pygeoprocessing.geoprocessing.new_raster_from_base_uri(dataset_in_uri, dataset_out_uri, 'GTiff', nodata, gdal.GDT_Float32)
##
## dataset_out = gdal.Open(dataset_out_uri, 1)
## band = dataset_out.GetRasterBand(1)
## band.WriteArray(memory_array)
##
## band = None
## dataset_out = None
shapeTypes= {0: "Null Shape", 1: "Point", 3: "PolyLine", 5: "Polygon",
8: "MultiPoint", 11: "PointZ", 13: "PolyLineZ",
15: "PolygonZ", 18: "MultiPointZ", 21: "PointM",
23: "PolyLineM", 25: "PolygonM", 28: "MultiPointM",
31: "MultiPatch"}
def get_geometry_type_from_uri(datasource_uri):
datasource = open(datasource_uri, 'r')
datasource.seek(32)
shape_type ,= struct.unpack('<i',datasource.read(4))
datasource.close()
return shape_type
def get_transition_set_count_from_uri(dataset_uri_list):
cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(dataset_uri_list[0])
lulc_nodata = int(pygeoprocessing.geoprocessing.get_nodata_from_uri(dataset_uri_list[0]))
nodata = 0
#reclass rasters to compact bit space
lulc_codes = set()
unique_raster_values_count = {}
for dataset_uri in dataset_uri_list:
unique_raster_values_count[dataset_uri] = pygeoprocessing.geoprocessing.unique_raster_values_count(dataset_uri)
lulc_codes.update(unique_raster_values_count[dataset_uri].keys())
lulc_codes = list(lulc_codes)
lulc_codes.sort()
if len(lulc_codes) < 2 ** 8:
data_type = gdal.GDT_UInt16
shift = 8
elif len(lulc_codes) < 2 ** 16:
data_type = gdal.GDT_UInt32
shift = 16
else:
raise ValueError, "Too many LULC codes."
#renumber and reclass rasters
reclass_orig_dict = dict(zip(lulc_codes,range(1,len(lulc_codes)+1)))
reclass_dest_dict = {}
for key in reclass_orig_dict:
reclass_dest_dict[key] = reclass_orig_dict[key] << shift
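    # Worked example of the packing scheme (illustrative values, assuming
    # shift == 8): an original cover reclassified to index 3 and a destination
    # cover reclassified to index 5 combine to 3 + (5 << 8) == 1283, and the
    # pair is recovered further below via 1283 % (2 ** 8) == 3 and 1283 >> 8 == 5.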
def add_op(orig, dest):
return orig + dest
counts={}
for i in range(len(dataset_uri_list)-1):
orig_uri = pygeoprocessing.geoprocessing.temporary_filename()
dest_uri = pygeoprocessing.geoprocessing.temporary_filename()
multi_uri = pygeoprocessing.geoprocessing.temporary_filename()
#reclass orig values
pygeoprocessing.geoprocessing.reclassify_dataset_uri(dataset_uri_list[i],
reclass_orig_dict,
orig_uri,
data_type,
nodata,
exception_flag="values_required")
#reclass dest values
pygeoprocessing.geoprocessing.reclassify_dataset_uri(dataset_uri_list[i+1],
reclass_dest_dict,
dest_uri,
data_type,
nodata,
exception_flag="values_required")
#multiplex orig with dest
pygeoprocessing.geoprocessing.vectorize_datasets([orig_uri, dest_uri],
add_op,
multi_uri,
data_type,
nodata,
cell_size,
"union")
#get unique counts
counts[i]=pygeoprocessing.geoprocessing.unique_raster_values_count(multi_uri, False)
restore_classes = {}
for key in reclass_orig_dict:
restore_classes[reclass_orig_dict[key]] = key
restore_classes[nodata] = lulc_nodata
LOGGER.debug("Decoding transition table.")
transitions = {}
for key in counts:
transitions[key]={}
for k in counts[key]:
try:
orig = restore_classes[k % (2**shift)]
except KeyError:
orig = lulc_nodata
try:
dest = restore_classes[k >> shift]
except KeyError:
dest = lulc_nodata
try:
transitions[key][orig][dest] = counts[key][k]
except KeyError:
transitions[key][orig] = {dest : counts[key][k]}
return unique_raster_values_count, transitions
def generate_chart_html(cover_dict, cover_names_dict, workspace_dir):
html = "\n<table BORDER=1>"
html += "\n<TR><td>Id</td><td>% Before</td><td>% After</td></TR>"
cover_id_list = cover_dict.keys()
cover_id_list.sort()
cover_id_list_chart = cover_names_dict.keys()
cover_id_list_chart.sort()
pixcount = 0
for cover_id in cover_id_list:
pixcount += cover_dict[cover_id][0]
pixcount = float(pixcount)
for cover_id in cover_id_list:
html += "\n<TR><td>%i</td><td>%i</td><td>%i</td></TR>" % (cover_id,
(cover_dict[cover_id][0] / pixcount) * 100,
(cover_dict[cover_id][1] / pixcount) * 100 )
html += "\n<table>"
#create three charts for original, final and change
thecharts = [
['Original',0],
['Final',1],
['Change',2]
]
hainitial = ""
hainitialnegative = ""
hainitiallist = []
hafinal = ""
hafinalnegative = ""
hafinallist = []
hachange = ""
hachangelist = []
haall = []
initialcover = []
finalcover = []
for cover_id in cover_id_list_chart:
try:
initialcover.append((cover_dict[cover_id][0] / pixcount) * 100)
except KeyError:
initialcover.append(0)
try:
finalcover.append((cover_dict[cover_id][1] / pixcount) * 100)
except KeyError:
finalcover.append(0)
#return html
html += "<style type='text/css'>"
html += "body {font-family: Arial, Helvetica, sans-serif; font-size: 0.9em;}"
html += "table#results {margin: 20px auto}"
html += "table#results th {text-align: left}"
html += "</style>"
html += "<script type='text/javascript'>\n"
html += "var chart,\n"
categories = []
html += "categories = ["
for cover_id in cover_id_list_chart:
#pass
categories.append("'"+cover_names_dict[cover_id]+"'")
html += ",".join(categories)
html += "]\n"
html +="$(document).ready(function() {\n"
for x in initialcover:
hainitial = hainitial +str(x)+","
hainitialnegative = hainitialnegative + "0,"
hainitiallist.append(float(x))
temp = []
temp.append(hainitial)
temp.append(hainitialnegative)
haall.append(temp)
thecharts[0].append(max(hainitiallist))
thecharts[0].append(min(hainitiallist))
for x in finalcover:
hafinal = hafinal +str(x)+","
hafinalnegative = hafinalnegative + "0,"
hafinallist.append(float(x))
temp = []
temp.append(hafinal)
temp.append(hafinalnegative)
haall.append(temp)
thecharts[1].append(max(hafinallist))
thecharts[1].append(min(hafinallist))
for x in range(len(initialcover)):
hachange = hachange + str(float(finalcover[x]) - float(initialcover[x]))+","
hachangelist.append(float(finalcover[x]) - float(initialcover[x]))
#split the change values
hachangelistnegative = ""
hachangelistpositive = ""
for item in hachangelist:
if item < 0:
hachangelistnegative=hachangelistnegative+str(item)+","
hachangelistpositive=hachangelistpositive+"0,"
else:
hachangelistpositive=hachangelistpositive+str(item)+","
hachangelistnegative=hachangelistnegative+"0,"
temp = []
temp.append(hachangelistpositive)
temp.append(hachangelistnegative)
haall.append(temp)
thecharts[2].append(max(hachangelist))
thecharts[2].append(min(hachangelist))
if thecharts[0][2] > thecharts[1][2]:
thecharts[1][2] = thecharts[0][2]
thecharts[2][2] = thecharts[0][2]
else:
thecharts[0][2] = thecharts[1][2]
thecharts[2][2] = thecharts[1][2]
for x in thecharts:
if x[0] == 'Change':
themin = x[3]
else:
themin = 0
html += "chart = new Highcharts.Chart({\n"
html += "chart: {renderTo: '"+x[0]+"container',defaultSeriesType: 'bar'},"
html += "title: {text: '"+x[0]+" Landcover'},"
html += "subtitle: {text: ''},"
html += "xAxis: [{categories: categories,reversed: false}, {opposite: true, reversed: false,categories: categories,linkedTo: 0}],"
html += "yAxis: {title: {text: null},labels: {formatter: function(){return Math.abs(this.value)}},min: "+str(themin)+",max: "+str(x[2])+"},"
html += "plotOptions: {series: { stacking: 'normal', showInLegend: false } },"
html += "tooltip: { formatter: function(){return '<b>'+ this.point.category +'</b><br/>'+'Area: '+ Highcharts.numberFormat(Math.abs(this.point.y), 0)+'%';}},"
html += "series: [{name: '',"
html += "data: ["+haall[x[1]][0]+"]}, {"
html += "name: '',"
html += "data: ["+haall[x[1]][1]+"]}]});\n"
html += "});\n"
html += "</script>\n"
for x in thecharts:
html += "<div id='"+x[0]+"container' style='width: 800px; height: 400px; margin: 20px 0'></div>\n"
return html
def filter_fragments(input_uri, size, output_uri):
#clump and sieve
LOGGER.debug("Filtering patches smaller than %i from %s.", size, input_uri)
src_ds = gdal.Open(input_uri)
src_band = src_ds.GetRasterBand(1)
src_array = src_band.ReadAsArray()
driver = gdal.GetDriverByName("GTiff")
driver.CreateCopy(output_uri, src_ds, 0 )
dst_ds = gdal.Open(output_uri, 1)
dst_band = dst_ds.GetRasterBand(1)
dst_array = numpy.copy(src_array)
suitability_values = numpy.unique(src_array)
if suitability_values[0] == 0:
suitability_values = suitability_values[1:]
#8 connectedness preferred, 4 connectedness allowed
eight_connectedness = numpy.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
four_connectedness = numpy.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
suitability_values_count = suitability_values.size
for v in range(1, suitability_values_count):
LOGGER.debug('Processing suitability value ' + \
str(suitability_values.size - v))
value = suitability_values[v]
# Pixels of interest set to 1, 0 otherwise
mask = src_array == value
# Number of pixels to process
ones_in_mask = numpy.sum(mask)
# Label and count disconnected components (fragments)
label_im, nb_labels = scipy.ndimage.label(mask, four_connectedness)
# Compute fragment sizes
fragment_sizes = \
scipy.ndimage.sum(mask, label_im, range(nb_labels + 1))
# List fragments
fragment_labels = numpy.array(range(nb_labels + 1))
# Discard large fragments
small_fragment_mask = numpy.where(fragment_sizes <= size)
# Gather small fragment information
small_fragment_sizes = fragment_sizes[small_fragment_mask]
small_fragment_labels = fragment_labels[small_fragment_mask]
combined_small_fragment_size = numpy.sum(small_fragment_sizes)
# Find each fragment
fragments_location = scipy.ndimage.find_objects(label_im, nb_labels)
removed_pixels = 0
small_fragment_labels_count = small_fragment_labels.size
        for l in range(small_fragment_labels_count-1):
            # small_fragment_labels[0] is the background label (0), so skip it;
            # find_objects() stores the slice for label n at index n - 1
            label = small_fragment_labels[l+1]
            source = label_im[fragments_location[label-1]]
            target = dst_array[fragments_location[label-1]]
            pixels_to_remove = numpy.where(source == label)
            target[pixels_to_remove] = 0
dst_band.WriteArray(dst_array)
def sum_uri(dataset_uri, datasource_uri):
"""Wrapper call to pygeoprocessing.geoprocessing.aggregate_raster_values_uri to extract total
:param dataset_uri: The uri for the input raster.
:type dataset_uri: str
:return: None
:rtype: None
"""
total = pygeoprocessing.geoprocessing.aggregate_raster_values_uri(dataset_uri, datasource_uri).total
return total.__getitem__(total.keys().pop())
def execute(args):
###
    #overriding non-standard field names
###
# Preliminary tests
if ('transition' in args) and ('suitability' in args):
assert args['transition'] != args['suitability'], \
'Transition and suitability tables are the same: ' + \
args['transition'] + '. The model expects different tables.'
#transition table fields
args["transition_id"] = "Id"
args["percent_field"] = "Percent Change"
args["area_field"] = "Area Change"
args["priority_field"] = "Priority"
args["proximity_field"] = "Proximity"
args["proximity_weight"] = "0.3"
args["patch_field"] = "Patch ha"
#factors table fields
args["suitability_id"] = "Id"
args["suitability_layer"] = "Layer"
args["suitability_weight"] = "Wt"
args["suitability_field"] = "Suitfield"
args["distance_field"] = "Dist"
args["suitability_cover_id"] = "Cover ID"
#exercise fields
args["returns_cover_id"] = "Cover ID"
args["returns_layer"] = "/Users/olwero/Dropbox/Work/Ecosystem_Services/NatCap/Olympics/2014/Scenarios/Exercise/inputtest/returns.csv"
###
#get parameters, set outputs
###
workspace = args["workspace_dir"]
if not os.path.exists(workspace):
os.makedirs(workspace)
landcover_uri = args["landcover"]
if len(args["suffix"]) > 0:
suffix = "_" + args["suffix"].strip("_")
else:
suffix = ""
intermediate_dir = "intermediate"
if not os.path.exists(os.path.join(workspace, intermediate_dir)):
os.makedirs(os.path.join(workspace, intermediate_dir))
proximity_weight = float(args["proximity_weight"])
#it might be better to just check if factors being used
try:
physical_suitability_weight = float(args["weight"])
except KeyError:
physical_suitability_weight = 0.5
##output file names
#absolute paths
landcover_resample_uri = os.path.join(workspace, "resample" + suffix + ".tif")
landcover_transition_uri = os.path.join(workspace,"transitioned" + suffix + ".tif")
override_dataset_uri = os.path.join(workspace,"override" + suffix + ".tif")
landcover_htm_uri = os.path.join(workspace,"scenario-output-summary" + suffix + ".html")
pygeoprocessing.geoprocessing.create_directories([workspace])
#relative paths, or with patterned name
transition_name = os.path.join(intermediate_dir, "transition_%i" + suffix + ".tif")
suitability_name = os.path.join(intermediate_dir, "%s_%s" + suffix + ".tif")
normalized_name = os.path.join(intermediate_dir, "%s_%s_norm" + suffix + ".tif")
combined_name = os.path.join(intermediate_dir, "factors_%s" + suffix + ".tif")
constraints_name = os.path.join(intermediate_dir, "constraints" + suffix + ".tif")
filter_name = os.path.join(intermediate_dir, "filter_%i" + suffix + ".tif")
factors_name = os.path.join(intermediate_dir, "suitability_%s" + suffix + ".tif")
cover_name = os.path.join(intermediate_dir, "cover_%i" + suffix + ".tif")
proximity_name = os.path.join(intermediate_dir, "proximity_%s" + suffix + ".tif")
normalized_proximity_name = os.path.join(intermediate_dir, "proximity_norm_%s" + suffix + ".tif")
adjusted_suitability_name = os.path.join(intermediate_dir, "adjusted_suitability_%s" + suffix + ".tif")
scenario_name = "scenario" + suffix + ".tif"
###
#constants
###
raster_format = "GTiff"
transition_type = gdal.GDT_Int16
transition_nodata = -1
change_nodata = -9999
    #value to multiply transition matrix entries (i.e. convert 10 point scale to 100 point scale)
transition_scale = 10
distance_scale = 100
suitability_nodata = 0
suitability_type = gdal.GDT_Int16
def suitability_op(trans, suit):
if trans == 0:
return 0
return round(((1 - physical_suitability_weight) * trans)\
+ (physical_suitability_weight * suit))
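    # Illustrative values (not from any input data): with the default
    # physical_suitability_weight of 0.5, trans = 60 and suit = 80 combine to
    # round(0.5 * 60 + 0.5 * 80) == 70; a transition score of 0 always yields 0.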
ds_type = "GTiff"
driver = gdal.GetDriverByName(ds_type)
###
#validate data
###
#raise warning if nothing is going to happen
if not any([args["calculate_transition"],
args["calculate_factors"],
args["override_layer"]]):
msg = "You must select at least one of the following: specify transitions, use factors, or override layer."
LOGGER.error(msg)
raise ValueError, msg
##transition table validation
#raise error if transition table provided, but not used
if args["transition"] and not(args["calculate_transition"] or args["calculate_factors"]):
msg = "Transition table provided but not used."
LOGGER.warn(msg)
#raise ValueError, msg
transition_dict = {}
if args["calculate_transition"] or args["calculate_factors"]:
#load transition table
transition_dict = pygeoprocessing.geoprocessing.get_lookup_from_csv(args["transition"], args["transition_id"])
#raise error if LULC contains cover id's not in transition table
landcover_count_dict = pygeoprocessing.geoprocessing.unique_raster_values_count(landcover_uri)
missing_lulc = set(landcover_count_dict).difference(transition_dict.keys())
        if len(missing_lulc) > 0:
            missing_lulc = list(missing_lulc)
            missing_lulc.sort()
            missing_lulc = ", ".join([str(l) for l in missing_lulc])
            msg = "Missing suitability information for cover(s) %s." % missing_lulc
            LOGGER.error(msg)
            raise ValueError, msg
for cover_id in transition_dict:
#raise error if percent change for new LULC
if (transition_dict[cover_id][args["percent_field"]] > 0) and not (cover_id in landcover_count_dict):
msg = "Cover %i does not exist in LULC and therefore cannot have a percent change." % cover_id
LOGGER.error(msg)
raise ValueError, msg
#raise error if change by percent and area both specified
if (transition_dict[cover_id][args["percent_field"]] > 0) and (transition_dict[cover_id][args["area_field"]] > 0):
msg = "Cover %i cannot have both an increase by percent and area." % cover_id
LOGGER.error(msg)
raise ValueError, msg
##factor parameters validation
if args["calculate_factors"]:
pass
#error if overall physical weight not in [0, 1] range
##factor table validation
#if polygon no distance field allowed
#if point or line, integer distance field only
#error if same factor twice for same coverage
###
#resample, align and rasterize data
###
if args["calculate_priorities"]:
LOGGER.info("Calculating priorities.")
priorities_dict = calculate_priority(args["priorities_csv_uri"])
#check geographic extents, projections
## #validate resampling size
## if args["resolution"] != "":
## if args["resolution"] < pygeoprocessing.geoprocessing.get_cell_size_from_uri(landcover_uri):
## msg = "The analysis resolution cannot be smaller than the input."
## LOGGER.error(msg)
## raise ValueError, msg
##
## else:
## LOGGER.info("Resampling land cover.")
## #gdal.GRA_Mode might be a better resample method, but requires GDAL >= 1.10.0
## bounding_box = pygeoprocessing.geoprocessing.get_bounding_box(landcover_uri)
## pygeoprocessing.geoprocessing.resize_and_resample_dataset_uri(landcover_uri,
## bounding_box,
## args["resolution"],
## landcover_resample_uri,
## "nearest")
## LOGGER.debug("Changing landcover uri to resampled uri.")
## landcover_uri = landcover_resample_uri
cell_size = pygeoprocessing.geoprocessing.get_cell_size_from_uri(landcover_uri)
suitability_transition_dict = {}
if args["calculate_transition"]:
for next_lulc in transition_dict:
this_uri = os.path.join(workspace, transition_name % next_lulc)
#construct reclass dictionary
reclass_dict = {}
all_zeros = True
for this_lulc in transition_dict:
value = int(transition_dict[this_lulc][str(next_lulc)])
reclass_dict[this_lulc] = value * transition_scale
all_zeros = all_zeros and (value == 0)
if not all_zeros:
#reclass lulc by reclass_dict
pygeoprocessing.geoprocessing.reclassify_dataset_uri(landcover_uri,
reclass_dict,
this_uri,
transition_type,
suitability_nodata,
exception_flag = "values_required")
#changing nodata value so 0's no longer nodata
dataset = gdal.Open(this_uri, 1)
band = dataset.GetRasterBand(1)
nodata = band.SetNoDataValue(transition_nodata)
dataset = None
suitability_transition_dict[next_lulc] = this_uri
suitability_factors_dict = {}
if args["calculate_factors"]:
factor_dict = pygeoprocessing.geoprocessing.get_lookup_from_csv(args["suitability"], args["suitability_id"])
factor_uri_dict = {}
factor_folder = args["suitability_folder"]
if not args["factor_inclusion"]:
option_list=["ALL_TOUCHED=TRUE"]
else:
option_list = ["ALL_TOUCHED=FALSE"]
for factor_id in factor_dict:
factor = factor_dict[factor_id][args["suitability_layer"]]
factor_stem, _ = os.path.splitext(factor)
suitability_field_name = factor_dict[factor_id][args["suitability_field"]]
distance = factor_dict[factor_id][args["distance_field"]]
cover_id = int(factor_dict[factor_id][args["suitability_cover_id"]])
weight = int(factor_dict[factor_id][args["suitability_weight"]])
LOGGER.debug("Found reference to factor (%s, %s, %s) for cover %i.", factor_stem, suitability_field_name, distance, cover_id)
if not (factor_stem, suitability_field_name, distance) in factor_uri_dict:
factor_uri = os.path.join(factor_folder, factor)
if not os.path.exists(factor_uri):
msg = "Missing file %s." % factor_uri
LOGGER.error(msg)
raise ValueError, msg
shape_type = get_geometry_type_from_uri(factor_uri)
LOGGER.debug("Processing %s.", shapeTypes[shape_type])
if shape_type in [5, 15, 25, 31]: #polygon
LOGGER.info("Rasterizing %s using sutibality field %s.", factor_stem, suitability_field_name)
ds_uri = os.path.join(workspace, suitability_name % (factor_stem, suitability_field_name))
burn_value = [1]
suitability_field = ["ATTRIBUTE=%s" % suitability_field_name]
gdal_format = gdal.GDT_Float64
pygeoprocessing.geoprocessing.new_raster_from_base_uri(landcover_uri, ds_uri, raster_format, transition_nodata, gdal_format, fill_value = 0)
pygeoprocessing.geoprocessing.rasterize_layer_uri(ds_uri, factor_uri, burn_value, option_list=option_list + suitability_field)
factor_uri_dict[(factor_stem, suitability_field_name, distance)] = ds_uri
elif shape_type in [1, 3, 8, 11, 13, 18, 21, 23, 28]: #point or line
# For features with no area, it's (almost) impossible to
# hit the center pixel, so we use ALL_TOUCHED=TRUE
option_list=["ALL_TOUCHED=TRUE"]
distance = int(distance)
ds_uri = os.path.join(workspace, suitability_name % (factor_stem, str(distance) + '_raw_raster'))
distance_uri = os.path.join(workspace, suitability_name % (factor_stem, str(distance) + '_raw_distance'))
fdistance_uri = os.path.join(workspace, suitability_name % (factor_stem, distance))
normalized_uri = os.path.join(workspace, normalized_name % (factor_stem, distance))
burn_value = [1]
LOGGER.info("Buffering rasterization of %s to distance of %i.", factor_stem, distance)
gdal_format = gdal.GDT_Byte
pygeoprocessing.geoprocessing.new_raster_from_base_uri(landcover_uri, ds_uri, raster_format, -1, gdal_format)
landcover_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(landcover_uri)
ds_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(ds_uri)
pygeoprocessing.geoprocessing.vectorize_datasets([landcover_uri], \
lambda x: 0 if x != landcover_nodata else -1, \
ds_uri, \
pygeoprocessing.geoprocessing.get_datatype_from_uri(ds_uri), \
ds_nodata, \
pygeoprocessing.geoprocessing.get_cell_size_from_uri(ds_uri), \
'intersection')
pygeoprocessing.geoprocessing.rasterize_layer_uri(ds_uri, factor_uri, burn_value, option_list)
calculate_distance_raster_uri(ds_uri, distance_uri)
def threshold(value):
result = numpy.where(value > distance, transition_nodata, value)
return numpy.where(value == transition_nodata, transition_nodata, result)
pygeoprocessing.geoprocessing.vectorize_datasets([distance_uri],
threshold,
fdistance_uri,
pygeoprocessing.geoprocessing.get_datatype_from_uri(distance_uri),
transition_nodata,
cell_size,
"union",
vectorize_op = False)
pygeoprocessing.geoprocessing.calculate_raster_stats_uri(fdistance_uri)
minimum, maximum, _, _ = pygeoprocessing.geoprocessing.get_statistics_from_uri(fdistance_uri)
def normalize_op(value):
diff = float(maximum - minimum)
return numpy.where(
value == transition_nodata,
suitability_nodata,
((distance_scale - 1) - (((value - minimum) / \
diff) * (distance_scale - 1))) + 1)
pygeoprocessing.geoprocessing.vectorize_datasets([fdistance_uri],
normalize_op,
normalized_uri,
transition_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
factor_uri_dict[(factor_stem, suitability_field_name, distance)] = normalized_uri
else:
raise ValueError, "Invalid geometry type %i." % shape_type
# Apply nodata to the factors raster
landcover_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(landcover_uri)
temp_uri = pygeoprocessing.geoprocessing.temporary_filename()
def apply_nodata_op(landcover, value):
                            return numpy.where(landcover == landcover_nodata, 0, value)
pygeoprocessing.geoprocessing.vectorize_datasets( \
[landcover_uri,
factor_uri_dict[(factor_stem, suitability_field_name, distance)]],
apply_nodata_op,
temp_uri,
transition_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
def identity_op(x):
return x
pygeoprocessing.geoprocessing.vectorize_datasets( \
[temp_uri],
identity_op,
factor_uri_dict[(factor_stem, suitability_field_name, distance)],
transition_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
else:
LOGGER.debug("Skipping already processed suitability layer.")
LOGGER.debug("Adding factor (%s, %s, %s) to cover %i suitability list.", factor_stem, suitability_field_name, distance, cover_id)
if cover_id in suitability_factors_dict:
suitability_factors_dict[cover_id].append((factor_uri_dict[(factor_stem, suitability_field_name, distance)], weight))
else:
suitability_factors_dict[cover_id] = [(factor_uri_dict[(factor_stem, suitability_field_name, distance)], weight)]
for cover_id in suitability_factors_dict:
if len(suitability_factors_dict[cover_id]) > 1:
LOGGER.info("Combining factors for cover type %i.", cover_id)
ds_uri = os.path.join(workspace, combined_name % cover_id)
uri_list, weights_list = apply(zip, suitability_factors_dict[cover_id])
total = float(sum(weights_list))
weights_list = [weight / total for weight in weights_list]
def weighted_op(*values):
result = (values[0] * weights_list[0]).astype(float)
for v, w in zip(values[1:], weights_list[1:]):
result += v * w
return result
# print('------files:', uri_list, weights_list)
pygeoprocessing.geoprocessing.vectorize_datasets(list(uri_list),
weighted_op,
ds_uri,
suitability_type,
transition_nodata,
cell_size,
"union",
vectorize_op = False)
suitability_factors_dict[cover_id] = ds_uri
else:
suitability_factors_dict[cover_id] = suitability_factors_dict[cover_id][0][0]
suitability_dict = {}
if args["calculate_transition"]:
suitability_dict = suitability_transition_dict
if args["calculate_factors"]:
for cover_id in suitability_factors_dict:
if cover_id in suitability_dict:
LOGGER.info("Combining suitability for cover %i.", cover_id)
ds_uri = os.path.join(workspace, factors_name % cover_id)
print('cover_ids', suitability_dict.keys())
pygeoprocessing.geoprocessing.vectorize_datasets([suitability_transition_dict[cover_id],
suitability_factors_dict[cover_id]],
suitability_op,
ds_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = ds_uri
else:
suitability_dict[cover_id] = suitability_factors_dict[cover_id]
elif args["calculate_factors"]:
suitability_dict = suitability_factors_dict
#clump and sieve
for cover_id in transition_dict:
if (transition_dict[cover_id][args["patch_field"]] > 0) and (cover_id in suitability_dict):
LOGGER.info("Filtering patches from %i.", cover_id)
size = 10000 * int(math.ceil( \
transition_dict[cover_id][args["patch_field"]] / \
(cell_size ** 2)))
output_uri = os.path.join(workspace, filter_name % cover_id)
filter_fragments(suitability_dict[cover_id], size, output_uri)
suitability_dict[cover_id] = output_uri
###
#compute intermediate data if needed
###
    #constraints raster (reclass using permeability values, filters on clump size)
if args["calculate_constraints"]:
LOGGER.info("Rasterizing constraints.")
constraints_uri = args["constraints"]
constraints_field_name = args["constraints_field"]
constraints_ds_uri = os.path.join(workspace, constraints_name)
option_list = ["ALL_TOUCHED=FALSE"]
burn_value = [0]
constraints_field = ["ATTRIBUTE=%s" % constraints_field_name]
gdal_format = gdal.GDT_Float64
pygeoprocessing.geoprocessing.new_raster_from_base_uri(landcover_uri, constraints_ds_uri, raster_format, transition_nodata, gdal_format, fill_value = 1)
pygeoprocessing.geoprocessing.rasterize_layer_uri(constraints_ds_uri, constraints_uri, burn_value, option_list=option_list + constraints_field)
# Check that the values make sense
raster = gdal.Open(constraints_ds_uri)
band = raster.GetRasterBand(1)
array = band.ReadAsArray()
unique = numpy.unique(array)
assert (unique[0] >= 0.0) and (unique[-1] <= 1.0), \
'Invalid raster value in field ' + constraints_field_name + ' in ' \
+ constraints_uri
else:
LOGGER.info("Constraints not included.")
proximity_dict = {}
if args["calculate_proximity"]:
LOGGER.info("Calculating proximity.")
cover_types = transition_dict.keys()
for cover_id in transition_dict:
if transition_dict[cover_id][args["proximity_field"]] > 0 and cover_id in suitability_dict:
distance = int(transition_dict[cover_id][args["proximity_field"]])
LOGGER.info("Calculating proximity for %i.", cover_id)
reclass_dict = dict(zip(cover_types, [1] * len(cover_types)))
reclass_dict[cover_id] = 0
ds_uri = os.path.join(workspace, cover_name % cover_id)
distance_uri = pygeoprocessing.geoprocessing.temporary_filename()
fdistance_uri = os.path.join(workspace, proximity_name % cover_id)
normalized_uri = os.path.join(workspace, normalized_proximity_name % cover_id)
pygeoprocessing.geoprocessing.reclassify_dataset_uri(landcover_uri,
reclass_dict,
ds_uri,
transition_type,
transition_nodata,
exception_flag = "values_required")
calculate_distance_raster_uri(ds_uri, distance_uri)
def threshold(value):
if value > distance:
return transition_nodata
return value
pygeoprocessing.geoprocessing.vectorize_datasets([distance_uri],
threshold,
fdistance_uri,
pygeoprocessing.geoprocessing.get_datatype_from_uri(distance_uri),
transition_nodata,
cell_size,
"union")
minimum, maximum, _, _ = pygeoprocessing.geoprocessing.get_statistics_from_uri(fdistance_uri)
assert minimum < maximum, "Wrong distance (min, max) = (" + \
str(minimum) + ", " + str(maximum) + ") in " + fdistance_uri
def normalize_op(value):
if value == transition_nodata:
return suitability_nodata
else:
return ((distance_scale - 1) \
- (((value - minimum) \
/ float(maximum - minimum)) \
* (distance_scale - 1))) \
+ 1
pygeoprocessing.geoprocessing.vectorize_datasets([fdistance_uri],
normalize_op,
normalized_uri,
transition_type,
transition_nodata,
cell_size,
"union")
proximity_dict[cover_id] = normalized_uri
def es_change_op(final_es ,initial_es):
return final_es - initial_es
def constraint_op(suit, cons):
return suit * cons
def proximity_op(suit, prox):
v = suit + (prox * proximity_weight)
if v > 100:
return 100
else:
return v
def constraint_proximity_op(suit, cons, prox):
v = (cons * suit) + (prox * proximity_weight)
if v > 100:
return 100
else:
return v
for cover_id in suitability_dict:
suitability_uri = os.path.join(workspace, adjusted_suitability_name % cover_id)
if args["calculate_constraints"]:
if cover_id in proximity_dict:
LOGGER.info("Combining suitability, proximity, and constraints for %i.", cover_id)
uri_list = [suitability_dict[cover_id],
constraints_ds_uri,
proximity_dict[cover_id]]
LOGGER.info("Vectorizing: %s", ", ".join(uri_list))
pygeoprocessing.geoprocessing.vectorize_datasets(uri_list,
constraint_proximity_op,
suitability_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = suitability_uri
else:
LOGGER.info("Combining suitability and constraints for %i.", cover_id)
uri_list = [suitability_dict[cover_id],
constraints_ds_uri]
# print('------suitability and constraint files:', uri_list)
LOGGER.info("Vectorizing: %s", ", ".join(uri_list))
pygeoprocessing.geoprocessing.vectorize_datasets(uri_list,
constraint_op,
suitability_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = suitability_uri
elif cover_id in proximity_dict:
LOGGER.info("Combining suitability and proximity for %i.", cover_id)
uri_list = [suitability_dict[cover_id],
proximity_dict[cover_id]]
LOGGER.info("Vectorizing: %s", ", ".join(uri_list))
pygeoprocessing.geoprocessing.vectorize_datasets(uri_list,
proximity_op,
suitability_uri,
transition_type,
transition_nodata,
cell_size,
"union")
suitability_dict[cover_id] = suitability_uri
#normalize probabilities to be on a 10 point scale
#probability raster (reclass using probability matrix)
#proximity raster (gaussian for each landcover type, using max distance)
#InVEST 2 uses 4-connectedness?
    #combine rasters for weighting into suitability raster, multiply proximity by 0.3
#[suitability * (1-factor weight)] + (factors * factor weight) or only single raster
###
#reallocate pixels (disk heap sort, randomly reassign equal value pixels, applied in order)
###
#copy initial LULC
scenario_uri = os.path.join(workspace, scenario_name)
src_ds = gdal.Open(landcover_uri)
n_cols = src_ds.RasterXSize
n_rows = src_ds.RasterYSize
dst_ds = driver.CreateCopy(scenario_uri, src_ds, 0)
dst_ds = None
src_ds = None
#identify LULC types undergoing change
change_list = []
if args["calculate_priorities"]:
for cover_id in transition_dict:
percent_change = transition_dict[cover_id][args["percent_field"]]
area_change = transition_dict[cover_id][args["area_field"]]
if percent_change > 0:
change_list.append((priorities_dict[cover_id],
cover_id,
int((percent_change / 100.0) \
* landcover_count_dict[cover_id])))
elif area_change > 0:
change_list.append((priorities_dict[cover_id],
cover_id,
10000 * int(math.ceil(area_change \
/ (cell_size**2)))))
else:
LOGGER.warn("Cover %i suitability specified, but no change indicated.", cover_id)
else:
for cover_id in transition_dict:
percent_change = transition_dict[cover_id][args["percent_field"]]
area_change = transition_dict[cover_id][args["area_field"]]
if percent_change > 0:
change_list.append((transition_dict[cover_id][args["priority_field"]],
cover_id,
int((percent_change / 100.0) \
* landcover_count_dict[cover_id])))
elif area_change > 0:
change_list.append((transition_dict[cover_id][args["priority_field"]],
cover_id,
10000 * int(math.ceil(area_change \
/ (cell_size**2)))))
else:
LOGGER.warn("Cover %i suitability specified, but no change indicated.", cover_id)
change_list.sort(reverse=True)
#change pixels
scenario_ds = gdal.Open(scenario_uri, 1)
scenario_band = scenario_ds.GetRasterBand(1)
scenario_array = scenario_band.ReadAsArray()
unconverted_pixels = {}
for index, (priority, cover_id, count) in enumerate(change_list):
LOGGER.debug("Increasing cover %i by %i pixels.", cover_id, count)
#open all lower priority suitability rasters and assign changed pixels value of 0
update_ds = {}
update_bands = {}
update_arrays = {}
for _, update_id, _ in change_list[index+1:]:
update_ds[update_id] = gdal.Open(suitability_dict[update_id], 1)
update_bands[update_id] = update_ds[update_id].GetRasterBand(1)
update_arrays[update_id] = update_bands[update_id].ReadAsArray()
##select pixels
#open suitability raster
src_ds = gdal.Open(suitability_dict[cover_id], 1)
src_band = src_ds.GetRasterBand(1)
src_array = src_band.ReadAsArray()
pixels_changed = 0
suitability_values = list(numpy.unique(src_array))
suitability_values.sort(reverse=True)
if suitability_values[-1]==0:
suitability_values.pop(-1)
for suitability_score in suitability_values:
            # Check if suitability is between 0 and 100 inclusive
if abs(suitability_score - 50) > 50:
print('suitability_values:', suitability_dict[cover_id])
for v in suitability_values:
print v, ' ',
assert abs(suitability_score - 50) <= 50, \
'Invalid suitability score ' + str(suitability_score)
if pixels_changed == count:
LOGGER.debug("All necessay pixels converted.")
break
LOGGER.debug("Checking pixels with suitability of %i.", suitability_score)
#mask out everything except the current suitability score
mask = src_array == suitability_score
#label patches
label_im, nb_labels = scipy.ndimage.label(mask)
#get patch sizes
patch_sizes = scipy.ndimage.sum(mask, label_im, range(1, nb_labels + 1))
patch_labels = numpy.array(range(1, nb_labels + 1))
patch_locations = scipy.ndimage.find_objects(label_im, nb_labels)
#randomize patch order
numpy.random.shuffle(patch_labels)
#check patches for conversion
patch_label_count = patch_labels.size
for l in range(patch_label_count):
label = patch_labels[l]
source = label_im[patch_locations[label-1]]
target = scenario_array[patch_locations[label-1]]
pixels_to_change = numpy.where(source == label)
assert pixels_to_change[0].size == patch_sizes[label-1]
if patch_sizes[label-1] + pixels_changed > count:
#mask out everything except the current patch
#patch = numpy.where(label_im == label)
#patch_mask = numpy.zeros_like(scenario_array)
patch_mask = numpy.zeros_like(target)
#patch_mask[patch] = 1
patch_mask[pixels_to_change] = 1
#calculate the distance to exit the patch
#tmp_array = scipy.ndimage.morphology.distance_transform_edt(patch_mask)
tmp_array = scipy.ndimage.morphology.distance_transform_edt(patch_mask)
#tmp_array = tmp_array[patch]
tmp_array = tmp_array[pixels_to_change]
#select the number of pixels that need to be converted
tmp_index = numpy.argsort(tmp_array)
tmp_index = tmp_index[:count - pixels_changed]
#convert the selected pixels into coordinates
#pixels_to_change = numpy.array(zip(patch[0], patch[1]))
pixels_to_change = numpy.array(zip(pixels_to_change[0], pixels_to_change[1]))
pixels_to_change = pixels_to_change[tmp_index]
pixels_to_change = apply(zip, pixels_to_change)
#change the pixels in the scenario
#scenario_array[pixels_to_change] = cover_id
target[pixels_to_change] = cover_id
pixels_changed = count
#alter other suitability rasters to prevent double conversion
for _, update_id, _ in change_list[index+1:]:
#update_arrays[update_id][pixels_to_change] = 0
target = update_arrays[update_id][patch_locations[label-1]]
target[pixels_to_change] = 0
break
else:
#convert patch, increase count of changes
target[pixels_to_change] = cover_id
pixels_changed += patch_sizes[label-1]
#alter other suitability rasters to prevent double conversion
for _, update_id, _ in change_list[index+1:]:
target = update_arrays[update_id][patch_locations[label-1]]
target[pixels_to_change] = 0
#report and record unchanged pixels
if pixels_changed < count:
LOGGER.warn("Not all pixels converted.")
unconverted_pixels[cover_id] = count - pixels_changed
#write new suitability arrays
for _, update_id, _ in change_list[index+1:]:
update_bands[update_id].WriteArray(update_arrays[update_id])
update_arrays[update_id] = None
update_bands[update_id] = None
update_ds[update_id] = None
scenario_band.WriteArray(scenario_array)
scenario_array = None
scenario_band = None
scenario_ds = None
#apply override
if args["override_layer"]:
LOGGER.info("Overriding pixels using values from field %s.", args["override_field"])
datasource = ogr.Open(args["override"])
layer = datasource.GetLayer()
dataset = gdal.Open(scenario_uri, 1)
if dataset == None:
msg = "Could not open landcover transition raster."
LOGGER.error(msg)
raise IOError, msg
if datasource == None:
msg = "Could not open override vector."
LOGGER.error(msg)
raise IOError, msg
if not bool(args["override_inclusion"]):
LOGGER.debug("Overriding all touched pixels.")
options = ["ALL_TOUCHED=TRUE", "ATTRIBUTE=%s" % args["override_field"]]
else:
LOGGER.debug("Overriding only pixels with covered center points.")
options = ["ATTRIBUTE=%s" % args["override_field"]]
gdal.RasterizeLayer(dataset, [1], layer, options=options)
dataset.FlushCache()
datasource = None
dataset = None
###
#tabulate coverages
###
unique_raster_values_count, transitions = get_transition_set_count_from_uri([landcover_uri, scenario_uri])
htm = open(landcover_htm_uri,'w')
htm.write("<html><head><title>Scenario Generator Report</title>")
htm.write("<style type='text/css'>")
htm.write("table {border-collapse: collapse; font-size: 1em;}")
htm.write("td {padding: 10px;}")
htm.write('body {font-family: Arial, Helvetica, sans-serif; font-size: 1em;}')
htm.write('h2 {background: #DDDDDD; padding: 10px;}')
htm.write("</style>")
jquery_uri = os.path.join(os.path.dirname(os.path.abspath(__file__)), "jquery-1.6.2.min.js")
htm.write("<script>\n" + open(jquery_uri).read() + "\n</script>")
highcharts_uri = os.path.join(os.path.dirname(os.path.abspath(__file__)), "highcharts.js")
htm.write("<script>\n" + open(highcharts_uri).read() + "\n</script>")
htm.write("</head><body>")
htm.write("<div style=''>")
htm.write("<h1>Scenario Output Summary</h1>")
htm.write("<h2>Initial Landscape</h2>")
htm.write("\n<table BORDER=1>")
initial_cover_id_list = unique_raster_values_count[landcover_uri].keys()
initial_cover_id_list.sort()
htm.write("\n<tr><td>ID</td><td>")
htm.write("</td><td>".join([str(cover_id) for cover_id in initial_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n<tr><td>Count</td><td>")
htm.write("</td><td>".join([str(unique_raster_values_count[landcover_uri][cover_id]) for cover_id in initial_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n</table>")
htm.write("<h2>Scenario Landscape</h2>")
htm.write("\n<table BORDER=1>")
scenario_cover_id_list = unique_raster_values_count[scenario_uri].keys()
scenario_cover_id_list.sort()
htm.write("\n<tr><td>ID</td><td>")
htm.write("</td><td>".join([str(cover_id) for cover_id in scenario_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n<tr><td>Count</td><td>")
htm.write("</td><td>".join([str(unique_raster_values_count[scenario_uri][cover_id]) for cover_id in scenario_cover_id_list]))
htm.write("\n</td></tr>")
htm.write("\n</table>")
cover_dict = {}
for cover_id in set(unique_raster_values_count[landcover_uri].keys()).union(set(unique_raster_values_count[scenario_uri].keys())):
try:
before = unique_raster_values_count[landcover_uri][cover_id]
except KeyError:
before = 0
try:
after =unique_raster_values_count[scenario_uri][cover_id]
except KeyError:
after = 0
cover_dict[cover_id] = (before, after)
htm.write("<h2>Change Table</h2>")
cover_names_dict = {}
transition_dict = pygeoprocessing.geoprocessing.get_lookup_from_csv(args["transition"], args["transition_id"])
cover_names_dict = {}
for cover in transition_dict:
cover_names_dict[cover] = transition_dict[cover]["Name"]
htm.write(generate_chart_html(cover_dict, cover_names_dict, workspace))
htm.write("<h2>Transition Matrix</h2>")
htm.write("\n<table BORDER=1>")
htm.write("\n<tr><td>ID</td><td>")
htm.write("</td><td>".join([str(cover_id) for cover_id in scenario_cover_id_list]))
htm.write("\n</td></tr>")
for initial_cover_id in initial_cover_id_list:
htm.write("\n<tr><td>%i</td>" % initial_cover_id)
for scenario_cover_id in scenario_cover_id_list:
try:
htm.write("<td>%i</td>" % transitions[0][initial_cover_id][scenario_cover_id])
except KeyError:
htm.write("<td><FONT COLOR=lightgray>%i</FONT></td>" % 0)
htm.write("\n</tr>")
htm.write("\n</table>")
unconverted_cover_id_list = unconverted_pixels.keys()
unconverted_cover_id_list.sort()
if len(unconverted_cover_id_list) > 0:
htm.write("<h2>Unconverted Pixels</h2>")
htm.write("\n<table BORDER=1>")
htm.write("<tr><td>ID</td><td>Count</td></tr>")
for cover_id in unconverted_cover_id_list:
htm.write("<tr><td>%i</td><td>%i</td></tr>" % (cover_id, unconverted_pixels[cover_id]))
htm.write("\n</table>")
else:
htm.write("<p><i>All target pixels converted.</i></p>")
htm.write("\n</html>")
#input CSVs
input_csv_list = []
if args["calculate_priorities"]:
input_csv_list.append((args["priorities_csv_uri"], "Priorities Table"))
if args["calculate_transition"] or args["calculate_factors"]:
input_csv_list.append((args["transition"], "Transition Table"))
if args["calculate_factors"]:
input_csv_list.append((args["suitability"], "Factors Table"))
htm.write("<h1>Input Tables</h1>")
for csv_uri, name in input_csv_list:
table = "\n<table BORDER=1><tr><td>" + open(csv_uri).read().strip().replace(",","</td><td>").replace("\n","</td></tr><tr><td>") + "</td></tr></table>"
htm.write("<h2>%s</h2>" % name)
htm.write(table)
htm.write("\n</div>\n</body>\n</html>")
htm.close()
| 42.042076 | 166 | 0.583283 | [
"BSD-3-Clause"
] | natcap/invest-natcap.invest-3 | invest_natcap/scenario_generator/scenario_generator.py | 59,952 | Python |
from __future__ import annotations
import argparse
import logging
from typing import TextIO
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("input", type=argparse.FileType('r'),
metavar="PUZZLE_INPUT")
parser.add_argument('-d', '--debug', action='store_true')
args = parser.parse_args(args)
return args
def init_logging(debug=False):
msg_format = '%(asctime)s %(levelname)s %(message)s'
date_format = '%m/%d/%Y %H:%M:%S'
level = logging.INFO
if debug:
level = logging.DEBUG
logging.basicConfig(format=msg_format, datefmt=date_format, level=level)
class Image:
def __init__(self, pixels: dict[tuple[int, int], str], void_pixel: str):
self.pixels = pixels
self.void_pixel = void_pixel
def __getitem__(self, key: tuple[int, int]) -> str:
try:
return self.pixels[key]
except KeyError:
return self.void_pixel
@staticmethod
def from_grid(grid: list[list[str]]) -> Image:
pixels = Image.grid2pixel(grid)
return Image(pixels, '.')
@staticmethod
def grid2pixel(grid: list[list[str]]) -> dict[tuple[int, int], str]:
image = {}
for y in range(len(grid)):
for x in range(len(grid[0])):
image[(x, y)] = grid[y][x]
return image
@staticmethod
def neighbors(pixel: tuple[int, int]) -> list[tuple[int, int]]:
x = pixel[0]
y = pixel[1]
return [(x - 1, y - 1), (x, y - 1), (x + 1, y - 1),
(x - 1, y), (x, y), (x + 1, y),
(x - 1, y + 1), (x, y + 1), (x + 1, y + 1)]
def pixel2idx(self, pixel: str) -> int:
bin_rep = pixel.replace('#', '1').replace('.', '0')
return int(bin_rep, base=2)
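    # For example (hypothetical window string, not taken from the puzzle input):
    # pixel2idx('...#...#.') reads as binary '000100010' and returns 34.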
def enhance_pixel(self, iea: str, pixel: tuple[int, int]) -> str:
surround = [self[n] for n in self.neighbors(pixel)]
idx = self.pixel2idx(''.join(surround))
return iea[idx]
def bounds(self) -> tuple[int, ...]:
x_values = [p[0] for p in self.pixels]
y_values = [p[1] for p in self.pixels]
return min(x_values), min(y_values), max(x_values), max(y_values)
def enhance(self, iea: str) -> Image:
new_pixels = {}
min_x, min_y, max_x, max_y = self.bounds()
for x in range(min_x - 2, max_x + 2):
for y in range(min_y - 2, max_y + 2):
new_pixels[(x, y)] = self.enhance_pixel(iea, (x, y))
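        # The infinite background is itself enhanced: an all-void 3x3 window is
        # looked up in the algorithm string, which can flip the background
        # between steps when the algorithm's first character is '#'.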
void_pixel = iea[self.pixel2idx(self.void_pixel * 9)]
return Image(new_pixels, void_pixel)
def lit_count(self):
return len([v for v in self.pixels.values() if v == '#'])
def load_input(fp: TextIO):
data = fp.read().strip().split('\n\n')
iea = data[0]
assert len(iea) == 512
grid = []
for line in data[1].strip().split('\n'):
grid.append(list(line))
image = Image.from_grid(grid)
return iea, image
def puzzle1(iea: str, image: Image) -> int:
for i in range(2):
image = image.enhance(iea)
return image.lit_count()
def puzzle2(iea, image) -> int:
for i in range(50):
image = image.enhance(iea)
return image.lit_count()
def main(argv=None):
args = parse_args(argv)
# Init logging
init_logging(args.debug)
iea, image = load_input(args.input)
answer = puzzle1(iea, image)
logging.info('Puzzle 1: %d', answer)
answer = puzzle2(iea, image)
logging.info('Puzzle 2: %d', answer)
if __name__ == '__main__':
main()
| 29.00813 | 76 | 0.575673 | [
"MIT"
] | tcmitchell/AdventOfCode | 2021/day20/day20.py | 3,568 | Python |