hexsha (stringlengths 40-40) | size (int64 5-1.03M) | ext (stringclasses 9 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-241) | max_stars_repo_name (stringlengths 5-125) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64 1-208k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-241) | max_issues_repo_name (stringlengths 5-125) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-241) | max_forks_repo_name (stringlengths 5-125) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 5-1.03M) | avg_line_length (float64 1.5-756k) | max_line_length (int64 4-869k) | alphanum_fraction (float64 0.01-0.98) | count_classes (int64 0-3.38k) | score_classes (float64 0-0.01) | count_generators (int64 0-832) | score_generators (float64 0-0) | count_decorators (int64 0-2.75k) | score_decorators (float64 0-0) | count_async_functions (int64 0-623) | score_async_functions (float64 0-0) | count_documentation (int64 3-581k) | score_documentation (float64 0.4-0.6) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d9b0c3d32e07c56a0732f0fca454740538a940fe | 451 | py | Python | setup.py | Kaslanarian/PythonSVM | 715eeef2a245736167addf45a6aee8b40b54d0c7 | [
"MIT"
] | 2 | 2021-09-25T01:00:37.000Z | 2021-09-27T12:13:24.000Z | setup.py | Kaslanarian/PythonSVM | 715eeef2a245736167addf45a6aee8b40b54d0c7 | [
"MIT"
] | 1 | 2021-09-17T12:08:14.000Z | 2021-09-17T12:08:14.000Z | setup.py | Kaslanarian/PythonSVM | 715eeef2a245736167addf45a6aee8b40b54d0c7 | [
"MIT"
] | null | null | null | import setuptools #enables develop
setuptools.setup(
name='pysvm',
version='0.1',
description='PySVM : A NumPy implementation of SVM based on SMO algorithm',
author_email="191300064@smail.nju.edu.cn",
packages=['pysvm'],
license='MIT License',
long_description=open('README.md', encoding='utf-8').read(),
    install_requires=[  # automatically installed dependencies
'numpy', 'sklearn'
],
url='https://github.com/Kaslanarian/PySVM',
)
| 28.1875 | 79 | 0.660754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.4946 |
d9b0df7f5ef294a68858d836af143c289d120187 | 4,375 | py | Python | Object_detection_image.py | hiperus0988/pyao | 72c56975a3d45aa033bdf7650b5369d59240395f | [
"Apache-2.0"
] | 1 | 2021-06-09T22:17:57.000Z | 2021-06-09T22:17:57.000Z | Object_detection_image.py | hiperus0988/pyao | 72c56975a3d45aa033bdf7650b5369d59240395f | [
"Apache-2.0"
] | null | null | null | Object_detection_image.py | hiperus0988/pyao | 72c56975a3d45aa033bdf7650b5369d59240395f | [
"Apache-2.0"
] | null | null | null | ######## Image Object Detection Using Tensorflow-trained Classifier #########
#
# Author: Evan Juras
# Date: 1/15/18
# Description:
# This program uses a TensorFlow-trained classifier to perform object detection.
# It loads the classifier and uses it to perform object detection on an image.
# It draws boxes and scores around the objects of interest in the image.
## Some of the code is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it more understandable to me.
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
IMAGE_NAME = 'test1.jpg'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
# Path to image
PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 6
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
# Draw the results of the detection (aka 'visualize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.60)
# All the results have been drawn on image. Now display the image.
cv2.imshow('Object detector', image)
# Press any key to close the image
cv2.waitKey(0)
# Clean up
cv2.destroyAllWindows()
| 36.458333 | 122 | 0.779886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,505 | 0.572571 |
d9b62ab258f0b51ef25d431f8fa66de9acd438a7 | 1,895 | py | Python | setup.py | giggslam/python-messengerbot-sdk | 4a6fadf96fe3425da9abc4726fbb84db6d84f7b5 | [
"Apache-2.0"
] | 23 | 2019-03-05T08:33:34.000Z | 2021-12-13T01:52:47.000Z | setup.py | giggslam/python-messengerbot-sdk | 4a6fadf96fe3425da9abc4726fbb84db6d84f7b5 | [
"Apache-2.0"
] | null | null | null | setup.py | giggslam/python-messengerbot-sdk | 4a6fadf96fe3425da9abc4726fbb84db6d84f7b5 | [
"Apache-2.0"
] | 6 | 2019-03-07T07:58:02.000Z | 2020-12-18T10:08:47.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
__version__ = ''
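# Read the version string out of facebookbot/__about__.py without importing the package.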
with open('facebookbot/__about__.py', 'r') as fd:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fd:
m = reg.match(line)
if m:
__version__ = m.group(1)
break
def _requirements():
with open('requirements.txt', 'r') as fd:
return [name.strip() for name in fd.readlines()]
with open('README.rst', 'r') as fd:
long_description = fd.read()
setup(
name="fbsdk",
version=__version__,
author="Sam Chang",
author_email="t0915290092@gmail.com",
maintainer="Sam Chang",
maintainer_email="t0915290092@gmail.com",
url="https://github.com/boompieman/fbsdk",
description="Facebook Messaging API SDK for Python",
long_description=long_description,
license='Apache License 2.0',
packages=[
"facebookbot", "facebookbot.models"
],
install_requires=_requirements(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Topic :: Software Development"
]
)
| 30.079365 | 76 | 0.663852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,092 | 0.576253 |
d9b8347698a1fe18b6d9ec66f6bfbfa77f2567be | 1,566 | py | Python | using_paramiko.py | allupramodreddy/cisco_py | 5488b56d9324011860b78998e694dcce6da5e3d1 | [
"Apache-2.0"
] | null | null | null | using_paramiko.py | allupramodreddy/cisco_py | 5488b56d9324011860b78998e694dcce6da5e3d1 | [
"Apache-2.0"
] | null | null | null | using_paramiko.py | allupramodreddy/cisco_py | 5488b56d9324011860b78998e694dcce6da5e3d1 | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python3
import paramiko,time
#using as SSH Client
client = paramiko.SSHClient()
# check dir(client) to find available options.
# auto adjust host key verification with yes or no
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# time for connecting to remote Cisco IOS
"""
Manually taking input
addr = input('Provide IP address to connect to: ')
user = input('Username: ')
pwd = getpass.getpass('Password: ')"""
# Taking input from files
f1 = open("devices.txt","r")
f2 = open("commands.txt","r")
# Read the command list once so it can be replayed for every device;
# iterating f2 directly would exhaust the file after the first device.
commands = f2.readlines()
for line in f1:
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    data = line.split(" ")
    # print(data)
    addr = data[0]
    user = data[1]
    pwd = data[2]
    f3 = open(addr+".txt","w+")
    # print(addr +" "+ user +" " +pwd)
    client.connect(addr,username=user,password=pwd,allow_agent=False,look_for_keys=False)
    # we have to ask for Shell
    device_access = client.invoke_shell()
    for command in commands:
        device_access.send(command)
        time.sleep(1)
    output = device_access.recv(55000).decode('ascii')
    f3.write(output)
    f3.close()
"""
THIS CODE IS FOR SINGLE COMMAND, FOR MULTIPLE COMMANDS CODE BELOW
# send command to the device
device_access.send("ter len 0\nshow run \n")
time.sleep(2)
# receive output from the device, convert it to byte-like format and print it
print(device_access.recv(550000).decode('ascii'))
# We can print the same to a file too
with open("csr1000v.txt","w") as f:
f.write(device_access.recv(550000).decode('ascii'))""" | 23.727273 | 89 | 0.691571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 907 | 0.579183 |
d9b86cc42aaff67200ff3f4f5f6d27121835fd8c | 733 | py | Python | old/.history/a_20201125192943.py | pscly/bisai1 | e619186cec5053a8e02bd59e48fc3ad3af47d19a | [
"MulanPSL-1.0"
] | null | null | null | old/.history/a_20201125192943.py | pscly/bisai1 | e619186cec5053a8e02bd59e48fc3ad3af47d19a | [
"MulanPSL-1.0"
] | null | null | null | old/.history/a_20201125192943.py | pscly/bisai1 | e619186cec5053a8e02bd59e48fc3ad3af47d19a | [
"MulanPSL-1.0"
] | null | null | null | # for n in range(400,500):
# i = n // 100
# j = n // 10 % 10
# k = n % 10
# if n == i ** 3 + j ** 3 + k ** 3:
# print(n)
# Problem 1 (question 16)
# input("请输入(第一次):")
# s1 = input("请输入(第二次):")
# l1 = s1.split(' ')
# l2 = []
# for i in l1:
# if i.isdigit():
# l2.append(int(i))
# for i in l2:
# if not (i % 6):
# print(i, end=" ")
# Problem 2 (question 17)
out_l1 = []
def bian_int_list(l1):
    re_l1 = []  # the list that will be returned
    for i in l1:
        re_l1.append(i)
    return re_l1
def jisuan(str_num):
he1 = 0
global out_l1
    for i in l1:
he1 += int(i)**2
if he1 > int(str_num):
out_l1.append(str_num)
return None
while 1:
in_1 = input("请输入数值:")
nums_l1 = in_1.split(' ')
| 13.089286 | 39 | 0.452933 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 441 | 0.553325 |
d9c69927875c451378bcb7d50069e903036beefa | 5,490 | py | Python | bathymetry_blink/bathymetry_blink.py | poster515/BlinkyTape_Python | edc2f7e43fbf07dbfdeba60da7acb7ae7a3707d0 | [
"MIT"
] | 26 | 2015-02-14T11:37:21.000Z | 2021-05-10T17:24:16.000Z | bathymetry_blink/bathymetry_blink.py | poster515/BlinkyTape_Python | edc2f7e43fbf07dbfdeba60da7acb7ae7a3707d0 | [
"MIT"
] | 8 | 2015-02-14T17:33:24.000Z | 2021-10-05T20:32:19.000Z | bathymetry_blink/bathymetry_blink.py | poster515/BlinkyTape_Python | edc2f7e43fbf07dbfdeba60da7acb7ae7a3707d0 | [
"MIT"
] | 15 | 2015-01-24T23:36:54.000Z | 2021-10-02T23:40:08.000Z | """
This script will modulate the blinky lights using the following algorithm:
1) uses user-provided location to obtain row of pixel data from bathy image
2) samples a 'number of LEDs' number of pixels from that row
3) shifts the sampled row data to center it at the location specified by user
4) displays resulting pixels on Blinky Tape
5) shifts next row by a given latitude, also specified by user
6) sleeps for user-specified period of time
Uses the following arguments:
-l/--location: tuple
Location of the user in tuple(lat, lon). This represents the center of the LED strip. Defaults to (0, 0)
-u/--update-interval: int
Update interval of the script, in minutes. Defaults to 10.
-p/--port: str
Serial port of the BlinkyLight (e.g., 'ttyAMA0', 'COM3'). Defaults to 'COM5'.
-d/--delta_latitude: int
    Vertical change in latitude applied at each update. May be 0, but this will result in LEDs that never change.
-i/--image: str
    Name of the PNG image that contains the color coded bathymetric data.
    The file currently named mapserv.png was obtained using the following API:
https://www.gebco.net/data_and_products/gebco_web_services/web_map_service/mapserv?request=getmap&service=wms&BBOX=-90,-180,90,180&format=image/png&height=600&width=1200&crs=EPSG:4326&layers=GEBCO_LATEST_SUB_ICE_TOPO&version=1.3.0
In lieu of providing command line arguments, you may alternatively edit the defaults in bath_config.json.
NOTE: runs via:
runfile('/BlinkyTape_Python/bathymetry_blink/bathymetry_blink.py', wdir='/BlinkyTape_Python/')
(C) 2021 Joseph Post (https://joeycodes.dev)
MIT Licensed
"""
import optparse
import json
from blinkytape import BlinkyTape
from time import sleep
from PIL import Image
import numpy as np
import sys
MAX_ERRORS = 3
num_errors = 0
# Obtain default parameters
with open("./bathymetry_blink/bathy_config.json") as f:
config = json.load(f)
# Default Blinky Tape port on Raspberry Pi is /dev/ttyACM0
parser = optparse.OptionParser()
parser.add_option("-p", "--port", dest="portname",
help="serial port (ex: /dev/ttyACM0)", default=config["port"])
parser.add_option("-l", "--location", dest="location",
help="Location of the center of the LED strip (ex: 70,-110)", default=config["location"])
parser.add_option("-u", "--update-rate", dest="update_rate",
help="How often to update elevation profile (mins) (ex: 5)", default=config["update_rate"])
parser.add_option("-d", "--delta-latitude", dest="delta_latitude",
help="Change in latitude during update (ex: 5)", default=config["delta_latitude"])
parser.add_option("-n", "--num-leds", dest="num_leds",
help="Number of LEDs in strip (ex: 60)", default=config["num_leds"])
parser.add_option("-i", "--image", dest="image_name",
help="Name of the map/bathymetry image (ex: ./mapserv.png)", default=config["image"])
(options, args) = parser.parse_args()
if args:
print("Unknown parameters: " + args)
# grab the values provided by user (or defaults)
port = options.portname
loc = options.location
rate = options.update_rate
delta = options.delta_latitude
n_leds = options.num_leds
i_name = options.image_name
# Some visual indication that it works, for headless setups (green tape)
bt = BlinkyTape(port, n_leds)
bt.displayColor(0, 100, 0)
bt.show()
sleep(2)
while True:
try:
# first, load image
im = Image.open(i_name) # Can be many different formats.
cols, rows = im.size
a = np.asarray(im) # of shape (rows, cols, channels)
# map loc latitude to 0-based index
latitude_index = min(rows - 1, max(0, (int)(((loc[0] - -90) / (90 - -90)) * (rows - 0) + 0)))
longitude_index = min(cols - 1, max(0, (int)(((loc[1] - -180) / (180 - -180)) * (cols - 0) + 0)))
# update the location of the next row of elevation data to take
loc[0] += delta
loc[0] = ((loc[0] + 90) % 180) - 90 # wraps to next pole if overflow
print("Lat index: " + str(latitude_index))
print("Lon index: " + str(longitude_index))
print("Next latitude: " + str(loc[0]))
# grab the applicable pixel indices
indices = [(int)(x*(cols/n_leds)) for x in range(n_leds)]
# sample that row of pixel data
output_pixels = np.take(a[latitude_index], indices, axis=0)
# rotate the row to center around the specified longitude
output_pixels = np.roll(output_pixels, longitude_index, axis=0)
# send all pixel data to bt
for pixel in output_pixels:
print("Sending r: {}, g: {}, b: {}".format(*pixel))
bt.sendPixel(*pixel)
# finally, show the image
bt.show()
# delete variables for memory management
del a
del im
# Tape resets to stored pattern after a few seconds of inactivity
sleep(rate * 60) # Wait specified number of minutes
# sleep(10) # Wait specified number of minutes
except KeyboardInterrupt:
print("Keyboard interrupt, ending program.")
sys.exit()
except RuntimeError as e:
print("Encountered runtime error: " + e.args[0])
# flush any incomplete data
bt.show()
num_errors += 1
if num_errors > MAX_ERRORS:
sys.exit("Error count exceeds that allowed.")
| 36.845638 | 230 | 0.654098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,142 | 0.572313 |
d9d317f8ac0c3d87ca7347265d7a9836b41ed098 | 2,481 | py | Python | gci-vci-serverless/src/helpers/vp_saves_helpers.py | ClinGen/gene-and-variant-curation-tools | 30f21d8f03d8b5c180c1ce3cb8401b5abc660080 | [
"MIT"
] | 1 | 2021-09-17T20:39:07.000Z | 2021-09-17T20:39:07.000Z | gci-vci-serverless/src/helpers/vp_saves_helpers.py | ClinGen/gene-and-variant-curation-tools | 30f21d8f03d8b5c180c1ce3cb8401b5abc660080 | [
"MIT"
] | 133 | 2021-08-29T17:24:26.000Z | 2022-03-25T17:24:31.000Z | gci-vci-serverless/src/helpers/vp_saves_helpers.py | ClinGen/gene-and-variant-curation-tools | 30f21d8f03d8b5c180c1ce3cb8401b5abc660080 | [
"MIT"
] | null | null | null | import datetime
import uuid
import simplejson as json
from src.db.s3_client import Client as S3Client
from decimal import Decimal
def get_from_archive(archive_key):
''' Download a VP Save from S3.
:param str archive_key: The vp_save data's location (S3 bucket and file path). This value is required.
'''
if archive_key is None or '/' not in archive_key:
raise ValueError()
bucket, key = archive_key.split('/', 1)
s3_client = S3Client()
try:
archive_object = json.loads(s3_client.get_object(bucket, key)['Body'].read(),parse_float=Decimal)
except Exception as e:
print('ERROR: Error downloading ' + key + ' from ' + bucket + ' bucket. ERROR\n%s' %e)
raise
return archive_object
def build(vp_save={}):
''' Builds and returns a valid vp_save object.
Builds a new vp_save object by creating default values for
required fields and combines any of the given attributes.
'''
vp_save['PK'] = str(uuid.uuid4())
# Set timestamps (for new data)
now = datetime.datetime.now().isoformat()
vp_save['date_created'] = now
vp_save['last_modified'] = now
vp_save['item_type'] = 'vp_save'
return vp_save
def archive(bucket, vp_save_pk, save_data):
''' Archives a vp save data to S3.
Uploads the save data object as a JSON file to S3. The location of the archive
depends on the bucket and the primary key of the save data. If the upload fails,
an exception is raised. If successful, returns the archive location.
:param str bucket: The name of the S3 bucket for the archive. This value is required.
:param str vp_save_pk: The vp_save PK to use as the name of the JSON file. This value is required.
:param obj save_data: The save data object to archive. This value is required.
'''
if bucket is None or len(bucket) <= 0:
raise ValueError()
if vp_save_pk is None or len(vp_save_pk) <= 0:
raise ValueError()
if not save_data:
raise ValueError()
archive_file = __archive_key(save_data) + '/' + vp_save_pk + '.json'
# Upload curation data to S3 archive bucket.
s3_client = S3Client()
try:
s3_client.put_object(
bytes(json.dumps(save_data).encode('UTF-8')),
bucket,
archive_file
)
except Exception as e:
print('ERROR: Error uploading ' + archive_file + ' to ' + bucket + ' bucket. ERROR\n%s' %e)
raise
archive_key_comps = [bucket, archive_file]
return '/'.join(archive_key_comps)
def __archive_key(save_data):
return save_data['PK']
| 27.263736 | 104 | 0.699315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,134 | 0.457074 |
d9d368d362ab070d71b3363fe0fb20728ec9660d | 5,985 | py | Python | src/entity/002_createRdf.py | toyo-bunko/paper_app | f988e05cf83711d98c5ed735c0fd74fcf11e0f05 | [
"Apache-2.0"
] | 1 | 2021-02-28T15:38:37.000Z | 2021-02-28T15:38:37.000Z | src/entity/002_createRdf.py | toyo-bunko/paper_app | f988e05cf83711d98c5ed735c0fd74fcf11e0f05 | [
"Apache-2.0"
] | null | null | null | src/entity/002_createRdf.py | toyo-bunko/paper_app | f988e05cf83711d98c5ed735c0fd74fcf11e0f05 | [
"Apache-2.0"
] | null | null | null | import shutil
import os
import json
import glob
import yaml
import sys
import urllib
import ssl
import csv
import time
import requests
import json
import csv
from rdflib import URIRef, BNode, Literal, Graph
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from rdflib import Namespace
all = Graph()
with open("data/dict.json") as f:
ln_map = json.load(f)
st_path = "../data/index.json"
with open(st_path) as f:
result = json.load(f)
uris = []
for obj in result:
fields = ["spatial", "agential"]
for field in fields:
values = obj[field]
for value in values:
uri = "chname:"+value
if field == "spatial":
uri = "place:"+value
if uri not in uris:
uris.append(uri)
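# For each collected entity URI, look up its cached DBpedia/Wikidata records and add the corresponding triples to the graph.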
for uri in uris:
print(uri)
tmp = uri.split(":")
prefix = tmp[0]
suffix = tmp[1]
ln = suffix
ln_org = ""
if ln in ln_map:
ln_org = ln
ln = ln_map[ln]
if len(ln) > 20:
continue
# ln = obj["uri"].split(":")[1]
'''
wiki_path = "data/wikidata/"+ln+".json"
wiki = {}
if os.path.exists(wiki_path):
with open(wiki_path) as f:
wiki = json.load(f)
# sameAs
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(wiki_url))
all.add(stmt)
obj = wiki["entities"][wiki_url.split("/")[-1]]
# description
if "descriptions" in obj and "ja" in obj["descriptions"]:
stmt = (subject, URIRef("http://schema.org/description"), Literal(obj["descriptions"]["ja"]["value"], lang="ja"))
all.add(stmt)
# label
if "labels" in obj and "ja" in obj["labels"]:
stmt = (subject, RDFS.label, Literal(obj["labels"]["ja"]["value"]))
all.add(stmt)
ln = wiki_url.split("/")[-1]
'''
db_path = "data/dbpedia_ja/"+ln+".json"
wiki_path = "data/wikidata/"+ln+".json"
db = {}
wiki = {}
if os.path.exists(db_path):
with open(db_path) as f:
db = json.load(f)
if os.path.exists(wiki_path):
with open(wiki_path) as f:
wiki = json.load(f)
db_uri = "http://ja.dbpedia.org/resource/"+ln
if db_uri not in db:
print("not" , db_uri)
continue
# ######
subject = URIRef("https://shibusawa-dlab.github.io/lab1/api/"+prefix+"/"+ln)
if prefix == "chname":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Agent"))
all.add(stmt)
elif prefix == "time":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Time"))
all.add(stmt)
elif prefix == "place":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Place"))
all.add(stmt)
elif prefix == "event":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Event"))
all.add(stmt)
elif prefix == "org":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Organization"))
all.add(stmt)
elif prefix == "keyword":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Keyword"))
all.add(stmt)
elif prefix == "type":
stmt = (subject, RDF.type, URIRef("https://jpsearch.go.jp/term/type/Type"))
all.add(stmt)
# ######
obj = db[db_uri]
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(db_uri))
all.add(stmt)
if "http://dbpedia.org/ontology/thumbnail" in obj:
stmt = (subject, URIRef("http://schema.org/image"), URIRef(obj["http://dbpedia.org/ontology/thumbnail"][0]["value"]))
all.add(stmt)
if "http://www.w3.org/2000/01/rdf-schema#label" in obj:
labels = obj["http://www.w3.org/2000/01/rdf-schema#label"]
for label in labels:
if label["lang"] == "ja":
stmt = (subject, RDFS.label, Literal(label["value"]))
all.add(stmt)
if "http://www.w3.org/2000/01/rdf-schema#comment" in obj:
labels = obj["http://www.w3.org/2000/01/rdf-schema#comment"]
for label in labels:
stmt = (subject, URIRef("http://schema.org/description"), Literal(label["value"], lang=label["lang"]))
all.add(stmt)
if "http://www.w3.org/2002/07/owl#sameAs" in obj:
labels = obj["http://www.w3.org/2002/07/owl#sameAs"]
for label in labels:
value = label["value"]
if "http://dbpedia.org" in value or "http://ja.dbpedia.org" in value or "www.wikidata.org" in value:
stmt = (subject, URIRef("http://www.w3.org/2002/07/owl#sameAs"), URIRef(value))
all.add(stmt)
    # Location information
'''
if "point" in obj and prefix == "place":
value = obj["point"]["value"].split(" ")
# addGeo関数
geoUri = addGeo({
"lat" : float(value[0]),
"long": float(value[1])
})
stmt = (subject, URIRef("http://schema.org/geo"), geoUri)
if suffix not in places:
places[suffix] = {
"lat" : float(value[0]),
"long": float(value[1])
}
all.add(stmt)
'''
    # Original name before normalization
if ln_org != "" and ln != ln_org:
stmt = (subject, URIRef("http://schema.org/name"), Literal(ln_org))
all.add(stmt)
path = "data/all.json"
all.serialize(destination=path, format='json-ld')
all.serialize(destination=path.replace(".json", ".rdf"), format='pretty-xml') | 29.338235 | 129 | 0.513116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,677 | 0.445795 |
d9d80db949c5d5f415b809076411a2404da55e53 | 10,912 | py | Python | sympy/combinatorics/testutil.py | ethankward/sympy | 44664d9f625a1c68bc492006cfe1012cb0b49ee4 | [
"BSD-3-Clause"
] | 2 | 2019-05-18T22:36:49.000Z | 2019-05-24T05:56:16.000Z | sympy/combinatorics/testutil.py | ethankward/sympy | 44664d9f625a1c68bc492006cfe1012cb0b49ee4 | [
"BSD-3-Clause"
] | 1 | 2020-04-22T12:45:26.000Z | 2020-04-22T12:45:26.000Z | sympy/combinatorics/testutil.py | ethankward/sympy | 44664d9f625a1c68bc492006cfe1012cb0b49ee4 | [
"BSD-3-Clause"
] | 3 | 2021-02-16T16:40:49.000Z | 2022-03-07T18:28:41.000Z | from sympy.combinatorics import Permutation
from sympy.combinatorics.util import _distribute_gens_by_base
rmul = Permutation.rmul
def _cmp_perm_lists(first, second):
"""
Compare two lists of permutations as sets.
This is used for testing purposes. Since the array form of a
permutation is currently a list, Permutation is not hashable
and cannot be put into a set.
Examples
========
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _cmp_perm_lists
>>> a = Permutation([0, 2, 3, 4, 1])
>>> b = Permutation([1, 2, 0, 4, 3])
>>> c = Permutation([3, 4, 0, 1, 2])
>>> ls1 = [a, b, c]
>>> ls2 = [b, c, a]
>>> _cmp_perm_lists(ls1, ls2)
True
"""
return {tuple(a) for a in first} == \
{tuple(a) for a in second}
def _naive_list_centralizer(self, other, af=False):
from sympy.combinatorics.perm_groups import PermutationGroup
"""
Return a list of elements for the centralizer of a subgroup/set/element.
This is a brute force implementation that goes over all elements of the
group and checks for membership in the centralizer. It is used to
test ``.centralizer()`` from ``sympy.combinatorics.perm_groups``.
Examples
========
>>> from sympy.combinatorics.testutil import _naive_list_centralizer
>>> from sympy.combinatorics.named_groups import DihedralGroup
>>> D = DihedralGroup(4)
>>> _naive_list_centralizer(D, D)
[Permutation([0, 1, 2, 3]), Permutation([2, 3, 0, 1])]
See Also
========
sympy.combinatorics.perm_groups.centralizer
"""
from sympy.combinatorics.permutations import _af_commutes_with
if hasattr(other, 'generators'):
elements = list(self.generate_dimino(af=True))
gens = [x._array_form for x in other.generators]
commutes_with_gens = lambda x: all(_af_commutes_with(x, gen) for gen in gens)
centralizer_list = []
if not af:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(Permutation._af_new(element))
else:
for element in elements:
if commutes_with_gens(element):
centralizer_list.append(element)
return centralizer_list
elif hasattr(other, 'getitem'):
return _naive_list_centralizer(self, PermutationGroup(other), af)
elif hasattr(other, 'array_form'):
return _naive_list_centralizer(self, PermutationGroup([other]), af)
def _verify_bsgs(group, base, gens):
"""
Verify the correctness of a base and strong generating set.
This is a naive implementation using the definition of a base and a strong
generating set relative to it. There are other procedures for
verifying a base and strong generating set, but this one will
serve for more robust testing.
Examples
========
>>> from sympy.combinatorics.named_groups import AlternatingGroup
>>> from sympy.combinatorics.testutil import _verify_bsgs
>>> A = AlternatingGroup(4)
>>> A.schreier_sims()
>>> _verify_bsgs(A, A.base, A.strong_gens)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.schreier_sims
"""
from sympy.combinatorics.perm_groups import PermutationGroup
strong_gens_distr = _distribute_gens_by_base(base, gens)
current_stabilizer = group
for i in range(len(base)):
candidate = PermutationGroup(strong_gens_distr[i])
if current_stabilizer.order() != candidate.order():
return False
current_stabilizer = current_stabilizer.stabilizer(base[i])
if current_stabilizer.order() != 1:
return False
return True
def _verify_centralizer(group, arg, centr=None):
"""
Verify the centralizer of a group/set/element inside another group.
This is used for testing ``.centralizer()`` from
``sympy.combinatorics.perm_groups``
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.perm_groups import PermutationGroup
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.combinatorics.testutil import _verify_centralizer
>>> S = SymmetricGroup(5)
>>> A = AlternatingGroup(5)
>>> centr = PermutationGroup([Permutation([0, 1, 2, 3, 4])])
>>> _verify_centralizer(S, A, centr)
True
See Also
========
_naive_list_centralizer,
sympy.combinatorics.perm_groups.PermutationGroup.centralizer,
_cmp_perm_lists
"""
if centr is None:
centr = group.centralizer(arg)
centr_list = list(centr.generate_dimino(af=True))
centr_list_naive = _naive_list_centralizer(group, arg, af=True)
return _cmp_perm_lists(centr_list, centr_list_naive)
def _verify_normal_closure(group, arg, closure=None):
from sympy.combinatorics.perm_groups import PermutationGroup
"""
Verify the normal closure of a subgroup/subset/element in a group.
This is used to test
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
Examples
========
>>> from sympy.combinatorics.named_groups import (SymmetricGroup,
... AlternatingGroup)
>>> from sympy.combinatorics.testutil import _verify_normal_closure
>>> S = SymmetricGroup(3)
>>> A = AlternatingGroup(3)
>>> _verify_normal_closure(S, A, closure=A)
True
See Also
========
sympy.combinatorics.perm_groups.PermutationGroup.normal_closure
"""
if closure is None:
closure = group.normal_closure(arg)
conjugates = set()
if hasattr(arg, 'generators'):
subgr_gens = arg.generators
elif hasattr(arg, '__getitem__'):
subgr_gens = arg
elif hasattr(arg, 'array_form'):
subgr_gens = [arg]
for el in group.generate_dimino():
for gen in subgr_gens:
conjugates.add(gen ^ el)
naive_closure = PermutationGroup(list(conjugates))
return closure.is_subgroup(naive_closure)
def canonicalize_naive(g, dummies, sym, *v):
"""
Canonicalize tensor formed by tensors of the different types
g permutation representing the tensor
dummies list of dummy indices
msym symmetry of the metric
v is a list of (base_i, gens_i, n_i, sym_i) for tensors of type `i`
base_i, gens_i BSGS for tensors of this type
    n_i number of tensors of type `i`
sym_i symmetry under exchange of two component tensors of type `i`
None no symmetry
0 commuting
1 anticommuting
Return 0 if the tensor is zero, else return the array form of
the permutation representing the canonical form of the tensor.
Examples
========
>>> from sympy.combinatorics.testutil import canonicalize_naive
>>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs
>>> from sympy.combinatorics import Permutation, PermutationGroup
>>> g = Permutation([1, 3, 2, 0, 4, 5])
>>> base2, gens2 = get_symmetric_group_sgs(2)
>>> canonicalize_naive(g, [2, 3], 0, (base2, gens2, 2, 0))
[0, 2, 1, 3, 4, 5]
"""
from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.tensor_can import gens_products, dummy_sgs
from sympy.combinatorics.permutations import Permutation, _af_rmul
v1 = []
for i in range(len(v)):
base_i, gens_i, n_i, sym_i = v[i]
v1.append((base_i, gens_i, [[]]*n_i, sym_i))
size, sbase, sgens = gens_products(*v1)
dgens = dummy_sgs(dummies, sym, size-2)
if isinstance(sym, int):
num_types = 1
dummies = [dummies]
sym = [sym]
else:
num_types = len(sym)
dgens = []
for i in range(num_types):
dgens.extend(dummy_sgs(dummies[i], sym[i], size - 2))
S = PermutationGroup(sgens)
D = PermutationGroup([Permutation(x) for x in dgens])
dlist = list(D.generate(af=True))
g = g.array_form
st = set()
for s in S.generate(af=True):
h = _af_rmul(g, s)
for d in dlist:
q = tuple(_af_rmul(d, h))
st.add(q)
a = list(st)
a.sort()
prev = (0,)*size
for h in a:
if h[:-2] == prev[:-2]:
if h[-1] != prev[-1]:
return 0
prev = h
return list(a[0])
def graph_certificate(gr):
"""
Return a certificate for the graph
gr adjacency list
The graph is assumed to be unoriented and without
external lines.
Associate to each vertex of the graph a symmetric tensor with
number of indices equal to the degree of the vertex; indices
are contracted when they correspond to the same line of the graph.
The canonical form of the tensor gives a certificate for the graph.
This is not an efficient algorithm to get the certificate of a graph.
Examples
========
>>> from sympy.combinatorics.testutil import graph_certificate
>>> gr1 = {0:[1, 2, 3, 5], 1:[0, 2, 4], 2:[0, 1, 3, 4], 3:[0, 2, 4], 4:[1, 2, 3, 5], 5:[0, 4]}
>>> gr2 = {0:[1, 5], 1:[0, 2, 3, 4], 2:[1, 3, 5], 3:[1, 2, 4, 5], 4:[1, 3, 5], 5:[0, 2, 3, 4]}
>>> c1 = graph_certificate(gr1)
>>> c2 = graph_certificate(gr2)
>>> c1
[0, 2, 4, 6, 1, 8, 10, 12, 3, 14, 16, 18, 5, 9, 15, 7, 11, 17, 13, 19, 20, 21]
>>> c1 == c2
True
"""
from sympy.combinatorics.permutations import _af_invert
from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize
items = list(gr.items())
items.sort(key=lambda x: len(x[1]), reverse=True)
pvert = [x[0] for x in items]
pvert = _af_invert(pvert)
# the indices of the tensor are twice the number of lines of the graph
num_indices = 0
for v, neigh in items:
num_indices += len(neigh)
# associate to each vertex its indices; for each line
# between two vertices assign the
# even index to the vertex which comes first in items,
# the odd index to the other vertex
vertices = [[] for i in items]
i = 0
for v, neigh in items:
for v2 in neigh:
if pvert[v] < pvert[v2]:
vertices[pvert[v]].append(i)
vertices[pvert[v2]].append(i+1)
i += 2
g = []
for v in vertices:
g.extend(v)
assert len(g) == num_indices
g += [num_indices, num_indices + 1]
size = num_indices + 2
assert sorted(g) == list(range(size))
g = Permutation(g)
vlen = [0]*(len(vertices[0])+1)
for neigh in vertices:
vlen[len(neigh)] += 1
v = []
for i in range(len(vlen)):
n = vlen[i]
if n:
base, gens = get_symmetric_group_sgs(i)
v.append((base, gens, n, 0))
v.reverse()
dummies = list(range(num_indices))
can = canonicalize(g, dummies, 0, *v)
return can
| 32.47619 | 98 | 0.641679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,734 | 0.525477 |
d9d95781d1bacab44253ba285649d7b99ee1e33d | 542 | py | Python | src/vatic_checker/config.py | jonkeane/vatic-checker | fa8aec6946dcfd3f466b62f9c00d81bc43514b22 | [
"MIT"
] | null | null | null | src/vatic_checker/config.py | jonkeane/vatic-checker | fa8aec6946dcfd3f466b62f9c00d81bc43514b22 | [
"MIT"
] | null | null | null | src/vatic_checker/config.py | jonkeane/vatic-checker | fa8aec6946dcfd3f466b62f9c00d81bc43514b22 | [
"MIT"
] | null | null | null | localhost = "http://localhost/" # your local host
database = "mysql://root@localhost/vaticChecker" # server://user:pass@localhost/dbname
min_training = 2 # the minimum number of training videos to be considered
recaptcha_secret = "" # recaptcha secret for verification
duplicate_annotations = False # Should the server allow for duplicate annotations?
import os.path
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# TODO: remove on server
import os
os.environ['PYTHON_EGG_CACHE'] = '/tmp/apache'
| 38.714286 | 94 | 0.745387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.571956 |
d9e551f94d290cc9b470d1fddfc0e91666dab7ba | 444 | py | Python | setup.py | zhanghang1989/notedown | b0fa1eac88d1cd7fa2261d6c454f82669e6f552b | [
"BSD-2-Clause"
] | null | null | null | setup.py | zhanghang1989/notedown | b0fa1eac88d1cd7fa2261d6c454f82669e6f552b | [
"BSD-2-Clause"
] | null | null | null | setup.py | zhanghang1989/notedown | b0fa1eac88d1cd7fa2261d6c454f82669e6f552b | [
"BSD-2-Clause"
] | null | null | null | from setuptools import setup
# create __version__
exec(open('./_version.py').read())
setup(
name="notedown",
version=__version__,
description="Convert markdown to IPython notebook.",
author="Aaron O'Leary",
author_email='dev@aaren.me',
url='http://github.com/aaren/notedown',
install_requires=['ipython', ],
entry_points={
'console_scripts': [
'notedown = notedown:cli',
],
}
)
| 22.2 | 56 | 0.628378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.445946 |
d9e5c18f6a37dd4a96dd21f7ddefb31b197848dd | 2,853 | py | Python | multithreaded_webcrawler.py | the-muses-ltd/Multithreaded-Webcrawler-Cassandra- | eee68faf3c6ecb548edd0e96ce445dcd366fb735 | [
"MIT"
] | null | null | null | multithreaded_webcrawler.py | the-muses-ltd/Multithreaded-Webcrawler-Cassandra- | eee68faf3c6ecb548edd0e96ce445dcd366fb735 | [
"MIT"
] | null | null | null | multithreaded_webcrawler.py | the-muses-ltd/Multithreaded-Webcrawler-Cassandra- | eee68faf3c6ecb548edd0e96ce445dcd366fb735 | [
"MIT"
] | null | null | null | # This is a reusable web crawler architecture that can be adapted to scrape any website.
# RESULTS:
# Roughly 24 seconds per thousand courses scraped for ThreadPoolExecutor vs 63s for unthreaded script.
# This is a very basic implementation of multithreading in order to show the proof of concept, but is a good base to build off of.
import requests
from bs4 import BeautifulSoup
import csv
from concurrent.futures import ProcessPoolExecutor, as_completed, ThreadPoolExecutor
import time
import logging
from mitopencourseware_crawler_worker import mit_crawler
def courses_spider(max_pages):
data_to_csv = [] #holds all data to send to csv
print("Webcrawler workers have started, please wait while we finish crawling...")
    # remove max pages loop (unnecessary)
page = 1
while page <= max_pages:
url = 'https://ocw.mit.edu/courses/'
source_code = requests.get(url)
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
# Multithread only the work:
# Tuning is required to find the most efficient amount of workers in the thread pool.
with ThreadPoolExecutor(max_workers=30) as executor:
start = time.time()
futures = [ executor.submit(work, link) for link in soup.findAll('h4', {'class': 'course_title'}, limit=100) ]
data_to_csv = []
for result in as_completed(futures):
data_to_csv.append(result.result())
end = time.time()
print("Time Taken to complete: {:.6f}s".format(end-start))
print("Courses extracted: ", len(data_to_csv))
page += 1
export_to_csv(data_to_csv)
def work(link):
    # replace this function with the specific crawler you want to use:
return mit_crawler(link)
# Exports data to a formatted CSV file. This will be replaced with multithreaded API calls to the Cassandra Prisma Database;
# in production on the cloud, the data will be sent to the temporary S3 database to be picked up by the AWS Lambda function, which will push it to the Cassandra Database.
def export_to_csv(csv_data):
with open('web_crawl_data.csv',mode='w') as csv_file:
field_names = ['Title','URL extension','External Website Logo','URL(href)','Description','Course logo URL']
csv_writer = csv.DictWriter(csv_file, fieldnames=field_names)#delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writeheader()
for course in csv_data:
course_data = {
'Title':course[0],
'URL extension':course[1],
'External Website Logo':course[2],
'URL(href)':course[3],
'Description':course[4],
'Course logo URL':course[5],
}
csv_writer.writerow(course_data)
| 42.58209 | 164 | 0.667368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,311 | 0.459516 |
d9efa4ffda8cacd286187e29ce110d292c7a1e64 | 946 | py | Python | clpy/sparse/util.py | fixstars/clpy | 693485f85397cc110fa45803c36c30c24c297df0 | [
"BSD-3-Clause"
] | 142 | 2018-06-07T07:43:10.000Z | 2021-10-30T21:06:32.000Z | clpy/sparse/util.py | fixstars/clpy | 693485f85397cc110fa45803c36c30c24c297df0 | [
"BSD-3-Clause"
] | 282 | 2018-06-07T08:35:03.000Z | 2021-03-31T03:14:32.000Z | clpy/sparse/util.py | fixstars/clpy | 693485f85397cc110fa45803c36c30c24c297df0 | [
"BSD-3-Clause"
] | 19 | 2018-06-19T11:07:53.000Z | 2021-05-13T20:57:04.000Z | import clpy
import clpy.sparse.base
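# CUDA preamble providing a software atomicAdd for double precision on GPU architectures older than sm_60.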
_preamble_atomic_add = '''
#if __CUDA_ARCH__ < 600
__device__ double atomicAdd(double* address, double val) {
unsigned long long* address_as_ull =
(unsigned long long*)address;
unsigned long long old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
'''
def isintlike(x):
try:
return bool(int(x) == x)
except (TypeError, ValueError):
return False
def isscalarlike(x):
return clpy.isscalar(x) or (clpy.sparse.base.isdense(x) and x.ndim == 0)
def isshape(x):
if not isinstance(x, tuple) or len(x) != 2:
return False
m, n = x
return isintlike(m) and isintlike(n)
| 24.25641 | 76 | 0.60148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 524 | 0.553911 |
d9f9cd4e7a0b73e79eb71d2bdbfa755d69a9cc9d | 597 | py | Python | examples/first_char_last_column.py | clarkfitzg/sta141c | 129704ba0952a4b80f9b093dcfa49f49f37b052d | [
"MIT"
] | 24 | 2019-01-08T20:10:11.000Z | 2021-11-26T12:18:58.000Z | examples/first_char_last_column.py | timilchene/sta141c-winter19 | 129704ba0952a4b80f9b093dcfa49f49f37b052d | [
"MIT"
] | 1 | 2017-06-25T05:35:24.000Z | 2017-06-25T05:35:24.000Z | examples/first_char_last_column.py | timilchene/sta141c-winter19 | 129704ba0952a4b80f9b093dcfa49f49f37b052d | [
"MIT"
] | 22 | 2019-01-08T20:02:15.000Z | 2021-12-16T23:27:56.000Z | #!/usr/bin/env python3
"""
For the last column, print only the first character.
Usage:
$ printf "100,200\n0,\n" | python3 first_char_last_column.py
Should print "100,2\n0,"
"""
import csv
from sys import stdin, stdout
def main():
reader = csv.reader(stdin)
writer = csv.writer(stdout)
for row in reader:
try:
row[-1] = row[-1][0]
except IndexError:
# Python: Better to ask forgiveness than permission
# Alternative: Look before you leap
pass
writer.writerow(row)
if __name__ == "__main__":
main()
| 19.258065 | 64 | 0.606365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.463987 |
8a045d9a56c4a8715b77c0b2cd2d5ff977fa98ed | 609 | py | Python | conf/feature_config.py | pupuwudi/nlp_xiaojiang | 182ac4522b6012a52de6e1d0db7e6a47cb716e5b | [
"MIT"
] | null | null | null | conf/feature_config.py | pupuwudi/nlp_xiaojiang | 182ac4522b6012a52de6e1d0db7e6a47cb716e5b | [
"MIT"
] | null | null | null | conf/feature_config.py | pupuwudi/nlp_xiaojiang | 182ac4522b6012a52de6e1d0db7e6a47cb716e5b | [
"MIT"
] | 2 | 2021-01-18T10:07:20.000Z | 2022-01-12T10:09:47.000Z | # -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/5/10 9:13
# @author :Mo
# @function :path of FeatureProject
import pathlib
import sys
import os
# base dir
projectdir = str(pathlib.Path(os.path.abspath(__file__)).parent.parent)
sys.path.append(projectdir)
# path of BERT model
model_dir = projectdir + '/Data/chinese_L-12_H-768_A-12'
config_name = model_dir + '/bert_config.json'
ckpt_name = model_dir + '/bert_model.ckpt'
vocab_file = model_dir + '/vocab.txt'
# GPU memory usage fraction
gpu_memory_fraction = 0.32
# by default, use the output of the second-to-last layer as the sentence vector
layer_indexes = [-2]
# maximum sequence length
max_seq_len = 32
| 22.555556 | 72 | 0.689655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.494721 |
8a1292fe9e365e4f3b12243aeeeb62b3fcd34222 | 1,067 | py | Python | MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 4/Problem Set 4/get_word_score.py | henriqueumeda/-Python-study | 28e93a377afa4732037a29eb74d4bc7c9e24b62f | [
"MIT"
] | null | null | null | MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 4/Problem Set 4/get_word_score.py | henriqueumeda/-Python-study | 28e93a377afa4732037a29eb74d4bc7c9e24b62f | [
"MIT"
] | null | null | null | MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 4/Problem Set 4/get_word_score.py | henriqueumeda/-Python-study | 28e93a377afa4732037a29eb74d4bc7c9e24b62f | [
"MIT"
] | null | null | null | SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1,
'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
def getWordScore(word, n):
"""
Returns the score for a word. Assumes the word is a valid word.
The score for a word is the sum of the points for letters in the
word, multiplied by the length of the word, PLUS 50 points if all n
letters are used on the first turn.
Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is
worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)
word: string (lowercase letters)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: int >= 0
"""
total_points = 0
for letter in word:
total_points += SCRABBLE_LETTER_VALUES[letter]
total_points *= len(word)
if len(word) == n:
total_points += 50
return total_points
print(getWordScore('waybill', 7))
| 35.566667 | 115 | 0.585754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 636 | 0.596064 |
8a15ab57e7398ab067062419a83d15fd9bf34d36 | 434 | py | Python | ex062.py | noahbarros/Python-Exercises | fafda898473bc984280e201ed11d8ad76cc8624a | [
"MIT"
] | 1 | 2021-07-13T21:41:00.000Z | 2021-07-13T21:41:00.000Z | ex062.py | noahbarros/Python-Exercises | fafda898473bc984280e201ed11d8ad76cc8624a | [
"MIT"
] | null | null | null | ex062.py | noahbarros/Python-Exercises | fafda898473bc984280e201ed11d8ad76cc8624a | [
"MIT"
] | null | null | null | primeiro = int(input('Digite o primeiro termo da PA: '))
razão = int(input('Digite a razão da PA: '))
termo = primeiro
cont = 1
total = 0
mais = 10
while mais != 0:
total += mais
while cont <= total:
print(f'{termo} ', end='')
termo += razão
cont += 1
print('Pausa')
mais = int(input('Quantos termos você quer usar a mais? '))
print(f'a progressão foi finalizada com {total} termos mostrados')
| 27.125 | 66 | 0.612903 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.407745 |
8a19876a956cc7df8eee4ce39d6fc5531c4cfc7c | 3,401 | py | Python | src/api/datamanage/pro/lifecycle/data_trace/data_set_create.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/api/datamanage/pro/lifecycle/data_trace/data_set_create.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/api/datamanage/pro/lifecycle/data_trace/data_set_create.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from copy import deepcopy
from datamanage.pro import exceptions as dm_pro_errors
from datamanage.utils.api import MetaApi
from datamanage.pro.utils.time import utc_to_local, str_to_datetime
from datamanage.pro.lifecycle.models_dict import (
DATASET_CREATE_MAPPINGS,
DATASET_CREATE_EVENT_INFO_DICT,
DataTraceShowType,
ComplexSearchBackendType,
DataTraceFinishStatus,
)
def get_dataset_create_info(dataset_id, dataset_type):
"""获取数据足迹中和数据创建相关信息
:param dataset_id: 数据id
:param dataset_type: 数据类型
:return: 数据创建相关信息
:rtype: list
"""
    # 1) Fetch the dataset creation info from dgraph
data_set_create_info_statement = """
{
get_dataset_create_info(func: eq(%s, "%s")){created_by created_at}
}
""" % (
DATASET_CREATE_MAPPINGS[dataset_type]['data_set_pk'],
dataset_id,
)
query_result = MetaApi.complex_search(
{"backend_type": ComplexSearchBackendType.DGRAPH.value, "statement": data_set_create_info_statement}, raw=True
)
create_info_ret = query_result['data']['data']['get_dataset_create_info']
if not (isinstance(create_info_ret, list) and create_info_ret):
raise dm_pro_errors.GetDataSetCreateInfoError(message_kv={'dataset_id': dataset_id})
    # 2) Build the formatted creation info
create_trace_dict = deepcopy(DATASET_CREATE_EVENT_INFO_DICT)
create_trace_dict.update(
{
"sub_type": dataset_type,
"sub_type_alias": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'],
"description": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'],
"created_at": utc_to_local(create_info_ret[0]['created_at']),
"created_by": create_info_ret[0]['created_by'],
"show_type": DataTraceShowType.DISPLAY.value,
"datetime": str_to_datetime(utc_to_local(create_info_ret[0]['created_at'])),
"status": DataTraceFinishStatus.STATUS,
"status_alias": DataTraceFinishStatus.STATUS_ALIAS,
}
)
return [create_trace_dict]
| 44.168831 | 118 | 0.728021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,039 | 0.576151 |
8a20fc9b93bd3fc7e19c79190d5875b049bc7526 | 4,136 | py | Python | build/lib/FinMesh/usgov/__init__.py | johnjdailey/FinMesh | 64048b02bfec1a24de840877b38e82f4fa813d22 | [
"MIT"
] | 1 | 2020-08-14T16:09:54.000Z | 2020-08-14T16:09:54.000Z | build/lib/FinMesh/usgov/__init__.py | johnjdailey/FinMesh | 64048b02bfec1a24de840877b38e82f4fa813d22 | [
"MIT"
] | null | null | null | build/lib/FinMesh/usgov/__init__.py | johnjdailey/FinMesh | 64048b02bfec1a24de840877b38e82f4fa813d22 | [
"MIT"
] | null | null | null | import os
import requests
import xmltodict
import csv
import json
# # # # # # # # # #
# FRED DATA BELOW #
# # # # # # # # # #
FRED_BASE_URL = 'https://api.stlouisfed.org/fred/'
GEOFRED_BASE_URL = 'https://api.stlouisfed.org/geofred/'
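# Helper that appends the FRED API key (read from the FRED_TOKEN environment variable) to a request URL.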
def append_fred_token(url):
token = os.getenv('FRED_TOKEN')
return f'{url}&api_key={token}'
FRED_SERIES_OBS_URL = FRED_BASE_URL + 'series/observations?'
def fred_series(series, file_type=None, realtime_start=None, realtime_end=None, limit=None, offset=None, sort_order=None, observation_start=None, observation_end=None, units=None, frequency=None, aggregation_method=None, output_type=None, vintage_dates=None):
## Returns time series historical data for the requested FRED data.
url = FRED_SERIES_OBS_URL + f'series_id={series}'
if file_type: url += f'&file_type={file_type}'
if realtime_start: url += f'&realtime_start={realtime_start}'
if realtime_end: url += f'&realtime_end={realtime_end}'
if limit: url += f'&limit={limit}'
if offset: url += f'&offset={offset}'
if sort_order: url += f'&sort_order={sort_order}'
if observation_start: url += f'&observation_start={observation_start}'
if observation_end: url += f'&observation_end={observation_end}'
if units: url += f'&units={units}'
if frequency: url += f'&frequency={frequency}'
if aggregation_method: url += f'&aggregation_method={aggregation_method}'
if output_type: url += f'&output_type={output_type}'
if vintage_dates: url += f'&vintage_dates={vintage_dates}'
url = append_fred_token(url)
result = requests.get(url)
return result.text
GEOFRED_SERIES_META_URL = GEOFRED_BASE_URL + 'series/group?'
def geofred_series_meta(series_id, file_type=None):
## Returns meta data for the requested FRED data.
url = GEOFRED_SERIES_META_URL + f'series_id={series_id}'
if file_type: url += f'&file_type={file_type}'
url = append_fred_token(url)
result = requests.get(url)
return result.text
GEOFRED_REGIONAL_SERIES_URL = GEOFRED_BASE_URL + 'series/data?'
def geofred_regional_series(series_id, file_type=None, date=None, start_date=None):
## Returns the historical, geographically organized time series data for the requested FRED data.
url = GEOFRED_REGIONAL_SERIES_URL + f'series_id={series_id}'
if file_type: url += f'&file_type={file_type}'
if date: url += f'&date={date}'
if start_date: url += f'&start_date={start_date}'
url = append_fred_token(url)
result = requests.get(url)
return result.text
# # # # # # # # # # # # # # # #
# GOVERNMENT YIELD CURVE DATA #
# # # # # # # # # # # # # # # #
GOV_YIELD_URL = 'https://data.treasury.gov/feed.svc/DailyTreasuryYieldCurveRateData?$filter=month(NEW_DATE)%20eq%204%20and%20year(NEW_DATE)%20eq%202019'
def get_yield():
## Returns government treasury bond yields. Organized in Python dictionary format by bond length.
# Formatting of XML to Python Dict
curve = requests.get(GOV_YIELD_URL)
parse_curve = xmltodict.parse(curve.content)
# This is based around retrieving the n last dates or average of n days.
feed = parse_curve['feed']
entry = feed['entry']
last_entry = len(entry)-1
content = entry[last_entry]['content']['m:properties']
# Dict that contains the whole yield curve so there is no need to bring in each rate.
yield_curve_values = {
'date' : entry[last_entry]['content']['m:properties']['d:NEW_DATE']['#text'],
'1month' : float(content['d:BC_1MONTH']['#text']),
'2month' : float(content['d:BC_2MONTH']['#text']),
'3month' : float(content['d:BC_3MONTH']['#text']),
'6month' : float(content['d:BC_6MONTH']['#text']),
'1year' : float(content['d:BC_1YEAR']['#text']),
'2year' : float(content['d:BC_2YEAR']['#text']),
'3year' : float(content['d:BC_3YEAR']['#text']),
'5year' : float(content['d:BC_5YEAR']['#text']),
'10year' : float(content['d:BC_10YEAR']['#text']),
'20year' : float(content['d:BC_20YEAR']['#text']),
'30year' : float(content['d:BC_30YEAR']['#text']),
}
return yield_curve_values
| 44 | 259 | 0.676499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,869 | 0.451886 |
8a29eefe067ae42942e4915562e64419af3d1cde | 950 | py | Python | scripts_python3/exchange/deleteExchange.py | bcvsolutions/winrm-ad-connector | 9b45dae78d3ba24fe6b00e090f8763d3162e1570 | [
"Apache-2.0"
] | null | null | null | scripts_python3/exchange/deleteExchange.py | bcvsolutions/winrm-ad-connector | 9b45dae78d3ba24fe6b00e090f8763d3162e1570 | [
"Apache-2.0"
] | 2 | 2020-05-27T07:15:28.000Z | 2020-12-17T05:22:54.000Z | scripts_python3/exchange/deleteExchange.py | bcvsolutions/winrm-ad-connector | 9b45dae78d3ba24fe6b00e090f8763d3162e1570 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# All params from IdM are stored in the environment; you can get them with os.environ["paramName"]
import sys, os
# this is needed so winrm_wrapper can be imported from the parent dir
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import winrm_wrapper
import codecs
uid = os.environ["__UID__"]
winrm_wrapper.writeLog("Delete start for " + uid)
# Load PS script from file and replace params
winrm_wrapper.writeLog("loading script")
f = codecs.open(os.environ["script"], encoding='utf-8', mode='r')
command = f.read()
command = command.replace("$uid", uid)
# Call wrapper
winrm_wrapper.executeScript(os.environ["endpoint"], os.environ["authentication"], os.environ["user"],
os.environ["password"], os.environ["caTrustPath"], os.environ["ignoreCaValidation"], command, uid)
winrm_wrapper.writeLog("Delete end for " + uid)
print("__UID__=" + uid)
sys.exit()
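# Illustrative note (not part of the original connector script): the wrapper call above
# assumes IdM has populated all of these environment variables before the script runs.
# for key in ("__UID__", "script", "endpoint", "authentication", "user",
#             "password", "caTrustPath", "ignoreCaValidation"):
#     assert key in os.environ, "missing IdM parameter: " + key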
| 35.185185 | 134 | 0.705263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 437 | 0.46 |
8a2f400a7655554fbc57b5f622cd3afad8069e45 | 427 | py | Python | gcp-python-fn/main.py | FuriKuri/faas-playground | 52618e21064e327d2874d2b73cfe5fb247d3dd6e | [
"MIT"
] | 1 | 2019-05-07T13:15:16.000Z | 2019-05-07T13:15:16.000Z | gcp-python-fn/main.py | FuriKuri/faas-playground | 52618e21064e327d2874d2b73cfe5fb247d3dd6e | [
"MIT"
] | null | null | null | gcp-python-fn/main.py | FuriKuri/faas-playground | 52618e21064e327d2874d2b73cfe5fb247d3dd6e | [
"MIT"
] | null | null | null | def hello_world(request):
request_json = request.get_json()
name = 'World'
if request_json and 'name' in request_json:
name = request_json['name']
headers = {
'Access-Control-Allow-Origin': 'https://furikuri.net',
'Access-Control-Allow-Methods': 'GET, POST',
'Access-Control-Allow-Headers': 'Content-Type'
}
return ('Hello ' + name + '! From GCP + Python', 200, headers)
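# Illustrative local test sketch (not part of the deployed function). GCP passes a Flask
# request object; any stand-in exposing get_json() is enough for a quick manual check.
# class _FakeRequest:
#     def get_json(self):
#         return {'name': 'FaaS'}
# body, status, cors_headers = hello_world(_FakeRequest())
# print(status, body)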
| 35.583333 | 66 | 0.620609 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.430913 |
8a30c3ee79ce2efcb14fdc2c9e26c3ab71e499c1 | 671 | py | Python | tests/test_i18n.py | vthriller/flask-kajiki | eadaa0aa45d23507066758b9e74091bddbc943c4 | [
"BSD-3-Clause"
] | null | null | null | tests/test_i18n.py | vthriller/flask-kajiki | eadaa0aa45d23507066758b9e74091bddbc943c4 | [
"BSD-3-Clause"
] | null | null | null | tests/test_i18n.py | vthriller/flask-kajiki | eadaa0aa45d23507066758b9e74091bddbc943c4 | [
"BSD-3-Clause"
] | null | null | null | from kajiki import i18n
from flask import request
from flask_kajiki import render_template
# N. B. setting i18n.gettext would affect tests from all modules,
# so we test for a request path that only functions from this module could set
def gettext(s):
if request.path == '/test_i18n':
return s.upper()
return s
i18n.gettext = gettext
def test_does_translations(app):
"""Callback interface is able to inject Translator filter"""
with app.test_request_context(path='/test_i18n'):
rendered = render_template('i18n.html')
# TODO DOCTYPE; see also render_args
expected = '<p>HELLO!</p>'
assert rendered == expected
| 27.958333 | 76 | 0.704918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.42921 |
8a3543c746387ad12029585c2e306e26ec984737 | 4,324 | py | Python | Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py | quangnguyendang/Reinforcement_Learning | 2551ce95068561c553500838ee6b976f001ba667 | [
"MIT"
] | null | null | null | Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py | quangnguyendang/Reinforcement_Learning | 2551ce95068561c553500838ee6b976f001ba667 | [
"MIT"
] | null | null | null | Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py | quangnguyendang/Reinforcement_Learning | 2551ce95068561c553500838ee6b976f001ba667 | [
"MIT"
] | null | null | null | # Credit to https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0
import gym
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
env = gym.make('FrozenLake-v0')
# NEURAL NETWORK IMPLEMENTATION
tf.reset_default_graph()
# Feature vector for current state representation
input1 = tf.placeholder(shape=[1, env.observation_space.n], dtype=tf.float32)
# tf.Variable(<initial-value>, name=<optional-name>)
# tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
# Weight matrix W initialized uniformly in the range 0 - 0.01 (similar to the *0.01 scaling Andrew Ng uses)
W = tf.Variable(tf.random_uniform([env.observation_space.n, env.action_space.n], 0, 0.01))
# Qout with shape [1, env.action_space.n] - Action state value for Q[s, a] with every a available at a state
Qout = tf.matmul(input1, W)
# Greedy action at a state
predict = tf.argmax(Qout, axis=1)
# Placeholder for the target Q-values (computed from the next state)
nextQ = tf.placeholder(shape=[1, env.action_space.n], dtype=tf.float32)
# Sum-of-squares loss between predicted and target Q-values
loss = tf.reduce_sum(tf.square(Qout - nextQ))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
updateModel = trainer.minimize(loss)
# TRAIN THE NETWORK
init = tf.global_variables_initializer()
# Set learning parameters
y = 0.99
e = 0.1
number_episodes = 2000
# List to store total rewards and steps per episode
jList = []
rList = []
with tf.Session() as sess:
sess.run(init)
for i in range(number_episodes):
print("Episode #{} is running!".format(i))
# First state
s = env.reset()
rAll = 0
d = False
j = 0
# Q network
while j < 200: # or While not d:
j += 1
# Choose action by epsilon (e) greedy
# print("s = ", s," --> Identity s:s+1: ", np.identity(env.observation_space.n)[s:s+1])
# s = 0 --> Identity s: s + 1: [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# s = 1 --> Identity s: s + 1: [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# Identity [s:s+1] is a one-hot vector
# Therefore W is the actual Q value
a, allQ = sess.run([predict, Qout], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1]})
if np.random.rand(1) < e:
a[0] = env.action_space.sample()
s1, r, d, _ = env.step(a[0])
# Obtain next state Q value by feeding the new state throughout the network
Q1 = sess.run(Qout, feed_dict={input1: np.identity(env.observation_space.n)[s1:s1+1]})
maxQ1 = np.max(Q1)
targetQ = allQ
targetQ[0, a[0]] = r + y * maxQ1
# Train our network using target and predicted Q values
_, W1 = sess.run([updateModel, W], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1], nextQ: targetQ})
rAll += r
s = s1
if d:
e = 1./((i/50) + 10)
break
jList.append(j)
rList.append(rAll)
env.close()
plt.figure()
plt.plot(rList, label="Return - Q Learning")
plt.show()
plt.figure()
plt.plot(jList, label="Steps - Q Learning")
plt.show()
# -------------------------------------------------------------------------
# TABULAR IMPLEMENTATION
#
# # Set learning parameters
# lr = 0.8
# y = 0.95
# number_episodes = 20000
#
# # Initial table with all zeros
# Q = np.zeros([env.observation_space.n, env.action_space.n])
#
# # List of reward and steps per episode
# rList = []
# for i in range (number_episodes):
# print("Episode #{} is running!".format(i))
# s = env.reset()
# rAll = 0
# d = False
# j = 0
# while j < 99:
# j += 1
# # Choose an action by greedily (with noise) picking from Q table
# # Because of the noise, it is epsilon-greedy with epsilon decreasing over time
# a = np.argmax(Q[s, :] + np.random.rand(1, env.action_space.n)*(1./(i + 1)))
# s1, r, d, _ = env.step(a)
# # env.render()
#
# # Update Q table with new knowledge
# Q[s, a] = Q[s, a] + lr * (r + y * np.max(Q[s1, :]) - Q[s, a])
# rAll += r
# s = s1
# if d:
# break
# rList.append(rAll)
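# Illustrative sketch (not part of the original script): the learned weight matrix is
# itself the Q-table, so a greedy policy can be read straight off it. Assumes it runs
# inside the tf.Session above, where W1 holds the most recently fetched weights.
# q_table = W1                                # shape [n_states, n_actions]
# greedy_policy = np.argmax(q_table, axis=1)  # best action for every state
# print(greedy_policy.reshape(4, 4))          # FrozenLake-v0 is a 4x4 grid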
| 30.666667 | 155 | 0.586725 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,371 | 0.548335 |
8a3651a34d3b1893e6f70ebe64b9db39d329cd63 | 8,496 | py | Python | testing/cross_language/util/supported_key_types.py | chanced/tink | 9cc3a01ac0165b033ed51dc9d0812a98b4b6e305 | [
"Apache-2.0"
] | null | null | null | testing/cross_language/util/supported_key_types.py | chanced/tink | 9cc3a01ac0165b033ed51dc9d0812a98b4b6e305 | [
"Apache-2.0"
] | null | null | null | testing/cross_language/util/supported_key_types.py | chanced/tink | 9cc3a01ac0165b033ed51dc9d0812a98b4b6e305 | [
"Apache-2.0"
] | 1 | 2022-01-02T20:54:04.000Z | 2022-01-02T20:54:04.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All KeyTypes and which languages support them."""
# Placeholder for import for type annotations
from tink import aead
from tink import daead
from tink import hybrid
from tink import mac
from tink import prf
from tink import signature
from tink import streaming_aead
from tink.proto import tink_pb2
# All languages supported by cross-language tests.
ALL_LANGUAGES = ['cc', 'java', 'go', 'python']
# All KeyTypes (without the prefix 'type.googleapis.com/google.crypto.tink.')
AEAD_KEY_TYPES = [
'AesEaxKey',
'AesGcmKey',
'AesGcmSivKey',
'AesCtrHmacAeadKey',
'ChaCha20Poly1305Key',
'XChaCha20Poly1305Key',
]
DAEAD_KEY_TYPES = ['AesSivKey']
STREAMING_AEAD_KEY_TYPES = [
'AesCtrHmacStreamingKey',
'AesGcmHkdfStreamingKey',
]
HYBRID_PRIVATE_KEY_TYPES = ['EciesAeadHkdfPrivateKey']
MAC_KEY_TYPES = [
'AesCmacKey',
'HmacKey',
]
SIGNATURE_KEY_TYPES = [
'EcdsaPrivateKey',
'Ed25519PrivateKey',
'RsaSsaPkcs1PrivateKey',
'RsaSsaPssPrivateKey',
]
PRF_KEY_TYPES = [
'AesCmacPrfKey',
'HmacPrfKey',
'HkdfPrfKey',
]
ALL_KEY_TYPES = (
AEAD_KEY_TYPES + DAEAD_KEY_TYPES + STREAMING_AEAD_KEY_TYPES +
HYBRID_PRIVATE_KEY_TYPES + MAC_KEY_TYPES + SIGNATURE_KEY_TYPES +
PRF_KEY_TYPES)
# All languages that are supported by a KeyType
SUPPORTED_LANGUAGES = {
'AesEaxKey': ['cc', 'java', 'python'],
'AesGcmKey': ['cc', 'java', 'go', 'python'],
'AesGcmSivKey': ['cc', 'python'],
'AesCtrHmacAeadKey': ['cc', 'java', 'go', 'python'],
'ChaCha20Poly1305Key': ['java', 'go'],
'XChaCha20Poly1305Key': ['cc', 'java', 'go', 'python'],
'AesSivKey': ['cc', 'java', 'go', 'python'],
'AesCtrHmacStreamingKey': ['cc', 'java', 'go', 'python'],
'AesGcmHkdfStreamingKey': ['cc', 'java', 'go', 'python'],
'EciesAeadHkdfPrivateKey': ['cc', 'java', 'go', 'python'],
'AesCmacKey': ['cc', 'java', 'go', 'python'],
'HmacKey': ['cc', 'java', 'go', 'python'],
'EcdsaPrivateKey': ['cc', 'java', 'go', 'python'],
'Ed25519PrivateKey': ['cc', 'java', 'go', 'python'],
'RsaSsaPkcs1PrivateKey': ['cc', 'java', 'python'],
'RsaSsaPssPrivateKey': ['cc', 'java', 'python'],
'AesCmacPrfKey': ['cc', 'java', 'go', 'python'],
'HmacPrfKey': ['cc', 'java', 'go', 'python'],
'HkdfPrfKey': ['cc', 'java', 'go', 'python'],
}
KEY_TYPE_FROM_URL = {
'type.googleapis.com/google.crypto.tink.' + key_type: key_type
for key_type in ALL_KEY_TYPES}
# For each KeyType, a list of all KeyTemplate Names that must be supported.
KEY_TEMPLATE_NAMES = {
'AesEaxKey': ['AES128_EAX', 'AES256_EAX'],
'AesGcmKey': ['AES128_GCM', 'AES256_GCM'],
'AesGcmSivKey': ['AES128_GCM_SIV', 'AES256_GCM_SIV'],
'AesCtrHmacAeadKey': ['AES128_CTR_HMAC_SHA256', 'AES256_CTR_HMAC_SHA256'],
'ChaCha20Poly1305Key': ['CHACHA20_POLY1305'],
'XChaCha20Poly1305Key': ['XCHACHA20_POLY1305'],
'AesSivKey': ['AES256_SIV'],
'AesCtrHmacStreamingKey': [
'AES128_CTR_HMAC_SHA256_4KB',
'AES256_CTR_HMAC_SHA256_4KB',
],
'AesGcmHkdfStreamingKey': [
'AES128_GCM_HKDF_4KB',
'AES256_GCM_HKDF_4KB',
'AES256_GCM_HKDF_1MB',
],
'EciesAeadHkdfPrivateKey': [
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM',
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256'
],
'AesCmacKey': ['AES_CMAC'],
'HmacKey': [
'HMAC_SHA256_128BITTAG', 'HMAC_SHA256_256BITTAG',
'HMAC_SHA512_256BITTAG', 'HMAC_SHA512_512BITTAG'
],
'EcdsaPrivateKey': [
'ECDSA_P256', 'ECDSA_P384', 'ECDSA_P384_SHA384', 'ECDSA_P521',
'ECDSA_P256_IEEE_P1363', 'ECDSA_P384_IEEE_P1363',
'ECDSA_P384_SHA384_IEEE_P1363', 'ECDSA_P521_IEEE_P1363'
],
'Ed25519PrivateKey': ['ED25519'],
'RsaSsaPkcs1PrivateKey': [
'RSA_SSA_PKCS1_3072_SHA256_F4', 'RSA_SSA_PKCS1_4096_SHA512_F4'
],
'RsaSsaPssPrivateKey': [
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4',
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4'
],
'AesCmacPrfKey': ['AES_CMAC_PRF'],
'HmacPrfKey': ['HMAC_PRF_SHA256', 'HMAC_PRF_SHA512'],
'HkdfPrfKey': ['HKDF_PRF_SHA256'],
}
# KeyTemplate (as Protobuf) for each KeyTemplate name.
KEY_TEMPLATE = {
'AES128_EAX':
aead.aead_key_templates.AES128_EAX,
'AES256_EAX':
aead.aead_key_templates.AES256_EAX,
'AES128_GCM':
aead.aead_key_templates.AES128_GCM,
'AES256_GCM':
aead.aead_key_templates.AES256_GCM,
'AES128_GCM_SIV':
aead.aead_key_templates.AES128_GCM_SIV,
'AES256_GCM_SIV':
aead.aead_key_templates.AES256_GCM_SIV,
'AES128_CTR_HMAC_SHA256':
aead.aead_key_templates.AES128_CTR_HMAC_SHA256,
'AES256_CTR_HMAC_SHA256':
aead.aead_key_templates.AES256_CTR_HMAC_SHA256,
'CHACHA20_POLY1305':
tink_pb2.KeyTemplate(
type_url=('type.googleapis.com/google.crypto.tink.' +
'ChaCha20Poly1305Key'),
output_prefix_type=tink_pb2.TINK),
'XCHACHA20_POLY1305':
aead.aead_key_templates.XCHACHA20_POLY1305,
'AES256_SIV':
daead.deterministic_aead_key_templates.AES256_SIV,
'AES128_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES128_CTR_HMAC_SHA256_4KB,
'AES256_CTR_HMAC_SHA256_4KB':
streaming_aead.streaming_aead_key_templates.AES256_CTR_HMAC_SHA256_4KB,
'AES128_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES128_GCM_HKDF_4KB,
'AES256_GCM_HKDF_4KB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_4KB,
'AES256_GCM_HKDF_1MB':
streaming_aead.streaming_aead_key_templates.AES256_GCM_HKDF_1MB,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM':
hybrid.hybrid_key_templates.ECIES_P256_HKDF_HMAC_SHA256_AES128_GCM,
'ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256':
hybrid.hybrid_key_templates
.ECIES_P256_HKDF_HMAC_SHA256_AES128_CTR_HMAC_SHA256,
'AES_CMAC':
mac.mac_key_templates.AES_CMAC,
'HMAC_SHA256_128BITTAG':
mac.mac_key_templates.HMAC_SHA256_128BITTAG,
'HMAC_SHA256_256BITTAG':
mac.mac_key_templates.HMAC_SHA256_256BITTAG,
'HMAC_SHA512_256BITTAG':
mac.mac_key_templates.HMAC_SHA512_256BITTAG,
'HMAC_SHA512_512BITTAG':
mac.mac_key_templates.HMAC_SHA512_512BITTAG,
'ECDSA_P256':
signature.signature_key_templates.ECDSA_P256,
'ECDSA_P384':
signature.signature_key_templates.ECDSA_P384,
'ECDSA_P384_SHA384':
signature.signature_key_templates.ECDSA_P384_SHA384,
'ECDSA_P521':
signature.signature_key_templates.ECDSA_P521,
'ECDSA_P256_IEEE_P1363':
signature.signature_key_templates.ECDSA_P256_IEEE_P1363,
'ECDSA_P384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_IEEE_P1363,
'ECDSA_P384_SHA384_IEEE_P1363':
signature.signature_key_templates.ECDSA_P384_SHA384_IEEE_P1363,
'ECDSA_P521_IEEE_P1363':
signature.signature_key_templates.ECDSA_P521_IEEE_P1363,
'ED25519':
signature.signature_key_templates.ED25519,
'RSA_SSA_PKCS1_3072_SHA256_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_3072_SHA256_F4,
'RSA_SSA_PKCS1_4096_SHA512_F4':
signature.signature_key_templates.RSA_SSA_PKCS1_4096_SHA512_F4,
'RSA_SSA_PSS_3072_SHA256_SHA256_32_F4':
signature.signature_key_templates.RSA_SSA_PSS_3072_SHA256_SHA256_32_F4,
'RSA_SSA_PSS_4096_SHA512_SHA512_64_F4':
signature.signature_key_templates.RSA_SSA_PSS_4096_SHA512_SHA512_64_F4,
'AES_CMAC_PRF':
prf.prf_key_templates.AES_CMAC,
'HMAC_PRF_SHA256':
prf.prf_key_templates.HMAC_SHA256,
'HMAC_PRF_SHA512':
prf.prf_key_templates.HMAC_SHA512,
'HKDF_PRF_SHA256':
prf.prf_key_templates.HKDF_SHA256,
}
SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME = {
name: SUPPORTED_LANGUAGES[KEY_TYPE_FROM_URL[template.type_url]]
for name, template in KEY_TEMPLATE.items()
}
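# Illustrative helper (not part of the original module): list the key template names
# that a given language is expected to support, using the tables defined above.
def template_names_supported_in(lang):
  return sorted(
      name for name, langs in SUPPORTED_LANGUAGES_BY_TEMPLATE_NAME.items()
      if lang in langs)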
| 37.263158 | 79 | 0.711982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,121 | 0.485052 |
8a43f4805ca2bfbefacf005fd91befea7f1c3e71 | 492 | py | Python | gen-cfg.py | magetron/secure-flow-prototype | c683939620fec889f882ea095d2b27e3e4bb98fe | [
"Apache-2.0"
] | null | null | null | gen-cfg.py | magetron/secure-flow-prototype | c683939620fec889f882ea095d2b27e3e4bb98fe | [
"Apache-2.0"
] | null | null | null | gen-cfg.py | magetron/secure-flow-prototype | c683939620fec889f882ea095d2b27e3e4bb98fe | [
"Apache-2.0"
] | null | null | null | from staticfg import CFGBuilder
userCfg = CFGBuilder().build_from_file('user.py', './auction/user.py')
bidCfg = CFGBuilder().build_from_file('bid.py', './auction/bid.py')
auctionCfg = CFGBuilder().build_from_file('auction.py','./auction/auction.py')
#auctionEventCfg = CFGBuilder().build_from_file('auction_event.py','./auction/auction_event.py')
bidCfg.build_visual('bidCfg', 'pdf')
auctionCfg.build_visual('auctionCfg', 'pdf')
#auctionEventCfg.build_visual('auctionEventCfg.pdf', 'pdf')
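# Illustrative addition (not in the original script): userCfg is built above but never
# rendered; the same build_visual call used for the other graphs would apply to it too.
# userCfg.build_visual('userCfg', 'pdf')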
| 41 | 96 | 0.760163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 273 | 0.554878 |
8a4ccded7f4f9f9be895e48e8a31955a7046241e | 4,371 | py | Python | dddppp/settings.py | tysonclugg/dddppp | 22f52d671ca71c2df8d6ac566a1626e5f05b3159 | [
"MIT"
] | null | null | null | dddppp/settings.py | tysonclugg/dddppp | 22f52d671ca71c2df8d6ac566a1626e5f05b3159 | [
"MIT"
] | null | null | null | dddppp/settings.py | tysonclugg/dddppp | 22f52d671ca71c2df8d6ac566a1626e5f05b3159 | [
"MIT"
] | null | null | null | """
Django settings for dddppp project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import pkg_resources
import pwd
PROJECT_NAME = 'dddppp'
# Enforce a valid POSIX environment
# Get missing environment variables via call to pwd.getpwuid(...)
_PW_CACHE = None
_PW_MAP = {
'LOGNAME': 'pw_name',
'USER': 'pw_name',
'USERNAME': 'pw_name',
'UID': 'pw_uid',
'GID': 'pw_gid',
'HOME': 'pw_dir',
'SHELL': 'pw_shell',
}
for _missing_env in set(_PW_MAP).difference(os.environ):
if _PW_CACHE is None:
_PW_CACHE = pwd.getpwuid(os.getuid())
os.environ[_missing_env] = str(getattr(_PW_CACHE, _PW_MAP[_missing_env]))
del _PW_CACHE, _PW_MAP, pwd
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'nfd_lvt=&k#h#$a^_l09j#5%s=mg+0aw=@t84ry$&rps43c33+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dddp',
'dddp.server',
'dddp.accounts',
'dddppp.slides',
]
for (requirement, pth) in [
('django-extensions', 'django_extensions'),
]:
try:
pkg_resources.get_distribution(requirement)
except (
pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict,
):
continue
INSTALLED_APPS.append(pth)
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'django.middleware.security.SecurityMiddleware',
]
ROOT_URLCONF = 'dddppp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dddppp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('PGDATABASE', PROJECT_NAME),
'USER': os.environ.get('PGUSER', os.environ['LOGNAME']),
'PASSWORD': os.environ.get('DJANGO_DATABASE_PASSWORD', ''),
'HOST': os.environ.get('PGHOST', ''),
'PORT': os.environ.get('PGPORT', ''),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-au'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# django-secure
# see: https://github.com/carljm/django-secure/ for more options
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_FRAME_DENY = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
DDDPPP_CONTENT_TYPES = []
PROJ_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
| 26.981481 | 77 | 0.695722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,561 | 0.585907 |
8a4fee7da31280c4ead726e734baac5bb3fc023e | 1,227 | py | Python | setup.py | dantas/wifi | e9cd6df7d3411f1532843999f6c33f45369c3fe4 | [
"BSD-2-Clause"
] | 1 | 2019-04-29T14:57:45.000Z | 2019-04-29T14:57:45.000Z | setup.py | dantas/wifi | e9cd6df7d3411f1532843999f6c33f45369c3fe4 | [
"BSD-2-Clause"
] | null | null | null | setup.py | dantas/wifi | e9cd6df7d3411f1532843999f6c33f45369c3fe4 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup
import os
__doc__ = """
Command line tool and library wrappers around iwlist and
/etc/network/interfaces.
"""
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
install_requires = [
'setuptools',
'pbkdf2',
]
try:
import argparse
except ImportError:
install_requires.append('argparse')
version = '1.0.0'
setup(
name='wifi',
version=version,
author='Rocky Meza, Gavin Wahl',
author_email='rockymeza@gmail.com',
description=__doc__,
long_description=read('README.rst'),
packages=['wifi'],
scripts=['bin/wifi'],
test_suite='tests',
platforms=["Debian"],
license='BSD',
install_requires=install_requires,
classifiers=[
"License :: OSI Approved :: BSD License",
"Topic :: System :: Networking",
"Operating System :: POSIX :: Linux",
"Environment :: Console",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
],
data_files=[
('/etc/bash_completion.d/', ['extras/wifi-completion.bash']),
]
)
| 23.150943 | 70 | 0.625102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 580 | 0.472698 |
8a50f54c898793f1acb00252a2b2f5ed4e326667 | 790 | py | Python | setup.py | skojaku/fastnode2vec | bb65f68469f00f489fa6744d35b8756200b4e285 | [
"MIT"
] | 61 | 2020-04-21T18:58:47.000Z | 2022-03-26T22:41:45.000Z | setup.py | skojaku/fastnode2vec | bb65f68469f00f489fa6744d35b8756200b4e285 | [
"MIT"
] | 17 | 2020-04-21T22:37:17.000Z | 2022-03-31T22:36:03.000Z | setup.py | skojaku/fastnode2vec | bb65f68469f00f489fa6744d35b8756200b4e285 | [
"MIT"
] | 6 | 2020-07-30T01:41:59.000Z | 2022-01-19T10:13:01.000Z | #!/usr/bin/env python3
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="fastnode2vec",
version="0.0.5",
author="Louis Abraham",
license="MIT",
author_email="louis.abraham@yahoo.fr",
description="Fast implementation of node2vec",
long_description=read("README.md"),
long_description_content_type="text/markdown",
url="https://github.com/louisabraham/fastnode2vec",
packages=["fastnode2vec"],
install_requires=["numpy", "numba", "gensim", "click", "tqdm"],
python_requires=">=3.6",
entry_points={"console_scripts": ["fastnode2vec = fastnode2vec.cli:node2vec"]},
classifiers=["Topic :: Scientific/Engineering :: Artificial Intelligence"],
)
| 29.259259 | 83 | 0.694937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.464557 |
8a54334c8ec0d2c98a16bb220c95973a631adeb1 | 3,810 | py | Python | unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py | duliodenis/python_master_degree | 3ab76838ce2fc1606f28e988a3273dd27122a621 | [
"MIT"
] | 19 | 2019-03-14T01:39:32.000Z | 2022-02-03T00:36:43.000Z | unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py | duliodenis/python_master_degree | 3ab76838ce2fc1606f28e988a3273dd27122a621 | [
"MIT"
] | 1 | 2020-04-10T01:01:16.000Z | 2020-04-10T01:01:16.000Z | unit_13/26-Data_Structures/4_Merge_Sort_and_Linked_Lists/3_linked_list_merge_sort.py | duliodenis/python_master_degree | 3ab76838ce2fc1606f28e988a3273dd27122a621 | [
"MIT"
] | 5 | 2019-01-02T20:46:05.000Z | 2020-07-08T22:47:48.000Z | #
# Data Structures: Linked List Merge Sort: The Conquer Step
# Python Techdegree
#
# Created by Dulio Denis on 3/24/19.
# Copyright (c) 2019 ddApps. All rights reserved.
# ------------------------------------------------
from linked_list import Node, LinkedList
def merge_sort(linked_list):
'''
Sorts a linked list in ascending order.
    - Recursively divide the linked list into sublists containing a single node
    - Repeatedly merge the sublists to produce sorted sublists until one remains
Returns a sorted linked list.
Runs in O(kn log n) time.
'''
if linked_list.size() == 1:
return linked_list
elif linked_list.is_empty():
return linked_list
left_half, right_half = split(linked_list)
left = merge_sort(left_half)
right = merge_sort(right_half)
return merge(left, right)
def split(linked_list):
'''
Divide the unsorted list at the midpoint into sublists.
Takes O(k log n) quasilinear time.
'''
if linked_list == None or linked_list.head == None:
left_half = linked_list
right_half = None
return left_half, right_half
else: # non-empty linked lists
size = linked_list.size()
midpoint = size // 2
mid_node = linked_list.node_at_index(midpoint-1)
left_half = linked_list
right_half = LinkedList()
        right_half.head = mid_node.next_node  # attach second half to the new LinkedList expected by merge_sort
mid_node.next_node = None
return left_half, right_half
def merge(left, right):
'''
Merges two linked lists, sorting by data in nodes.
Returns a new, merged list.
Runs in O(n) linear time.
'''
# Create a new linked list that contains nodes from
# merging left and right
merged = LinkedList()
# Add a fake head that is discarded later to simplify code
merged.add(0)
# Set current to the head of the linked list
current = merged.head
# Obtain head nodes for left and right linked lists
left_head = left.head
right_head = right.head
# Iterate over left and right until we reach the tail node
# of either
while left_head or right_head:
# If the head node of the left is None, we're past the tail
        # Add the node from right to merged linked list
if left_head is None:
current.next_node = right_head
# Call next on right to set loop condition to False
right_head = right_head.next_node
# If the head node of right is None, we're past the tail
# Add the tail node from left to merged linked list
elif right_head is None:
current.next_node = left_head
# Call next on left to set loop condition to False
left_head = left_head.next_node
else:
# Not at either tail node
# Obtain node data to perform comparison operations
left_data = left_head.data
right_data = right_head.data
# If data on left is less than right, set current to left node
if left_data < right_data:
current.next_node = left_head
# Move left head to next node
left_head = left_head.next_node
# If data on left is greater than right, set current to right node
else:
current.next_node = right_head
# Move right head to next node
right_head = right_head.next_node
# Move current to next node
current = current.next_node
# Discard fake head and set first merged node as head
head = merged.head.next_node
merged.head = head
return merged
l = LinkedList()
l.add(10)
l.add(2)
l.add(44)
l.add(15)
l.add(200)
print(l)
sorted_linked_list = merge_sort(l)
print(sorted_linked_list)
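# For reference, a minimal stand-in for the linked_list module imported above
# (illustrative only; the course's real module provides this same interface).
# It assumes add() prepends, which is how the fake head is used in merge().
#
# class Node:
#     def __init__(self, data, next_node=None):
#         self.data = data
#         self.next_node = next_node
#
# class LinkedList:
#     def __init__(self):
#         self.head = None
#
#     def is_empty(self):
#         return self.head is None
#
#     def size(self):
#         count, current = 0, self.head
#         while current:
#             count += 1
#             current = current.next_node
#         return count
#
#     def add(self, data):
#         self.head = Node(data, self.head)
#
#     def node_at_index(self, index):
#         current = self.head
#         for _ in range(index):
#             current = current.next_node
#         return current
#
#     def __repr__(self):
#         values, current = [], self.head
#         while current:
#             values.append(str(current.data))
#             current = current.next_node
#         return ' -> '.join(values)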
| 32.288136 | 81 | 0.630971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,722 | 0.451969 |
8a5438fd129b5b6996b6b2555c75bb6bb382b7d5 | 5,639 | py | Python | nearpy/examples/example2.py | samyoo78/NearPy | 1b534b864d320d875508e95cd2b76b6d8c07a90b | [
"MIT"
] | 624 | 2015-01-02T21:45:28.000Z | 2022-03-02T11:04:27.000Z | nearpy/examples/example2.py | samyoo78/NearPy | 1b534b864d320d875508e95cd2b76b6d8c07a90b | [
"MIT"
] | 65 | 2015-02-06T09:47:46.000Z | 2021-09-26T01:45:26.000Z | nearpy/examples/example2.py | samyoo78/NearPy | 1b534b864d320d875508e95cd2b76b6d8c07a90b | [
"MIT"
] | 136 | 2015-01-07T04:45:41.000Z | 2021-11-25T17:46:07.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Ole Krause-Sparmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy
import scipy
import unittest
import time
from nearpy import Engine
from nearpy.distances import CosineDistance
from nearpy.hashes import RandomBinaryProjections, HashPermutations, HashPermutationMapper
def example2():
# Dimension of feature space
DIM = 100
    # Number of data points (don't use too many because of the exact search below)
POINTS = 20000
##########################################################
print('Performing indexing with HashPermutations...')
t0 = time.time()
# Create permutations meta-hash
permutations = HashPermutations('permut')
# Create binary hash as child hash
rbp_perm = RandomBinaryProjections('rbp_perm', 14)
rbp_conf = {'num_permutation':50,'beam_size':10,'num_neighbour':100}
# Add rbp as child hash of permutations hash
permutations.add_child_hash(rbp_perm, rbp_conf)
# Create engine
engine_perm = Engine(DIM, lshashes=[permutations], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm.store_vector(v)
# Then update permuted index
permutations.build_permuted_index()
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 3
print('\nNeighbour distances with HashPermutations:')
print(' -> Candidate count is %d' % engine_perm.candidate_count(query))
results = engine_perm.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix, query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with HashPermutationMapper...')
t0 = time.time()
# Create permutations meta-hash
permutations2 = HashPermutationMapper('permut2')
# Create binary hash as child hash
rbp_perm2 = RandomBinaryProjections('rbp_perm2', 14)
# Add rbp as child hash of permutations hash
permutations2.add_child_hash(rbp_perm2)
# Create engine
engine_perm2 = Engine(DIM, lshashes=[permutations2], distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_perm2.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
# Do random query on engine 4
print('\nNeighbour distances with HashPermutationMapper:')
print(' -> Candidate count is %d' % engine_perm2.candidate_count(query))
results = engine_perm2.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
print('\nPerforming indexing with multiple binary hashes...')
t0 = time.time()
hashes = []
for k in range(20):
hashes.append(RandomBinaryProjections('rbp_%d' % k, 10))
# Create engine
engine_rbps = Engine(DIM, lshashes=hashes, distance=CosineDistance())
# First index some random vectors
matrix = numpy.zeros((POINTS,DIM))
for i in range(POINTS):
v = numpy.random.randn(DIM)
matrix[i] = v
engine_rbps.store_vector(v)
t1 = time.time()
print('Indexing took %f seconds' % (t1-t0))
# Get random query vector
query = numpy.random.randn(DIM)
    # Do random query on the engine with multiple binary hashes
print('\nNeighbour distances with multiple binary hashes:')
print(' -> Candidate count is %d' % engine_rbps.candidate_count(query))
results = engine_rbps.neighbours(query)
dists = [x[2] for x in results]
print(dists)
# Real neighbours
print('\nReal neighbour distances:')
query = query.reshape((DIM))
dists = CosineDistance().distance(matrix,query)
dists = dists.reshape((-1,))
dists = sorted(dists)
print(dists[:10])
##########################################################
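# Illustrative entry point (not in the original example file), so the demo can be run
# directly as a script.
if __name__ == '__main__':
    example2()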
| 32.039773 | 90 | 0.662529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,668 | 0.473134 |
8a60852354e6415290eaf2e5371028a21ee46376 | 1,004 | py | Python | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res18_market1501_176_80_1.1G_1.3/code/core/data_manager.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | 1 | 2020-12-18T14:49:19.000Z | 2020-12-18T14:49:19.000Z | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res50_market1501_256_128_5.4G_1.3/code/core/data_manager.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null | models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_personreid-res50_market1501_256_128_5.4G_1.3/code/core/data_manager.py | guochunhe/Vitis-AI | e86b6efae11f8703ee647e4a99004dc980b84989 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
import glob
import re
from os import path as osp
from .market1501 import Market1501
__factory = {
'market1501': Market1501
}
def get_names():
return list(__factory.keys())
def init_dataset(name, *args, **kwargs):
if name not in __factory.keys():
raise KeyError("Unknown datasets: {}".format(name))
return __factory[name](*args, **kwargs)
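# Illustrative usage sketch (not part of the original module); constructor arguments
# are passed straight through to the dataset class registered in __factory.
# print(get_names())                    # ['market1501']
# dataset = init_dataset('market1501')  # Market1501 kwargs could be passed here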
| 27.888889 | 74 | 0.737052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.593625 |
8a62e622419e3b5175ed6a324e076188b956be4c | 2,313 | py | Python | azure-devops/azext_devops/vstsCompressed/service_hooks/v4_0/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | azure-devops/azext_devops/vstsCompressed/service_hooks/v4_0/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | 37 | 2020-04-27T07:45:19.000Z | 2021-04-05T07:27:15.000Z | azure-devops/azext_devops/vstsCompressed/service_hooks/v4_0/models/__init__.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .models import Consumer
from .models import ConsumerAction
from .models import Event
from .models import EventTypeDescriptor
from .models import ExternalConfigurationDescriptor
from .models import FormattedEventMessage
from .models import IdentityRef
from .models import InputDescriptor
from .models import InputFilter
from .models import InputFilterCondition
from .models import InputValidation
from .models import InputValue
from .models import InputValues
from .models import InputValuesError
from .models import InputValuesQuery
from .models import Notification
from .models import NotificationDetails
from .models import NotificationResultsSummaryDetail
from .models import NotificationsQuery
from .models import NotificationSummary
from .models import Publisher
from .models import PublisherEvent
from .models import PublishersQuery
from .models import ReferenceLinks
from .models import ResourceContainer
from .models import SessionToken
from .models import Subscription
from .models import SubscriptionsQuery
from .models import VersionedResource
__all__ = [
'Consumer',
'ConsumerAction',
'Event',
'EventTypeDescriptor',
'ExternalConfigurationDescriptor',
'FormattedEventMessage',
'IdentityRef',
'InputDescriptor',
'InputFilter',
'InputFilterCondition',
'InputValidation',
'InputValue',
'InputValues',
'InputValuesError',
'InputValuesQuery',
'Notification',
'NotificationDetails',
'NotificationResultsSummaryDetail',
'NotificationsQuery',
'NotificationSummary',
'Publisher',
'PublisherEvent',
'PublishersQuery',
'ReferenceLinks',
'ResourceContainer',
'SessionToken',
'Subscription',
'SubscriptionsQuery',
'VersionedResource',
]
| 33.042857 | 94 | 0.685257 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,056 | 0.45655 |
8a678b6dfe1f80688ee851169cd059181b03b309 | 5,922 | py | Python | electrum/dnssec.py | Jesusown/electrum | 0df05dd914c823acae1828cad3b20bdeb13150e9 | [
"MIT"
] | 5,905 | 2015-01-02T17:05:36.000Z | 2022-03-29T07:28:29.000Z | electrum/dnssec.py | Jesusown/electrum | 0df05dd914c823acae1828cad3b20bdeb13150e9 | [
"MIT"
] | 6,097 | 2015-01-01T21:20:25.000Z | 2022-03-31T23:55:01.000Z | electrum/dnssec.py | Jesusown/electrum | 0df05dd914c823acae1828cad3b20bdeb13150e9 | [
"MIT"
] | 2,202 | 2015-01-02T18:31:25.000Z | 2022-03-28T15:35:03.000Z | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Check DNSSEC trust chain.
# Todo: verify expiration dates
#
# Based on
# http://backreference.org/2010/11/17/dnssec-verification-with-dig/
# https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py
import dns
import dns.name
import dns.query
import dns.dnssec
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdtypes.ANY.NS
import dns.rdtypes.ANY.CNAME
import dns.rdtypes.ANY.DLV
import dns.rdtypes.ANY.DNSKEY
import dns.rdtypes.ANY.DS
import dns.rdtypes.ANY.NSEC
import dns.rdtypes.ANY.NSEC3
import dns.rdtypes.ANY.NSEC3PARAM
import dns.rdtypes.ANY.RRSIG
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.TXT
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA
from .logging import get_logger
_logger = get_logger(__name__)
# hard-coded trust anchors (root KSKs)
trust_anchors = [
# KSK-2017:
dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU='),
# KSK-2010:
dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='),
]
def _check_query(ns, sub, _type, keys):
q = dns.message.make_query(sub, _type, want_dnssec=True)
response = dns.query.tcp(q, ns, timeout=5)
assert response.rcode() == 0, 'No answer'
answer = response.answer
assert len(answer) != 0, ('No DNS record found', sub, _type)
assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
if answer[0].rdtype == dns.rdatatype.RRSIG:
rrsig, rrset = answer
elif answer[1].rdtype == dns.rdatatype.RRSIG:
rrset, rrsig = answer
else:
raise Exception('No signature set in record')
if keys is None:
keys = {dns.name.from_text(sub):rrset}
dns.dnssec.validate(rrset, rrsig, keys)
return rrset
def _get_and_validate(ns, url, _type):
# get trusted root key
root_rrset = None
for dnskey_rr in trust_anchors:
try:
# Check if there is a valid signature for the root dnskey
root_rrset = _check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr})
break
except dns.dnssec.ValidationFailure:
# It's OK as long as one key validates
continue
if not root_rrset:
raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS')
keys = {dns.name.root: root_rrset}
# top-down verification
parts = url.split('.')
for i in range(len(parts), 0, -1):
sub = '.'.join(parts[i-1:])
name = dns.name.from_text(sub)
# If server is authoritative, don't fetch DNSKEY
query = dns.message.make_query(sub, dns.rdatatype.NS)
response = dns.query.udp(query, ns, 3)
assert response.rcode() == dns.rcode.NOERROR, "query error"
rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0]
rr = rrset[0]
if rr.rdtype == dns.rdatatype.SOA:
continue
# get DNSKEY (self-signed)
rrset = _check_query(ns, sub, dns.rdatatype.DNSKEY, None)
# get DS (signed by parent)
ds_rrset = _check_query(ns, sub, dns.rdatatype.DS, keys)
# verify that a signed DS validates DNSKEY
for ds in ds_rrset:
for dnskey in rrset:
htype = 'SHA256' if ds.digest_type == 2 else 'SHA1'
good_ds = dns.dnssec.make_ds(name, dnskey, htype)
if ds == good_ds:
break
else:
continue
break
else:
raise Exception("DS does not match DNSKEY")
# set key for next iteration
keys = {name: rrset}
# get TXT record (signed by zone)
rrset = _check_query(ns, url, _type, keys)
return rrset
def query(url, rtype):
# 8.8.8.8 is Google's public DNS server
nameservers = ['8.8.8.8']
ns = nameservers[0]
try:
out = _get_and_validate(ns, url, rtype)
validated = True
except Exception as e:
_logger.info(f"DNSSEC error: {repr(e)}")
out = dns.resolver.resolve(url, rtype)
validated = False
return out, validated
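# Illustrative usage sketch (not part of the original module): resolve TXT records and
# report whether the answer chain was DNSSEC-validated.
# answer, validated = query('electrum.org', dns.rdatatype.TXT)
# for rdata in answer:
#     print(validated, rdata.to_text())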
| 39.218543 | 418 | 0.700777 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,763 | 0.466565 |
8a681bd50a01e317584f76158f59adbe05396fb6 | 61,870 | py | Python | specs/d3d11.py | ds-hwang/apitrace | b74347ebae0d033a013c4de3efb0e9165e9cea8f | [
"MIT"
] | 1 | 2017-06-07T15:28:36.000Z | 2017-06-07T15:28:36.000Z | specs/d3d11.py | jciehl/apitrace | 0e01acc36de14e9ca7c0ced258767ffb99ac96ea | [
"MIT"
] | null | null | null | specs/d3d11.py | jciehl/apitrace | 0e01acc36de14e9ca7c0ced258767ffb99ac96ea | [
"MIT"
] | 1 | 2021-05-21T18:27:29.000Z | 2021-05-21T18:27:29.000Z | ##########################################################################
#
# Copyright 2012 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
from dxgi import *
from d3dcommon import *
from d3d11sdklayers import *
HRESULT = MAKE_HRESULT([
"D3D11_ERROR_FILE_NOT_FOUND",
"D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS",
"D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS",
"D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD",
"D3DERR_INVALIDCALL",
"D3DERR_WASSTILLDRAWING",
])
ID3D11DepthStencilState = Interface("ID3D11DepthStencilState", ID3D11DeviceChild)
ID3D11BlendState = Interface("ID3D11BlendState", ID3D11DeviceChild)
ID3D11RasterizerState = Interface("ID3D11RasterizerState", ID3D11DeviceChild)
ID3D11Resource = Interface("ID3D11Resource", ID3D11DeviceChild)
ID3D11Buffer = Interface("ID3D11Buffer", ID3D11Resource)
ID3D11Texture1D = Interface("ID3D11Texture1D", ID3D11Resource)
ID3D11Texture2D = Interface("ID3D11Texture2D", ID3D11Resource)
ID3D11Texture3D = Interface("ID3D11Texture3D", ID3D11Resource)
ID3D11View = Interface("ID3D11View", ID3D11DeviceChild)
ID3D11ShaderResourceView = Interface("ID3D11ShaderResourceView", ID3D11View)
ID3D11RenderTargetView = Interface("ID3D11RenderTargetView", ID3D11View)
ID3D11DepthStencilView = Interface("ID3D11DepthStencilView", ID3D11View)
ID3D11UnorderedAccessView = Interface("ID3D11UnorderedAccessView", ID3D11View)
ID3D11VertexShader = Interface("ID3D11VertexShader", ID3D11DeviceChild)
ID3D11HullShader = Interface("ID3D11HullShader", ID3D11DeviceChild)
ID3D11DomainShader = Interface("ID3D11DomainShader", ID3D11DeviceChild)
ID3D11GeometryShader = Interface("ID3D11GeometryShader", ID3D11DeviceChild)
ID3D11PixelShader = Interface("ID3D11PixelShader", ID3D11DeviceChild)
ID3D11ComputeShader = Interface("ID3D11ComputeShader", ID3D11DeviceChild)
ID3D11InputLayout = Interface("ID3D11InputLayout", ID3D11DeviceChild)
ID3D11SamplerState = Interface("ID3D11SamplerState", ID3D11DeviceChild)
ID3D11Asynchronous = Interface("ID3D11Asynchronous", ID3D11DeviceChild)
ID3D11Query = Interface("ID3D11Query", ID3D11Asynchronous)
ID3D11Predicate = Interface("ID3D11Predicate", ID3D11Query)
ID3D11Counter = Interface("ID3D11Counter", ID3D11Asynchronous)
ID3D11ClassInstance = Interface("ID3D11ClassInstance", ID3D11DeviceChild)
ID3D11ClassLinkage = Interface("ID3D11ClassLinkage", ID3D11DeviceChild)
ID3D11CommandList = Interface("ID3D11CommandList", ID3D11DeviceChild)
ID3D11Device = Interface("ID3D11Device", IUnknown)
D3D11_INPUT_CLASSIFICATION = Enum("D3D11_INPUT_CLASSIFICATION", [
"D3D11_INPUT_PER_VERTEX_DATA",
"D3D11_INPUT_PER_INSTANCE_DATA",
])
D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET = FakeEnum(UINT, [
"D3D11_APPEND_ALIGNED_ELEMENT",
])
D3D11_INPUT_ELEMENT_DESC = Struct("D3D11_INPUT_ELEMENT_DESC", [
(LPCSTR, "SemanticName"),
(UINT, "SemanticIndex"),
(DXGI_FORMAT, "Format"),
(UINT, "InputSlot"),
(D3D11_INPUT_ELEMENT_ALIGNED_BYTE_OFFSET, "AlignedByteOffset"),
(D3D11_INPUT_CLASSIFICATION, "InputSlotClass"),
(UINT, "InstanceDataStepRate"),
])
D3D11_FILL_MODE = Enum("D3D11_FILL_MODE", [
"D3D11_FILL_WIREFRAME",
"D3D11_FILL_SOLID",
])
D3D11_PRIMITIVE_TOPOLOGY = Enum("D3D11_PRIMITIVE_TOPOLOGY", [
"D3D11_PRIMITIVE_TOPOLOGY_UNDEFINED",
"D3D11_PRIMITIVE_TOPOLOGY_POINTLIST",
"D3D11_PRIMITIVE_TOPOLOGY_LINELIST",
"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP",
"D3D11_PRIMITIVE_TOPOLOGY_LINELIST_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP_ADJ",
"D3D11_PRIMITIVE_TOPOLOGY_1_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_2_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_4_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_5_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_6_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_7_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_8_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_9_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_10_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_11_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_12_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_13_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_14_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_15_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_16_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_17_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_18_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_19_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_20_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_21_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_22_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_23_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_24_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_25_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_26_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_27_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_28_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_29_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_30_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_31_CONTROL_POINT_PATCHLIST",
"D3D11_PRIMITIVE_TOPOLOGY_32_CONTROL_POINT_PATCHLIST",
])
D3D11_PRIMITIVE = Enum("D3D11_PRIMITIVE", [
"D3D11_PRIMITIVE_UNDEFINED",
"D3D11_PRIMITIVE_POINT",
"D3D11_PRIMITIVE_LINE",
"D3D11_PRIMITIVE_TRIANGLE",
"D3D11_PRIMITIVE_LINE_ADJ",
"D3D11_PRIMITIVE_TRIANGLE_ADJ",
"D3D11_PRIMITIVE_1_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_2_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_3_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_4_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_5_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_6_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_7_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_8_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_9_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_10_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_11_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_12_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_13_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_14_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_15_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_16_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_17_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_18_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_19_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_20_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_21_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_22_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_23_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_24_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_25_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_26_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_27_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_28_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_29_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_30_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_31_CONTROL_POINT_PATCH",
"D3D11_PRIMITIVE_32_CONTROL_POINT_PATCH",
])
D3D11_CULL_MODE = Enum("D3D11_CULL_MODE", [
"D3D11_CULL_NONE",
"D3D11_CULL_FRONT",
"D3D11_CULL_BACK",
])
D3D11_SO_DECLARATION_ENTRY = Struct("D3D11_SO_DECLARATION_ENTRY", [
(UINT, "Stream"),
(LPCSTR, "SemanticName"),
(UINT, "SemanticIndex"),
(BYTE, "StartComponent"),
(BYTE, "ComponentCount"),
(BYTE, "OutputSlot"),
])
D3D11_VIEWPORT = Struct("D3D11_VIEWPORT", [
(FLOAT, "TopLeftX"),
(FLOAT, "TopLeftY"),
(FLOAT, "Width"),
(FLOAT, "Height"),
(FLOAT, "MinDepth"),
(FLOAT, "MaxDepth"),
])
D3D11_RESOURCE_DIMENSION = Enum("D3D11_RESOURCE_DIMENSION", [
"D3D11_RESOURCE_DIMENSION_UNKNOWN",
"D3D11_RESOURCE_DIMENSION_BUFFER",
"D3D11_RESOURCE_DIMENSION_TEXTURE1D",
"D3D11_RESOURCE_DIMENSION_TEXTURE2D",
"D3D11_RESOURCE_DIMENSION_TEXTURE3D",
])
D3D11_SRV_DIMENSION = Enum("D3D11_SRV_DIMENSION", [
"D3D11_SRV_DIMENSION_UNKNOWN",
"D3D11_SRV_DIMENSION_BUFFER",
"D3D11_SRV_DIMENSION_TEXTURE1D",
"D3D11_SRV_DIMENSION_TEXTURE1DARRAY",
"D3D11_SRV_DIMENSION_TEXTURE2D",
"D3D11_SRV_DIMENSION_TEXTURE2DARRAY",
"D3D11_SRV_DIMENSION_TEXTURE2DMS",
"D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY",
"D3D11_SRV_DIMENSION_TEXTURE3D",
"D3D11_SRV_DIMENSION_TEXTURECUBE",
"D3D11_SRV_DIMENSION_TEXTURECUBEARRAY",
"D3D11_SRV_DIMENSION_BUFFEREX",
])
D3D11_DSV_DIMENSION = Enum("D3D11_DSV_DIMENSION", [
"D3D11_DSV_DIMENSION_UNKNOWN",
"D3D11_DSV_DIMENSION_TEXTURE1D",
"D3D11_DSV_DIMENSION_TEXTURE1DARRAY",
"D3D11_DSV_DIMENSION_TEXTURE2D",
"D3D11_DSV_DIMENSION_TEXTURE2DARRAY",
"D3D11_DSV_DIMENSION_TEXTURE2DMS",
"D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY",
])
D3D11_RTV_DIMENSION = Enum("D3D11_RTV_DIMENSION", [
"D3D11_RTV_DIMENSION_UNKNOWN",
"D3D11_RTV_DIMENSION_BUFFER",
"D3D11_RTV_DIMENSION_TEXTURE1D",
"D3D11_RTV_DIMENSION_TEXTURE1DARRAY",
"D3D11_RTV_DIMENSION_TEXTURE2D",
"D3D11_RTV_DIMENSION_TEXTURE2DARRAY",
"D3D11_RTV_DIMENSION_TEXTURE2DMS",
"D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY",
"D3D11_RTV_DIMENSION_TEXTURE3D",
])
D3D11_UAV_DIMENSION = Enum("D3D11_UAV_DIMENSION", [
"D3D11_UAV_DIMENSION_UNKNOWN",
"D3D11_UAV_DIMENSION_BUFFER",
"D3D11_UAV_DIMENSION_TEXTURE1D",
"D3D11_UAV_DIMENSION_TEXTURE1DARRAY",
"D3D11_UAV_DIMENSION_TEXTURE2D",
"D3D11_UAV_DIMENSION_TEXTURE2DARRAY",
"D3D11_UAV_DIMENSION_TEXTURE3D",
])
D3D11_USAGE = Enum("D3D11_USAGE", [
"D3D11_USAGE_DEFAULT",
"D3D11_USAGE_IMMUTABLE",
"D3D11_USAGE_DYNAMIC",
"D3D11_USAGE_STAGING",
])
D3D11_BIND_FLAG = Flags(UINT, [
"D3D11_BIND_VERTEX_BUFFER",
"D3D11_BIND_INDEX_BUFFER",
"D3D11_BIND_CONSTANT_BUFFER",
"D3D11_BIND_SHADER_RESOURCE",
"D3D11_BIND_STREAM_OUTPUT",
"D3D11_BIND_RENDER_TARGET",
"D3D11_BIND_DEPTH_STENCIL",
"D3D11_BIND_UNORDERED_ACCESS",
])
D3D11_CPU_ACCESS_FLAG = Flags(UINT, [
"D3D11_CPU_ACCESS_WRITE",
"D3D11_CPU_ACCESS_READ",
])
D3D11_RESOURCE_MISC_FLAG = Flags(UINT, [
"D3D11_RESOURCE_MISC_GENERATE_MIPS",
"D3D11_RESOURCE_MISC_SHARED",
"D3D11_RESOURCE_MISC_TEXTURECUBE",
"D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS",
"D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS",
"D3D11_RESOURCE_MISC_BUFFER_STRUCTURED",
"D3D11_RESOURCE_MISC_RESOURCE_CLAMP",
"D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX",
"D3D11_RESOURCE_MISC_GDI_COMPATIBLE",
])
D3D11_MAP = Enum("D3D11_MAP", [
"D3D11_MAP_READ",
"D3D11_MAP_WRITE",
"D3D11_MAP_READ_WRITE",
"D3D11_MAP_WRITE_DISCARD",
"D3D11_MAP_WRITE_NO_OVERWRITE",
])
D3D11_MAP_FLAG = Flags(UINT, [
"D3D11_MAP_FLAG_DO_NOT_WAIT",
])
D3D11_RAISE_FLAG = Flags(UINT, [
"D3D11_RAISE_FLAG_DRIVER_INTERNAL_ERROR",
])
D3D11_CLEAR_FLAG = Flags(UINT, [
"D3D11_CLEAR_DEPTH",
"D3D11_CLEAR_STENCIL",
])
D3D11_RECT = Alias("D3D11_RECT", RECT)
D3D11_BOX = Struct("D3D11_BOX", [
(UINT, "left"),
(UINT, "top"),
(UINT, "front"),
(UINT, "right"),
(UINT, "bottom"),
(UINT, "back"),
])
ID3D11DeviceChild.methods += [
StdMethod(Void, "GetDevice", [Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice")]),
StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")]),
StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")]),
StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")]),
]
D3D11_COMPARISON_FUNC = Enum("D3D11_COMPARISON_FUNC", [
"D3D11_COMPARISON_NEVER",
"D3D11_COMPARISON_LESS",
"D3D11_COMPARISON_EQUAL",
"D3D11_COMPARISON_LESS_EQUAL",
"D3D11_COMPARISON_GREATER",
"D3D11_COMPARISON_NOT_EQUAL",
"D3D11_COMPARISON_GREATER_EQUAL",
"D3D11_COMPARISON_ALWAYS",
])
D3D11_DEPTH_WRITE_MASK = Enum("D3D11_DEPTH_WRITE_MASK", [
"D3D11_DEPTH_WRITE_MASK_ZERO",
"D3D11_DEPTH_WRITE_MASK_ALL",
])
D3D11_STENCIL_OP = Enum("D3D11_STENCIL_OP", [
"D3D11_STENCIL_OP_KEEP",
"D3D11_STENCIL_OP_ZERO",
"D3D11_STENCIL_OP_REPLACE",
"D3D11_STENCIL_OP_INCR_SAT",
"D3D11_STENCIL_OP_DECR_SAT",
"D3D11_STENCIL_OP_INVERT",
"D3D11_STENCIL_OP_INCR",
"D3D11_STENCIL_OP_DECR",
])
D3D11_DEPTH_STENCILOP_DESC = Struct("D3D11_DEPTH_STENCILOP_DESC", [
(D3D11_STENCIL_OP, "StencilFailOp"),
(D3D11_STENCIL_OP, "StencilDepthFailOp"),
(D3D11_STENCIL_OP, "StencilPassOp"),
(D3D11_COMPARISON_FUNC, "StencilFunc"),
])
D3D11_DEPTH_STENCIL_DESC = Struct("D3D11_DEPTH_STENCIL_DESC", [
(BOOL, "DepthEnable"),
(D3D11_DEPTH_WRITE_MASK, "DepthWriteMask"),
(D3D11_COMPARISON_FUNC, "DepthFunc"),
(BOOL, "StencilEnable"),
(UINT8, "StencilReadMask"),
(UINT8, "StencilWriteMask"),
(D3D11_DEPTH_STENCILOP_DESC, "FrontFace"),
(D3D11_DEPTH_STENCILOP_DESC, "BackFace"),
])
ID3D11DepthStencilState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_DESC), "pDesc")]),
]
D3D11_BLEND = Enum("D3D11_BLEND", [
"D3D11_BLEND_ZERO",
"D3D11_BLEND_ONE",
"D3D11_BLEND_SRC_COLOR",
"D3D11_BLEND_INV_SRC_COLOR",
"D3D11_BLEND_SRC_ALPHA",
"D3D11_BLEND_INV_SRC_ALPHA",
"D3D11_BLEND_DEST_ALPHA",
"D3D11_BLEND_INV_DEST_ALPHA",
"D3D11_BLEND_DEST_COLOR",
"D3D11_BLEND_INV_DEST_COLOR",
"D3D11_BLEND_SRC_ALPHA_SAT",
"D3D11_BLEND_BLEND_FACTOR",
"D3D11_BLEND_INV_BLEND_FACTOR",
"D3D11_BLEND_SRC1_COLOR",
"D3D11_BLEND_INV_SRC1_COLOR",
"D3D11_BLEND_SRC1_ALPHA",
"D3D11_BLEND_INV_SRC1_ALPHA",
])
D3D11_BLEND_OP = Enum("D3D11_BLEND_OP", [
"D3D11_BLEND_OP_ADD",
"D3D11_BLEND_OP_SUBTRACT",
"D3D11_BLEND_OP_REV_SUBTRACT",
"D3D11_BLEND_OP_MIN",
"D3D11_BLEND_OP_MAX",
])
D3D11_COLOR_WRITE_ENABLE = Enum("D3D11_COLOR_WRITE_ENABLE", [
"D3D11_COLOR_WRITE_ENABLE_ALL",
"D3D11_COLOR_WRITE_ENABLE_RED",
"D3D11_COLOR_WRITE_ENABLE_GREEN",
"D3D11_COLOR_WRITE_ENABLE_BLUE",
"D3D11_COLOR_WRITE_ENABLE_ALPHA",
])
D3D11_RENDER_TARGET_BLEND_DESC = Struct("D3D11_RENDER_TARGET_BLEND_DESC", [
(BOOL, "BlendEnable"),
(D3D11_BLEND, "SrcBlend"),
(D3D11_BLEND, "DestBlend"),
(D3D11_BLEND_OP, "BlendOp"),
(D3D11_BLEND, "SrcBlendAlpha"),
(D3D11_BLEND, "DestBlendAlpha"),
(D3D11_BLEND_OP, "BlendOpAlpha"),
(UINT8, "RenderTargetWriteMask"),
])
D3D11_BLEND_DESC = Struct("D3D11_BLEND_DESC", [
(BOOL, "AlphaToCoverageEnable"),
(BOOL, "IndependentBlendEnable"),
(Array(D3D11_RENDER_TARGET_BLEND_DESC, 8), "RenderTarget"),
])
ID3D11BlendState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BLEND_DESC), "pDesc")]),
]
D3D11_RASTERIZER_DESC = Struct("D3D11_RASTERIZER_DESC", [
(D3D11_FILL_MODE, "FillMode"),
(D3D11_CULL_MODE, "CullMode"),
(BOOL, "FrontCounterClockwise"),
(INT, "DepthBias"),
(FLOAT, "DepthBiasClamp"),
(FLOAT, "SlopeScaledDepthBias"),
(BOOL, "DepthClipEnable"),
(BOOL, "ScissorEnable"),
(BOOL, "MultisampleEnable"),
(BOOL, "AntialiasedLineEnable"),
])
ID3D11RasterizerState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RASTERIZER_DESC), "pDesc")]),
]
D3D11_SUBRESOURCE_DATA = Struct("D3D11_SUBRESOURCE_DATA", [
(OpaquePointer(Const(Void)), "pSysMem"),
(UINT, "SysMemPitch"),
(UINT, "SysMemSlicePitch"),
])
D3D11_MAPPED_SUBRESOURCE = Struct("D3D11_MAPPED_SUBRESOURCE", [
(OpaquePointer(Void), "pData"),
(UINT, "RowPitch"),
(UINT, "DepthPitch"),
])
ID3D11Resource.methods += [
StdMethod(Void, "GetType", [Out(Pointer(D3D11_RESOURCE_DIMENSION), "pResourceDimension")]),
StdMethod(Void, "SetEvictionPriority", [(UINT, "EvictionPriority")]),
StdMethod(UINT, "GetEvictionPriority", []),
]
D3D11_BUFFER_DESC = Struct("D3D11_BUFFER_DESC", [
(UINT, "ByteWidth"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
(UINT, "StructureByteStride"),
])
ID3D11Buffer.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_BUFFER_DESC), "pDesc")]),
]
D3D11_TEXTURE1D_DESC = Struct("D3D11_TEXTURE1D_DESC", [
(UINT, "Width"),
(UINT, "MipLevels"),
(UINT, "ArraySize"),
(DXGI_FORMAT, "Format"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])
ID3D11Texture1D.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE1D_DESC), "pDesc")]),
]
D3D11_TEXTURE2D_DESC = Struct("D3D11_TEXTURE2D_DESC", [
(UINT, "Width"),
(UINT, "Height"),
(UINT, "MipLevels"),
(UINT, "ArraySize"),
(DXGI_FORMAT, "Format"),
(DXGI_SAMPLE_DESC, "SampleDesc"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])
ID3D11Texture2D.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE2D_DESC), "pDesc")]),
]
D3D11_TEXTURE3D_DESC = Struct("D3D11_TEXTURE3D_DESC", [
(UINT, "Width"),
(UINT, "Height"),
(UINT, "Depth"),
(UINT, "MipLevels"),
(DXGI_FORMAT, "Format"),
(D3D11_USAGE, "Usage"),
(D3D11_BIND_FLAG, "BindFlags"),
(D3D11_CPU_ACCESS_FLAG, "CPUAccessFlags"),
(D3D11_RESOURCE_MISC_FLAG, "MiscFlags"),
])
ID3D11Texture3D.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_TEXTURE3D_DESC), "pDesc")]),
]
D3D11_TEXTURECUBE_FACE = Enum("D3D11_TEXTURECUBE_FACE", [
"D3D11_TEXTURECUBE_FACE_POSITIVE_X",
"D3D11_TEXTURECUBE_FACE_NEGATIVE_X",
"D3D11_TEXTURECUBE_FACE_POSITIVE_Y",
"D3D11_TEXTURECUBE_FACE_NEGATIVE_Y",
"D3D11_TEXTURECUBE_FACE_POSITIVE_Z",
"D3D11_TEXTURECUBE_FACE_NEGATIVE_Z",
])
ID3D11View.methods += [
StdMethod(Void, "GetResource", [Out(Pointer(ObjPointer(ID3D11Resource)), "ppResource")]),
]
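# Per-dimension SRV description structs; D3D11_SHADER_RESOURCE_VIEW_DESC below selects one of them through its union, keyed on ViewDimension.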
D3D11_BUFFER_SRV = Struct("D3D11_BUFFER_SRV", [
(Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
(Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])
D3D11_BUFFEREX_SRV_FLAG = Flags(UINT, [
"D3D11_BUFFEREX_SRV_FLAG_RAW",
])
D3D11_BUFFEREX_SRV = Struct("D3D11_BUFFEREX_SRV", [
(UINT, "FirstElement"),
(UINT, "NumElements"),
(D3D11_BUFFEREX_SRV_FLAG, "Flags"),
])
D3D11_TEX1D_SRV = Struct("D3D11_TEX1D_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEX1D_ARRAY_SRV = Struct("D3D11_TEX1D_ARRAY_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_SRV = Struct("D3D11_TEX2D_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEX2D_ARRAY_SRV = Struct("D3D11_TEX2D_ARRAY_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX3D_SRV = Struct("D3D11_TEX3D_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEXCUBE_SRV = Struct("D3D11_TEXCUBE_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
])
D3D11_TEXCUBE_ARRAY_SRV = Struct("D3D11_TEXCUBE_ARRAY_SRV", [
(UINT, "MostDetailedMip"),
(UINT, "MipLevels"),
(UINT, "First2DArrayFace"),
(UINT, "NumCubes"),
])
D3D11_TEX2DMS_SRV = Struct("D3D11_TEX2DMS_SRV", [
(UINT, "UnusedField_NothingToDefine"),
])
D3D11_TEX2DMS_ARRAY_SRV = Struct("D3D11_TEX2DMS_ARRAY_SRV", [
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_SHADER_RESOURCE_VIEW_DESC = Struct("D3D11_SHADER_RESOURCE_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_SRV_DIMENSION, "ViewDimension"),
(Union(None, [
(D3D11_BUFFER_SRV, "Buffer"),
(D3D11_TEX1D_SRV, "Texture1D"),
(D3D11_TEX1D_ARRAY_SRV, "Texture1DArray"),
(D3D11_TEX2D_SRV, "Texture2D"),
(D3D11_TEX2D_ARRAY_SRV, "Texture2DArray"),
(D3D11_TEX2DMS_SRV, "Texture2DMS"),
(D3D11_TEX2DMS_ARRAY_SRV, "Texture2DMSArray"),
(D3D11_TEX3D_SRV, "Texture3D"),
(D3D11_TEXCUBE_SRV, "TextureCube"),
(D3D11_TEXCUBE_ARRAY_SRV, "TextureCubeArray"),
(D3D11_BUFFEREX_SRV, "BufferEx"),
]), None),
])
ID3D11ShaderResourceView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SHADER_RESOURCE_VIEW_DESC), "pDesc")]),
]
D3D11_BUFFER_RTV = Struct("D3D11_BUFFER_RTV", [
(Union(None, [(UINT, "FirstElement"), (UINT, "ElementOffset")]), None),
(Union(None, [(UINT, "NumElements"), (UINT, "ElementWidth")]), None),
])
D3D11_TEX1D_RTV = Struct("D3D11_TEX1D_RTV", [
(UINT, "MipSlice"),
])
D3D11_TEX1D_ARRAY_RTV = Struct("D3D11_TEX1D_ARRAY_RTV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_RTV = Struct("D3D11_TEX2D_RTV", [
(UINT, "MipSlice"),
])
D3D11_TEX2DMS_RTV = Struct("D3D11_TEX2DMS_RTV", [
(UINT, "UnusedField_NothingToDefine"),
])
D3D11_TEX2D_ARRAY_RTV = Struct("D3D11_TEX2D_ARRAY_RTV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2DMS_ARRAY_RTV = Struct("D3D11_TEX2DMS_ARRAY_RTV", [
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX3D_RTV = Struct("D3D11_TEX3D_RTV", [
(UINT, "MipSlice"),
(UINT, "FirstWSlice"),
(UINT, "WSize"),
])
D3D11_RENDER_TARGET_VIEW_DESC = Struct("D3D11_RENDER_TARGET_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_RTV_DIMENSION, "ViewDimension"),
(Union(None, [
(D3D11_BUFFER_RTV, "Buffer"),
(D3D11_TEX1D_RTV, "Texture1D"),
(D3D11_TEX1D_ARRAY_RTV, "Texture1DArray"),
(D3D11_TEX2D_RTV, "Texture2D"),
(D3D11_TEX2D_ARRAY_RTV, "Texture2DArray"),
(D3D11_TEX2DMS_RTV, "Texture2DMS"),
(D3D11_TEX2DMS_ARRAY_RTV, "Texture2DMSArray"),
(D3D11_TEX3D_RTV, "Texture3D"),
]), None),
])
ID3D11RenderTargetView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_RENDER_TARGET_VIEW_DESC), "pDesc")]),
]
D3D11_TEX1D_DSV = Struct("D3D11_TEX1D_DSV", [
(UINT, "MipSlice"),
])
D3D11_TEX1D_ARRAY_DSV = Struct("D3D11_TEX1D_ARRAY_DSV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_DSV = Struct("D3D11_TEX2D_DSV", [
(UINT, "MipSlice"),
])
D3D11_TEX2D_ARRAY_DSV = Struct("D3D11_TEX2D_ARRAY_DSV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2DMS_DSV = Struct("D3D11_TEX2DMS_DSV", [
(UINT, "UnusedField_NothingToDefine"),
])
D3D11_TEX2DMS_ARRAY_DSV = Struct("D3D11_TEX2DMS_ARRAY_DSV", [
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_DSV_FLAG = Flags(UINT, [
"D3D11_DSV_READ_ONLY_DEPTH",
"D3D11_DSV_READ_ONLY_STENCIL",
])
D3D11_DEPTH_STENCIL_VIEW_DESC = Struct("D3D11_DEPTH_STENCIL_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_DSV_DIMENSION, "ViewDimension"),
(D3D11_DSV_FLAG, "Flags"),
(Union(None, [
(D3D11_TEX1D_DSV, "Texture1D"),
(D3D11_TEX1D_ARRAY_DSV, "Texture1DArray"),
(D3D11_TEX2D_DSV, "Texture2D"),
(D3D11_TEX2D_ARRAY_DSV, "Texture2DArray"),
(D3D11_TEX2DMS_DSV, "Texture2DMS"),
(D3D11_TEX2DMS_ARRAY_DSV, "Texture2DMSArray"),
]), None),
])
ID3D11DepthStencilView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_DEPTH_STENCIL_VIEW_DESC), "pDesc")]),
]
D3D11_BUFFER_UAV_FLAG = Flags(UINT, [
"D3D11_BUFFER_UAV_FLAG_RAW",
"D3D11_BUFFER_UAV_FLAG_APPEND",
"D3D11_BUFFER_UAV_FLAG_COUNTER",
])
D3D11_BUFFER_UAV = Struct("D3D11_BUFFER_UAV", [
(UINT, "FirstElement"),
(UINT, "NumElements"),
(D3D11_BUFFER_UAV_FLAG, "Flags"),
])
D3D11_TEX1D_UAV = Struct("D3D11_TEX1D_UAV", [
(UINT, "MipSlice"),
])
D3D11_TEX1D_ARRAY_UAV = Struct("D3D11_TEX1D_ARRAY_UAV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX2D_UAV = Struct("D3D11_TEX2D_UAV", [
(UINT, "MipSlice"),
])
D3D11_TEX2D_ARRAY_UAV = Struct("D3D11_TEX2D_ARRAY_UAV", [
(UINT, "MipSlice"),
(UINT, "FirstArraySlice"),
(UINT, "ArraySize"),
])
D3D11_TEX3D_UAV = Struct("D3D11_TEX3D_UAV", [
(UINT, "MipSlice"),
(UINT, "FirstWSlice"),
(UINT, "WSize"),
])
D3D11_UNORDERED_ACCESS_VIEW_DESC = Struct("D3D11_UNORDERED_ACCESS_VIEW_DESC", [
(DXGI_FORMAT, "Format"),
(D3D11_UAV_DIMENSION, "ViewDimension"),
(Union(None, [
(D3D11_BUFFER_UAV, "Buffer"),
(D3D11_TEX1D_UAV, "Texture1D"),
(D3D11_TEX1D_ARRAY_UAV, "Texture1DArray"),
(D3D11_TEX2D_UAV, "Texture2D"),
(D3D11_TEX2D_ARRAY_UAV, "Texture2DArray"),
(D3D11_TEX3D_UAV, "Texture3D"),
]), None),
])
ID3D11UnorderedAccessView.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_UNORDERED_ACCESS_VIEW_DESC), "pDesc")]),
]
D3D11_FILTER = Enum("D3D11_FILTER", [
"D3D11_FILTER_MIN_MAG_MIP_POINT",
"D3D11_FILTER_MIN_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR",
"D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT",
"D3D11_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_MIN_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_MIN_MAG_MIP_LINEAR",
"D3D11_FILTER_ANISOTROPIC",
"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT",
"D3D11_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR",
"D3D11_FILTER_COMPARISON_ANISOTROPIC",
])
D3D11_FILTER_TYPE = Enum("D3D11_FILTER_TYPE", [
"D3D11_FILTER_TYPE_POINT",
"D3D11_FILTER_TYPE_LINEAR",
])
D3D11_TEXTURE_ADDRESS_MODE = Enum("D3D11_TEXTURE_ADDRESS_MODE", [
"D3D11_TEXTURE_ADDRESS_WRAP",
"D3D11_TEXTURE_ADDRESS_MIRROR",
"D3D11_TEXTURE_ADDRESS_CLAMP",
"D3D11_TEXTURE_ADDRESS_BORDER",
"D3D11_TEXTURE_ADDRESS_MIRROR_ONCE",
])
D3D11_SAMPLER_DESC = Struct("D3D11_SAMPLER_DESC", [
(D3D11_FILTER, "Filter"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressU"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressV"),
(D3D11_TEXTURE_ADDRESS_MODE, "AddressW"),
(FLOAT, "MipLODBias"),
(UINT, "MaxAnisotropy"),
(D3D11_COMPARISON_FUNC, "ComparisonFunc"),
(Array(FLOAT, 4), "BorderColor"),
(FLOAT, "MinLOD"),
(FLOAT, "MaxLOD"),
])
ID3D11SamplerState.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_SAMPLER_DESC), "pDesc")]),
]
D3D11_FORMAT_SUPPORT = Flags(UINT, [
"D3D11_FORMAT_SUPPORT_BUFFER",
"D3D11_FORMAT_SUPPORT_IA_VERTEX_BUFFER",
"D3D11_FORMAT_SUPPORT_IA_INDEX_BUFFER",
"D3D11_FORMAT_SUPPORT_SO_BUFFER",
"D3D11_FORMAT_SUPPORT_TEXTURE1D",
"D3D11_FORMAT_SUPPORT_TEXTURE2D",
"D3D11_FORMAT_SUPPORT_TEXTURE3D",
"D3D11_FORMAT_SUPPORT_TEXTURECUBE",
"D3D11_FORMAT_SUPPORT_SHADER_LOAD",
"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE",
"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_COMPARISON",
"D3D11_FORMAT_SUPPORT_SHADER_SAMPLE_MONO_TEXT",
"D3D11_FORMAT_SUPPORT_MIP",
"D3D11_FORMAT_SUPPORT_MIP_AUTOGEN",
"D3D11_FORMAT_SUPPORT_RENDER_TARGET",
"D3D11_FORMAT_SUPPORT_BLENDABLE",
"D3D11_FORMAT_SUPPORT_DEPTH_STENCIL",
"D3D11_FORMAT_SUPPORT_CPU_LOCKABLE",
"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RESOLVE",
"D3D11_FORMAT_SUPPORT_DISPLAY",
"D3D11_FORMAT_SUPPORT_CAST_WITHIN_BIT_LAYOUT",
"D3D11_FORMAT_SUPPORT_MULTISAMPLE_RENDERTARGET",
"D3D11_FORMAT_SUPPORT_MULTISAMPLE_LOAD",
"D3D11_FORMAT_SUPPORT_SHADER_GATHER",
"D3D11_FORMAT_SUPPORT_BACK_BUFFER_CAST",
"D3D11_FORMAT_SUPPORT_TYPED_UNORDERED_ACCESS_VIEW",
"D3D11_FORMAT_SUPPORT_SHADER_GATHER_COMPARISON",
])
D3D11_FORMAT_SUPPORT2 = Enum("D3D11_FORMAT_SUPPORT2", [
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_ADD",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX",
"D3D11_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX",
"D3D11_FORMAT_SUPPORT2_UAV_TYPED_LOAD",
"D3D11_FORMAT_SUPPORT2_UAV_TYPED_STORE",
])
ID3D11Asynchronous.methods += [
StdMethod(UINT, "GetDataSize", []),
]
D3D11_ASYNC_GETDATA_FLAG = Flags(UINT, [
"D3D11_ASYNC_GETDATA_DONOTFLUSH",
])
D3D11_QUERY = Enum("D3D11_QUERY", [
"D3D11_QUERY_EVENT",
"D3D11_QUERY_OCCLUSION",
"D3D11_QUERY_TIMESTAMP",
"D3D11_QUERY_TIMESTAMP_DISJOINT",
"D3D11_QUERY_PIPELINE_STATISTICS",
"D3D11_QUERY_OCCLUSION_PREDICATE",
"D3D11_QUERY_SO_STATISTICS",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE",
"D3D11_QUERY_SO_STATISTICS_STREAM0",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM0",
"D3D11_QUERY_SO_STATISTICS_STREAM1",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM1",
"D3D11_QUERY_SO_STATISTICS_STREAM2",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM2",
"D3D11_QUERY_SO_STATISTICS_STREAM3",
"D3D11_QUERY_SO_OVERFLOW_PREDICATE_STREAM3",
])
D3D11_QUERY_MISC_FLAG = Flags(UINT, [
"D3D11_QUERY_MISC_PREDICATEHINT",
])
D3D11_QUERY_DESC = Struct("D3D11_QUERY_DESC", [
(D3D11_QUERY, "Query"),
(D3D11_QUERY_MISC_FLAG, "MiscFlags"),
])
ID3D11Query.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_QUERY_DESC), "pDesc")]),
]
D3D11_QUERY_DATA_TIMESTAMP_DISJOINT = Struct("D3D11_QUERY_DATA_TIMESTAMP_DISJOINT", [
(UINT64, "Frequency"),
(BOOL, "Disjoint"),
])
D3D11_QUERY_DATA_PIPELINE_STATISTICS = Struct("D3D11_QUERY_DATA_PIPELINE_STATISTICS", [
(UINT64, "IAVertices"),
(UINT64, "IAPrimitives"),
(UINT64, "VSInvocations"),
(UINT64, "GSInvocations"),
(UINT64, "GSPrimitives"),
(UINT64, "CInvocations"),
(UINT64, "CPrimitives"),
(UINT64, "PSInvocations"),
(UINT64, "HSInvocations"),
(UINT64, "DSInvocations"),
(UINT64, "CSInvocations"),
])
D3D11_QUERY_DATA_SO_STATISTICS = Struct("D3D11_QUERY_DATA_SO_STATISTICS", [
(UINT64, "NumPrimitivesWritten"),
(UINT64, "PrimitivesStorageNeeded"),
])
D3D11_COUNTER = Enum("D3D11_COUNTER", [
"D3D11_COUNTER_DEVICE_DEPENDENT_0",
])
D3D11_COUNTER_TYPE = Enum("D3D11_COUNTER_TYPE", [
"D3D11_COUNTER_TYPE_FLOAT32",
"D3D11_COUNTER_TYPE_UINT16",
"D3D11_COUNTER_TYPE_UINT32",
"D3D11_COUNTER_TYPE_UINT64",
])
D3D11_COUNTER_DESC = Struct("D3D11_COUNTER_DESC", [
(D3D11_COUNTER, "Counter"),
(UINT, "MiscFlags"),
])
D3D11_COUNTER_INFO = Struct("D3D11_COUNTER_INFO", [
(D3D11_COUNTER, "LastDeviceDependentCounter"),
(UINT, "NumSimultaneousCounters"),
(UINT8, "NumDetectableParallelUnits"),
])
ID3D11Counter.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_COUNTER_DESC), "pDesc")]),
]
D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS = Enum("D3D11_STANDARD_MULTISAMPLE_QUALITY_LEVELS", [
"D3D11_STANDARD_MULTISAMPLE_PATTERN",
"D3D11_CENTER_MULTISAMPLE_PATTERN",
])
D3D11_DEVICE_CONTEXT_TYPE = Enum("D3D11_DEVICE_CONTEXT_TYPE", [
"D3D11_DEVICE_CONTEXT_IMMEDIATE",
"D3D11_DEVICE_CONTEXT_DEFERRED",
])
D3D11_CLASS_INSTANCE_DESC = Struct("D3D11_CLASS_INSTANCE_DESC", [
(UINT, "InstanceId"),
(UINT, "InstanceIndex"),
(UINT, "TypeId"),
(UINT, "ConstantBuffer"),
(UINT, "BaseConstantBufferOffset"),
(UINT, "BaseTexture"),
(UINT, "BaseSampler"),
(BOOL, "Created"),
])
ID3D11ClassInstance.methods += [
StdMethod(Void, "GetClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
StdMethod(Void, "GetDesc", [Out(Pointer(D3D11_CLASS_INSTANCE_DESC), "pDesc")]),
StdMethod(Void, "GetInstanceName", [Out(LPSTR, "pInstanceName"), Out(Pointer(SIZE_T), "pBufferLength")]),
StdMethod(Void, "GetTypeName", [Out(LPSTR, "pTypeName"), Out(Pointer(SIZE_T), "pBufferLength")]),
]
ID3D11ClassLinkage.methods += [
StdMethod(HRESULT, "GetClassInstance", [(LPCSTR, "pClassInstanceName"), (UINT, "InstanceIndex"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
StdMethod(HRESULT, "CreateClassInstance", [(LPCSTR, "pClassTypeName"), (UINT, "ConstantBufferOffset"), (UINT, "ConstantVectorOffset"), (UINT, "TextureOffset"), (UINT, "SamplerOffset"), Out(Pointer(ObjPointer(ID3D11ClassInstance)), "ppInstance")]),
]
ID3D11CommandList.methods += [
StdMethod(UINT, "GetContextFlags", []),
]
D3D11_FEATURE_DATA_THREADING = Struct("D3D11_FEATURE_DATA_THREADING", [
(BOOL, "DriverConcurrentCreates"),
(BOOL, "DriverCommandLists"),
])
D3D11_FEATURE_DATA_DOUBLES = Struct("D3D11_FEATURE_DATA_DOUBLES", [
(BOOL, "DoublePrecisionFloatShaderOps"),
])
D3D11_FEATURE_DATA_FORMAT_SUPPORT = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT", [
(DXGI_FORMAT, "InFormat"),
(D3D11_FORMAT_SUPPORT, "OutFormatSupport"),
])
D3D11_FEATURE_DATA_FORMAT_SUPPORT2 = Struct("D3D11_FEATURE_DATA_FORMAT_SUPPORT2", [
(DXGI_FORMAT, "InFormat"),
(D3D11_FORMAT_SUPPORT2, "OutFormatSupport2"),
])
D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS = Struct("D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS", [
(BOOL, "ComputeShaders_Plus_RawAndStructuredBuffers_Via_Shader_4_x"),
])
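# CheckFeatureSupport fills an opaque blob whose layout depends on the feature queried; the polymorphic enum ties each D3D11_FEATURE value to its data struct so the blob can be decoded.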
D3D11_FEATURE, D3D11_FEATURE_DATA = EnumPolymorphic("D3D11_FEATURE", "Feature", [
("D3D11_FEATURE_THREADING", Pointer(D3D11_FEATURE_DATA_THREADING)),
("D3D11_FEATURE_DOUBLES", Pointer(D3D11_FEATURE_DATA_DOUBLES)),
("D3D11_FEATURE_FORMAT_SUPPORT", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT)),
("D3D11_FEATURE_FORMAT_SUPPORT2", Pointer(D3D11_FEATURE_DATA_FORMAT_SUPPORT2)),
("D3D11_FEATURE_D3D10_X_HARDWARE_OPTIONS", Pointer(D3D11_FEATURE_DATA_D3D10_X_HARDWARE_OPTIONS)),
], Blob(Void, "FeatureSupportDataSize"), False)
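# Device-context method table: per-stage resource binding (VS/PS/GS/HS/DS/CS), draw and dispatch calls, map/copy/clear operations, and the matching state getters.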
ID3D11DeviceContext.methods += [
StdMethod(Void, "VSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "PSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "PSSetShader", [(ObjPointer(ID3D11PixelShader), "pPixelShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "PSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "VSSetShader", [(ObjPointer(ID3D11VertexShader), "pVertexShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "DrawIndexed", [(UINT, "IndexCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation")]),
StdMethod(Void, "Draw", [(UINT, "VertexCount"), (UINT, "StartVertexLocation")]),
StdMethod(HRESULT, "Map", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource"), (D3D11_MAP, "MapType"), (D3D11_MAP_FLAG, "MapFlags"), Out(Pointer(D3D11_MAPPED_SUBRESOURCE), "pMappedResource")]),
StdMethod(Void, "Unmap", [(ObjPointer(ID3D11Resource), "pResource"), (UINT, "Subresource")]),
StdMethod(Void, "PSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "IASetInputLayout", [(ObjPointer(ID3D11InputLayout), "pInputLayout")]),
StdMethod(Void, "IASetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppVertexBuffers"), (Pointer(Const(UINT)), "pStrides"), (Pointer(Const(UINT)), "pOffsets")]),
StdMethod(Void, "IASetIndexBuffer", [(ObjPointer(ID3D11Buffer), "pIndexBuffer"), (DXGI_FORMAT, "Format"), (UINT, "Offset")]),
StdMethod(Void, "DrawIndexedInstanced", [(UINT, "IndexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartIndexLocation"), (INT, "BaseVertexLocation"), (UINT, "StartInstanceLocation")]),
StdMethod(Void, "DrawInstanced", [(UINT, "VertexCountPerInstance"), (UINT, "InstanceCount"), (UINT, "StartVertexLocation"), (UINT, "StartInstanceLocation")]),
StdMethod(Void, "GSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "GSSetShader", [(ObjPointer(ID3D11GeometryShader), "pShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "IASetPrimitiveTopology", [(D3D11_PRIMITIVE_TOPOLOGY, "Topology")]),
StdMethod(Void, "VSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "VSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "Begin", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
StdMethod(Void, "End", [(ObjPointer(ID3D11Asynchronous), "pAsync")]),
StdMethod(HRESULT, "GetData", [(ObjPointer(ID3D11Asynchronous), "pAsync"), Out(OpaqueBlob(Void, "DataSize"), "pData"), (UINT, "DataSize"), (D3D11_ASYNC_GETDATA_FLAG, "GetDataFlags")]),
StdMethod(Void, "SetPredication", [(ObjPointer(ID3D11Predicate), "pPredicate"), (BOOL, "PredicateValue")]),
StdMethod(Void, "GSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "GSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "OMSetRenderTargets", [(UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumViews"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView")]),
StdMethod(Void, "OMSetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(Const(ObjPointer(ID3D11RenderTargetView)), "NumRTVs"), "ppRenderTargetViews"), (ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
StdMethod(Void, "OMSetBlendState", [(ObjPointer(ID3D11BlendState), "pBlendState"), (Array(Const(FLOAT), 4), "BlendFactor"), (UINT, "SampleMask")]),
StdMethod(Void, "OMSetDepthStencilState", [(ObjPointer(ID3D11DepthStencilState), "pDepthStencilState"), (UINT, "StencilRef")]),
StdMethod(Void, "SOSetTargets", [(UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppSOTargets"), (Pointer(Const(UINT)), "pOffsets")]),
StdMethod(Void, "DrawAuto", []),
StdMethod(Void, "DrawIndexedInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
StdMethod(Void, "DrawInstancedIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
StdMethod(Void, "Dispatch", [(UINT, "ThreadGroupCountX"), (UINT, "ThreadGroupCountY"), (UINT, "ThreadGroupCountZ")]),
StdMethod(Void, "DispatchIndirect", [(ObjPointer(ID3D11Buffer), "pBufferForArgs"), (UINT, "AlignedByteOffsetForArgs")]),
StdMethod(Void, "RSSetState", [(ObjPointer(ID3D11RasterizerState), "pRasterizerState")]),
StdMethod(Void, "RSSetViewports", [(UINT, "NumViewports"), (Array(Const(D3D11_VIEWPORT), "NumViewports"), "pViewports")]),
StdMethod(Void, "RSSetScissorRects", [(UINT, "NumRects"), (Array(Const(D3D11_RECT), "NumRects"), "pRects")]),
StdMethod(Void, "CopySubresourceRegion", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (UINT, "DstX"), (UINT, "DstY"), (UINT, "DstZ"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (Pointer(Const(D3D11_BOX)), "pSrcBox")]),
StdMethod(Void, "CopyResource", [(ObjPointer(ID3D11Resource), "pDstResource"), (ObjPointer(ID3D11Resource), "pSrcResource")]),
StdMethod(Void, "UpdateSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (Pointer(Const(D3D11_BOX)), "pDstBox"), (OpaquePointer(Const(Void)), "pSrcData"), (UINT, "SrcRowPitch"), (UINT, "SrcDepthPitch")]),
StdMethod(Void, "CopyStructureCount", [(ObjPointer(ID3D11Buffer), "pDstBuffer"), (UINT, "DstAlignedByteOffset"), (ObjPointer(ID3D11UnorderedAccessView), "pSrcView")]),
StdMethod(Void, "ClearRenderTargetView", [(ObjPointer(ID3D11RenderTargetView), "pRenderTargetView"), (Array(Const(FLOAT), 4), "ColorRGBA")]),
StdMethod(Void, "ClearUnorderedAccessViewUint", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(UINT), 4), "Values")]),
StdMethod(Void, "ClearUnorderedAccessViewFloat", [(ObjPointer(ID3D11UnorderedAccessView), "pUnorderedAccessView"), (Array(Const(FLOAT), 4), "Values")]),
StdMethod(Void, "ClearDepthStencilView", [(ObjPointer(ID3D11DepthStencilView), "pDepthStencilView"), (D3D11_CLEAR_FLAG, "ClearFlags"), (FLOAT, "Depth"), (UINT8, "Stencil")]),
StdMethod(Void, "GenerateMips", [(ObjPointer(ID3D11ShaderResourceView), "pShaderResourceView")]),
StdMethod(Void, "SetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource"), (FLOAT, "MinLOD")]),
StdMethod(FLOAT, "GetResourceMinLOD", [(ObjPointer(ID3D11Resource), "pResource")]),
StdMethod(Void, "ResolveSubresource", [(ObjPointer(ID3D11Resource), "pDstResource"), (UINT, "DstSubresource"), (ObjPointer(ID3D11Resource), "pSrcResource"), (UINT, "SrcSubresource"), (DXGI_FORMAT, "Format")]),
StdMethod(Void, "ExecuteCommandList", [(ObjPointer(ID3D11CommandList), "pCommandList"), (BOOL, "RestoreContextState")]),
StdMethod(Void, "HSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "HSSetShader", [(ObjPointer(ID3D11HullShader), "pHullShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "HSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "HSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "DSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "DSSetShader", [(ObjPointer(ID3D11DomainShader), "pDomainShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "DSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "DSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "CSSetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(Const(ObjPointer(ID3D11ShaderResourceView)), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "CSSetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(Const(ObjPointer(ID3D11UnorderedAccessView)), "NumUAVs"), "ppUnorderedAccessViews"), (Pointer(Const(UINT)), "pUAVInitialCounts")]),
StdMethod(Void, "CSSetShader", [(ObjPointer(ID3D11ComputeShader), "pComputeShader"), (Array(Const(ObjPointer(ID3D11ClassInstance)), "NumClassInstances"), "ppClassInstances"), (UINT, "NumClassInstances")]),
StdMethod(Void, "CSSetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(Const(ObjPointer(ID3D11SamplerState)), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "CSSetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(Const(ObjPointer(ID3D11Buffer)), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "VSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "PSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "PSGetShader", [Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "PSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "VSGetShader", [Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "PSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "IAGetInputLayout", [Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
StdMethod(Void, "IAGetVertexBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppVertexBuffers"), Out(Pointer(UINT), "pStrides"), Out(Pointer(UINT), "pOffsets")]),
StdMethod(Void, "IAGetIndexBuffer", [Out(Pointer(ObjPointer(ID3D11Buffer)), "pIndexBuffer"), Out(Pointer(DXGI_FORMAT), "Format"), Out(Pointer(UINT), "Offset")]),
StdMethod(Void, "GSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "GSGetShader", [Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "IAGetPrimitiveTopology", [Out(Pointer(D3D11_PRIMITIVE_TOPOLOGY), "pTopology")]),
StdMethod(Void, "VSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "VSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "GetPredication", [Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate"), Out(Pointer(BOOL), "pPredicateValue")]),
StdMethod(Void, "GSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "GSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "OMGetRenderTargets", [(UINT, "NumViews"), (Array(ObjPointer(ID3D11RenderTargetView), "NumViews"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
StdMethod(Void, "OMGetRenderTargetsAndUnorderedAccessViews", [(UINT, "NumRTVs"), (Array(ObjPointer(ID3D11RenderTargetView), "NumRTVs"), "ppRenderTargetViews"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView"), (UINT, "UAVStartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
StdMethod(Void, "OMGetBlendState", [Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState"), Out(Array(FLOAT, 4), "BlendFactor"), Out(Pointer(UINT), "pSampleMask")]),
StdMethod(Void, "OMGetDepthStencilState", [Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState"), Out(Pointer(UINT), "pStencilRef")]),
StdMethod(Void, "SOGetTargets", [(UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppSOTargets")]),
StdMethod(Void, "RSGetState", [Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
StdMethod(Void, "RSGetViewports", [Out(Pointer(UINT), "pNumViewports"), Out(Array(D3D11_VIEWPORT, "*pNumViewports"), "pViewports")]),
StdMethod(Void, "RSGetScissorRects", [Out(Pointer(UINT), "pNumRects"), Out(Array(D3D11_RECT, "*pNumRects"), "pRects")]),
StdMethod(Void, "HSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "HSGetShader", [Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "HSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "HSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "DSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "DSGetShader", [Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "DSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "DSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "CSGetShaderResources", [(UINT, "StartSlot"), (UINT, "NumViews"), (Array(ObjPointer(ID3D11ShaderResourceView), "NumViews"), "ppShaderResourceViews")]),
StdMethod(Void, "CSGetUnorderedAccessViews", [(UINT, "StartSlot"), (UINT, "NumUAVs"), (Array(ObjPointer(ID3D11UnorderedAccessView), "NumUAVs"), "ppUnorderedAccessViews")]),
StdMethod(Void, "CSGetShader", [Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader"), Out(Array(ObjPointer(ID3D11ClassInstance), "*pNumClassInstances"), "ppClassInstances"), Out(Pointer(UINT), "pNumClassInstances")]),
StdMethod(Void, "CSGetSamplers", [(UINT, "StartSlot"), (UINT, "NumSamplers"), (Array(ObjPointer(ID3D11SamplerState), "NumSamplers"), "ppSamplers")]),
StdMethod(Void, "CSGetConstantBuffers", [(UINT, "StartSlot"), (UINT, "NumBuffers"), (Array(ObjPointer(ID3D11Buffer), "NumBuffers"), "ppConstantBuffers")]),
StdMethod(Void, "ClearState", []),
StdMethod(Void, "Flush", []),
StdMethod(D3D11_DEVICE_CONTEXT_TYPE, "GetType", []),
StdMethod(UINT, "GetContextFlags", []),
StdMethod(HRESULT, "FinishCommandList", [(BOOL, "RestoreDeferredContextState"), Out(Pointer(ObjPointer(ID3D11CommandList)), "ppCommandList")]),
]
D3D11_CREATE_DEVICE_FLAG = Flags(UINT, [
"D3D11_CREATE_DEVICE_SINGLETHREADED",
"D3D11_CREATE_DEVICE_DEBUG",
"D3D11_CREATE_DEVICE_SWITCH_TO_REF",
"D3D11_CREATE_DEVICE_PREVENT_INTERNAL_THREADING_OPTIMIZATIONS",
"D3D11_CREATE_DEVICE_BGRA_SUPPORT",
])
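# Device method table: creation of buffers, textures, views, shaders and state objects, plus capability, counter and feature queries.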
ID3D11Device.methods += [
StdMethod(HRESULT, "CreateBuffer", [(Pointer(Const(D3D11_BUFFER_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Buffer)), "ppBuffer")]),
StdMethod(HRESULT, "CreateTexture1D", [(Pointer(Const(D3D11_TEXTURE1D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture1D)), "ppTexture1D")]),
StdMethod(HRESULT, "CreateTexture2D", [(Pointer(Const(D3D11_TEXTURE2D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture2D)), "ppTexture2D")]),
StdMethod(HRESULT, "CreateTexture3D", [(Pointer(Const(D3D11_TEXTURE3D_DESC)), "pDesc"), (Pointer(Const(D3D11_SUBRESOURCE_DATA)), "pInitialData"), Out(Pointer(ObjPointer(ID3D11Texture3D)), "ppTexture3D")]),
StdMethod(HRESULT, "CreateShaderResourceView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_SHADER_RESOURCE_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11ShaderResourceView)), "ppSRView")]),
StdMethod(HRESULT, "CreateUnorderedAccessView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_UNORDERED_ACCESS_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11UnorderedAccessView)), "ppUAView")]),
StdMethod(HRESULT, "CreateRenderTargetView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_RENDER_TARGET_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11RenderTargetView)), "ppRTView")]),
StdMethod(HRESULT, "CreateDepthStencilView", [(ObjPointer(ID3D11Resource), "pResource"), (Pointer(Const(D3D11_DEPTH_STENCIL_VIEW_DESC)), "pDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilView)), "ppDepthStencilView")]),
StdMethod(HRESULT, "CreateInputLayout", [(Array(Const(D3D11_INPUT_ELEMENT_DESC), "NumElements"), "pInputElementDescs"), (UINT, "NumElements"), (Blob(Const(Void), "BytecodeLength"), "pShaderBytecodeWithInputSignature"), (SIZE_T, "BytecodeLength"), Out(Pointer(ObjPointer(ID3D11InputLayout)), "ppInputLayout")]),
StdMethod(HRESULT, "CreateVertexShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11VertexShader)), "ppVertexShader")]),
StdMethod(HRESULT, "CreateGeometryShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]),
StdMethod(HRESULT, "CreateGeometryShaderWithStreamOutput", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (Array(Const(D3D11_SO_DECLARATION_ENTRY), "NumEntries"), "pSODeclaration"), (UINT, "NumEntries"), (Array(Const(UINT), "NumStrides"), "pBufferStrides"), (UINT, "NumStrides"), (UINT, "RasterizedStream"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11GeometryShader)), "ppGeometryShader")]),
StdMethod(HRESULT, "CreatePixelShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11PixelShader)), "ppPixelShader")]),
StdMethod(HRESULT, "CreateHullShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11HullShader)), "ppHullShader")]),
StdMethod(HRESULT, "CreateDomainShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11DomainShader)), "ppDomainShader")]),
StdMethod(HRESULT, "CreateComputeShader", [(Blob(Const(Void), "BytecodeLength"), "pShaderBytecode"), (SIZE_T, "BytecodeLength"), (ObjPointer(ID3D11ClassLinkage), "pClassLinkage"), Out(Pointer(ObjPointer(ID3D11ComputeShader)), "ppComputeShader")]),
StdMethod(HRESULT, "CreateClassLinkage", [Out(Pointer(ObjPointer(ID3D11ClassLinkage)), "ppLinkage")]),
StdMethod(HRESULT, "CreateBlendState", [(Pointer(Const(D3D11_BLEND_DESC)), "pBlendStateDesc"), Out(Pointer(ObjPointer(ID3D11BlendState)), "ppBlendState")]),
StdMethod(HRESULT, "CreateDepthStencilState", [(Pointer(Const(D3D11_DEPTH_STENCIL_DESC)), "pDepthStencilDesc"), Out(Pointer(ObjPointer(ID3D11DepthStencilState)), "ppDepthStencilState")]),
StdMethod(HRESULT, "CreateRasterizerState", [(Pointer(Const(D3D11_RASTERIZER_DESC)), "pRasterizerDesc"), Out(Pointer(ObjPointer(ID3D11RasterizerState)), "ppRasterizerState")]),
StdMethod(HRESULT, "CreateSamplerState", [(Pointer(Const(D3D11_SAMPLER_DESC)), "pSamplerDesc"), Out(Pointer(ObjPointer(ID3D11SamplerState)), "ppSamplerState")]),
StdMethod(HRESULT, "CreateQuery", [(Pointer(Const(D3D11_QUERY_DESC)), "pQueryDesc"), Out(Pointer(ObjPointer(ID3D11Query)), "ppQuery")]),
StdMethod(HRESULT, "CreatePredicate", [(Pointer(Const(D3D11_QUERY_DESC)), "pPredicateDesc"), Out(Pointer(ObjPointer(ID3D11Predicate)), "ppPredicate")]),
StdMethod(HRESULT, "CreateCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pCounterDesc"), Out(Pointer(ObjPointer(ID3D11Counter)), "ppCounter")]),
StdMethod(HRESULT, "CreateDeferredContext", [(UINT, "ContextFlags"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppDeferredContext")]),
StdMethod(HRESULT, "OpenSharedResource", [(HANDLE, "hResource"), (REFIID, "ReturnedInterface"), Out(Pointer(ObjPointer(Void)), "ppResource")]),
StdMethod(HRESULT, "CheckFormatSupport", [(DXGI_FORMAT, "Format"), Out(Pointer(D3D11_FORMAT_SUPPORT), "pFormatSupport")]),
StdMethod(HRESULT, "CheckMultisampleQualityLevels", [(DXGI_FORMAT, "Format"), (UINT, "SampleCount"), Out(Pointer(UINT), "pNumQualityLevels")]),
StdMethod(Void, "CheckCounterInfo", [Out(Pointer(D3D11_COUNTER_INFO), "pCounterInfo")]),
StdMethod(HRESULT, "CheckCounter", [(Pointer(Const(D3D11_COUNTER_DESC)), "pDesc"), Out(Pointer(D3D11_COUNTER_TYPE), "pType"), Out(Pointer(UINT), "pActiveCounters"), Out(LPSTR, "szName"), Out(Pointer(UINT), "pNameLength"), Out(LPSTR, "szUnits"), Out(Pointer(UINT), "pUnitsLength"), Out(LPSTR, "szDescription"), Out(Pointer(UINT), "pDescriptionLength")]),
StdMethod(HRESULT, "CheckFeatureSupport", [(D3D11_FEATURE, "Feature"), Out(D3D11_FEATURE_DATA, "pFeatureSupportData"), (UINT, "FeatureSupportDataSize")]),
StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "guid"), Out(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")]),
StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "guid"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")]),
StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "guid"), (OpaquePointer(Const(IUnknown)), "pData")]),
StdMethod(D3D_FEATURE_LEVEL, "GetFeatureLevel", []),
StdMethod(D3D11_CREATE_DEVICE_FLAG, "GetCreationFlags", []),
StdMethod(HRESULT, "GetDeviceRemovedReason", []),
StdMethod(Void, "GetImmediateContext", [Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
StdMethod(HRESULT, "SetExceptionMode", [(D3D11_RAISE_FLAG, "RaiseFlags")]),
StdMethod(UINT, "GetExceptionMode", []),
]
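# Assemble the d3d11 API: exported entry points first, then additional interfaces that can be reached at runtime.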
d3d11 = API("d3d11")
d3d11.addFunctions([
StdFunction(HRESULT, "D3D11CreateDevice", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
StdFunction(HRESULT, "D3D11CreateDeviceAndSwapChain", [(ObjPointer(IDXGIAdapter), "pAdapter"), (D3D_DRIVER_TYPE, "DriverType"), (HMODULE, "Software"), (D3D11_CREATE_DEVICE_FLAG, "Flags"), (Array(Const(D3D_FEATURE_LEVEL), "FeatureLevels"), "pFeatureLevels"), (UINT, "FeatureLevels"), (UINT, "SDKVersion"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC)), "pSwapChainDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain"), Out(Pointer(ObjPointer(ID3D11Device)), "ppDevice"), Out(Pointer(D3D_FEATURE_LEVEL), "pFeatureLevel"), Out(Pointer(ObjPointer(ID3D11DeviceContext)), "ppImmediateContext")]),
# XXX: Undocumented functions, called by d3d11sdklayers.dll when D3D11_CREATE_DEVICE_DEBUG is set
StdFunction(HRESULT, "D3D11CoreRegisterLayers", [LPCVOID, DWORD], internal=True),
StdFunction(SIZE_T, "D3D11CoreGetLayeredDeviceSize", [LPCVOID, DWORD], internal=True),
StdFunction(HRESULT, "D3D11CoreCreateLayeredDevice", [LPCVOID, DWORD, LPCVOID, (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppvObj")], internal=True),
StdFunction(HRESULT, "D3D11CoreCreateDevice", [DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD, DWORD], internal=True),
])
d3d11.addInterfaces([
IDXGIAdapter1,
IDXGIDevice1,
IDXGIResource,
ID3D11Debug,
ID3D11InfoQueue,
ID3D11SwitchToRef,
])
| 50.016168 | 596 | 0.739227 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29,628 | 0.478875 |
8a69c6a560d7f1d6a12a9bb69281971b56733693 | 1,637 | py | Python | setup.py | xbabka01/filetype.py | faba42b86988bd21a50d5b20919ecff0c6a84957 | ["MIT"] | null | null | null | setup.py | xbabka01/filetype.py | faba42b86988bd21a50d5b20919ecff0c6a84957 | ["MIT"] | null | null | null | setup.py | xbabka01/filetype.py | faba42b86988bd21a50d5b20919ecff0c6a84957 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from setuptools import find_packages, setup
setup(
name='filetype',
version='1.0.7',
description='Infer file type and MIME type of any file/buffer. '
'No external dependencies.',
long_description=codecs.open('README.rst', 'r',
encoding='utf-8', errors='ignore').read(),
keywords='file libmagic magic infer numbers magicnumbers discovery mime '
'type kind',
url='https://github.com/h2non/filetype.py',
download_url='https://github.com/h2non/filetype.py/tarball/master',
author='Tomas Aparicio',
author_email='tomas@aparicio.me',
license='MIT',
license_files=['LICENSE'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System',
'Topic :: System :: Filesystems',
'Topic :: Utilities'],
platforms=['any'],
packages=find_packages(exclude=['dist', 'build', 'docs', 'tests',
'examples']),
package_data={'filetype': ['LICENSE', '*.md']},
zip_safe=True)
| 38.069767 | 77 | 0.588882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 967 | 0.590715 |
8a69d4b012c5607f3bca22996d7b21d1f2aed261 | 2,049 | py | Python | demos/netmiko_textfsm.py | ryanaa08/NPA | 45173efa60713858bb8b1d884fe12c50fe69920c | [
"BSD-Source-Code"
] | 4 | 2019-01-15T16:15:26.000Z | 2021-12-05T16:03:15.000Z | demos/netmiko_textfsm.py | krishnakadiyala/NPAcourse | 74f097107839d990b44adcee69d4f949696a332c | [
"BSD-Source-Code"
] | null | null | null | demos/netmiko_textfsm.py | krishnakadiyala/NPAcourse | 74f097107839d990b44adcee69d4f949696a332c | [
"BSD-Source-Code"
] | 2 | 2019-07-04T16:38:19.000Z | 2020-01-31T15:38:27.000Z | # make sure templates are present and netmiko knows about them
# git clone https://github.com/networktocode/ntc-templates
# export NET_TEXTFSM=/home/ntc/ntc-templates/templates/
# see https://github.com/networktocode/ntc-templates/tree/master/templates
# for list of templates
from netmiko import ConnectHandler
import json
user = 'ntc'
pwd = 'ntc123'
d_type = 'cisco_ios'
csr1 = ConnectHandler(ip='csr1', username=user, password=pwd, device_type=d_type)
sh_ip_int_br = csr1.send_command("show ip int brief", use_textfsm=True)
# [{'status': 'up', 'intf': 'GigabitEthernet1', 'ipaddr': '10.0.0.51', 'proto': 'up'}, {'status': 'up', 'intf': 'GigabitEthernet2', 'ipaddr': 'unassigned', 'proto': 'up'}, {'status': 'up', 'intf': 'GigabitEthernet3', 'ipaddr': 'unassigned', 'proto': 'up'}, {'status': 'up', 'intf': 'GigabitEthernet4', 'ipaddr': '5.12.1.1', 'proto': 'up'}, {'status': 'up', 'intf': 'Loopback100', 'ipaddr': '10.200.1.20', 'proto': 'up'}]
# is type list
print (type(sh_ip_int_br))
# list of dicts
print (type(sh_ip_int_br[0]))
for each_dict in sh_ip_int_br:
    print("\n")
    for key in each_dict.keys():
        print(key)
for each_dict in sh_ip_int_br:
    print("\n")
    for key, value in each_dict.items():
        print(key + " is " + value)
sh_ver_ios = csr1.send_command("show version", use_textfsm=True)
# [{'running_image': 'packages.conf', 'hostname': 'csr1', 'uptime': '6 hours, 59 minutes', 'config_register': '0x2102', 'hardware': ['CSR1000V'], 'version': '16.6.2', 'serial': ['9KIBQAQ3OPE'], 'rommon': 'IOS-XE'}]
# print the json nicely
print (json.dumps(sh_ver_ios, indent=4))
print(sh_ver_ios)
# list
print(type(sh_ver_ios))
# each item is a dict
print(type(sh_ver_ios[0]))
# list of dicts with some nested lists within the dicts
for each_dict in sh_ver_ios:
    print("\n")
    for key, value in each_dict.items():
        if type(value) is list:
            print(key + " is ")
            for list_entry in value:
                print(list_entry)
        if type(value) is str:
            print(key + " is " + value)
| 35.947368 | 420 | 0.660322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,131 | 0.551977 |
8a6c4e202130d51c730ab01bd3f2f21e5ec32862 | 758 | py | Python | tools/data.py | seanys/2D-Irregular-Packing-Algorithm | cc10edff2bc2631fcbcb47acf7bb3215e5c5023c | ["MIT"] | 29 | 2020-02-07T06:41:25.000Z | 2022-03-16T18:04:07.000Z | tools/data.py | seanys/2D-Irregular-Packing-Algorithm | cc10edff2bc2631fcbcb47acf7bb3215e5c5023c | ["MIT"] | 6 | 2020-04-27T01:36:27.000Z | 2022-01-31T11:59:05.000Z | tools/data.py | seanys/2D-Irregular-Packing-Algorithm | cc10edff2bc2631fcbcb47acf7bb3215e5c5023c | ["MIT"] | 12 | 2020-05-05T05:34:06.000Z | 2022-03-26T07:32:46.000Z |
from tools.geofunc import GeoFunc
import pandas as pd
import json
def getData(index):
    '''Datasets that raise errors (hollow polygons): han, jakobs1, jakobs2'''
    '''Too many shapes, not handled yet: shapes, shirts, swim, trousers'''
    name=["ga","albano","blaz1","blaz2","dighe1","dighe2","fu","han","jakobs1","jakobs2","mao","marques","shapes","shirts","swim","trousers"]
    print("Start processing dataset", name[index])
    '''Width is not considered for now; everything is represented by scaling'''
    scale=[100,0.5,100,100,20,20,20,10,20,20,0.5,20,50]
    print("Scaling by a factor of", scale[index])
df = pd.read_csv("data/"+name[index]+".csv")
polygons=[]
for i in range(0,df.shape[0]):
for j in range(0,df['num'][i]):
poly=json.loads(df['polygon'][i])
GeoFunc.normData(poly,scale[index])
polygons.append(poly)
return polygons
| 36.095238 | 141 | 0.60686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.420746 |
8a6e9d6c995b4c34ef5a6722c4973c2c7fb333f1 | 1,065 | py | Python | projects/eyetracking/gen_adhd_sin.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | null | null | null | projects/eyetracking/gen_adhd_sin.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | null | null | null | projects/eyetracking/gen_adhd_sin.py | nirdslab/streaminghub | a0d9f5f8be0ee6f090bd2b48b9f596695497c2bf | [
"MIT"
] | 1 | 2020-01-22T15:35:29.000Z | 2020-01-22T15:35:29.000Z | #!/usr/bin/env python3
import glob
import os
import pandas as pd
import dfs
SRC_DIR = f"{dfs.get_data_dir()}/adhd_sin_orig"
OUT_DIR = f"{dfs.get_data_dir()}/adhd_sin"
if __name__ == '__main__':
files = glob.glob(f"{SRC_DIR}/*.csv")
file_names = list(map(os.path.basename, files))
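  # per file: keep the gaze/pupil columns, interpolate gaps, normalize gaze to [0, 1], average the two pupil diameters, and rebase time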
for file_name in file_names:
df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[
['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index()
df.columns = ['t', 'x', 'y', 'dl', 'dr']
# fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill
df = df.apply(lambda x: x.interpolate().fillna(method="bfill").fillna(method="ffill")).fillna(0)
df['x'] = df['x'] / 1920
df['y'] = df['y'] / 1080
df['d'] = (df['dl'] + df['dr']) / 2
# start with t=0, and set unit to ms
df['t'] = (df['t'] - df['t'].min()) / 1000
df = df[['t', 'x', 'y', 'd']].round(6).set_index('t')
df.to_csv(f'{OUT_DIR}/{file_name}')
print(f'Processed: {file_name}')
| 35.5 | 107 | 0.613146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.433803 |
8a73f2115b3d49a7048eebbbf6a7d009bf2bcb02 | 864 | py | Python | TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMaxSumPtWMass_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMaxSumPtWMass_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | TopQuarkAnalysis/TopJetCombination/python/TtSemiLepJetCombMaxSumPtWMass_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
#
# module to make the MaxSumPtWMass jet combination
#
findTtSemiLepJetCombMaxSumPtWMass = cms.EDProducer("TtSemiLepJetCombMaxSumPtWMass",
## jet input
jets = cms.InputTag("selectedPatJets"),
## lepton input
leps = cms.InputTag("selectedPatMuons"),
## maximum number of jets to be considered
maxNJets = cms.int32(4),
## nominal WMass parameter (in GeV)
wMass = cms.double(80.4),
## use b-tagging two distinguish between light and b jets
useBTagging = cms.bool(False),
## choose algorithm for b-tagging
bTagAlgorithm = cms.string("trackCountingHighEffBJetTags"),
## minimum b discriminator value required for b jets and
## maximum b discriminator value allowed for non-b jets
minBDiscBJets = cms.double(1.0),
maxBDiscLightJets = cms.double(3.0)
)
| 36 | 83 | 0.706019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.52662 |
8a78745915eb3a4aaf90865a024b4d8bafd46ca7 | 5,151 | py | Python | research/gnn/sgcn/postprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 1 | 2021-11-18T08:17:44.000Z | 2021-11-18T08:17:44.000Z | research/gnn/sgcn/postprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | null | null | null | research/gnn/sgcn/postprocess.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 2 | 2019-09-01T06:17:04.000Z | 2019-10-04T08:39:45.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess.
"""
import os
import argparse
import numpy as np
from src.ms_utils import calculate_auc
from mindspore import context, load_checkpoint
def softmax(x):
t_max = np.max(x, axis=1, keepdims=True) # returns max of each row and keeps same dims
e_x = np.exp(x - t_max) # subtracts each row with its max value
t_sum = np.sum(e_x, axis=1, keepdims=True) # returns sum of each row and keeps same dims
f_x = e_x / t_sum
return f_x
def score_model(preds, test_pos, test_neg, weight, bias):
"""
    Score the model on the held-out test edges.
    Args:
        preds (ndarray): Node embeddings produced by the trained network.
        test_pos (list): Positive test edges.
        test_neg (list): Negative test edges.
        weight (ndarray): Regression weight matrix loaded from the checkpoint.
        bias (ndarray): Regression bias loaded from the checkpoint.
Returns:
auc(Float32): AUC result.
f1(Float32): F1-Score result.
"""
score_positive_edges = np.array(test_pos, dtype=np.int32).T
score_negative_edges = np.array(test_neg, dtype=np.int32).T
test_positive_z = np.concatenate((preds[score_positive_edges[0, :], :],
preds[score_positive_edges[1, :], :]), axis=1)
test_negative_z = np.concatenate((preds[score_negative_edges[0, :], :],
preds[score_negative_edges[1, :], :]), axis=1)
    # each edge feature is two concatenated 64-dim node embeddings, i.e. (n_edges, 128), matching the (128, 3) regression weight
scores = np.dot(np.concatenate((test_positive_z, test_negative_z), axis=0), weight) + bias
probability_scores = np.exp(softmax(scores))
predictions = probability_scores[:, 0]/probability_scores[:, 0:2].sum(1)
# predictions = predictions.asnumpy()
targets = [0]*len(test_pos) + [1]*len(test_neg)
auc, f1 = calculate_auc(targets, predictions)
return auc, f1
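# Editor's sketch (not part of the original script): how score_model is meant
# to be called. The shapes below are assumptions -- (N, 64) node embeddings and
# a (128, 3) regression weight -- chosen to match the concatenation and dot
# product above.
def _score_model_demo():
    rng = np.random.RandomState(0)
    preds = rng.rand(100, 64).astype(np.float32)      # node embeddings
    test_pos = [(0, 1), (2, 3), (4, 5)]               # positive test edges
    test_neg = [(6, 7), (8, 9)]                       # negative test edges
    weight = rng.rand(128, 3).astype(np.float32)      # regression weight matrix
    bias = np.zeros(3, dtype=np.float32)              # regression bias
    return score_model(preds, test_pos, test_neg, weight, bias)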
def get_acc():
"""get infer Accuracy."""
parser = argparse.ArgumentParser(description='postprocess')
parser.add_argument('--dataset_name', type=str, default='bitcoin-otc', choices=['bitcoin-otc', 'bitcoin-alpha'],
help='dataset name')
parser.add_argument('--result_path', type=str, default='./ascend310_infer/input/', help='result Files')
parser.add_argument('--label_path', type=str, default='', help='y_test npy Files')
parser.add_argument('--mask_path', type=str, default='', help='test_mask npy Files')
parser.add_argument("--checkpoint_file", type=str, default='sgcn_alpha_f1.ckpt', help="Checkpoint file path.")
parser.add_argument("--edge_path", nargs="?",
default="./input/bitcoin_alpha.csv", help="Edge list csv.")
parser.add_argument("--features-path", nargs="?",
                        default="./input/bitcoin_alpha.csv", help="Node features csv.")
parser.add_argument("--test-size", type=float,
default=0.2, help="Test dataset size. Default is 0.2.")
parser.add_argument("--seed", type=int, default=42,
help="Random seed for sklearn pre-training. Default is 42.")
parser.add_argument("--spectral-features", default=True, dest="spectral_features", action="store_true")
parser.add_argument("--reduction-iterations", type=int,
default=30, help="Number of SVD iterations. Default is 30.")
parser.add_argument("--reduction-dimensions", type=int,
default=64, help="Number of SVD feature extraction dimensions. Default is 64.")
args_opt = parser.parse_args()
# Runtime
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=0)
# Create network
test_pos = np.load(os.path.join(args_opt.result_path, 'pos_test.npy'))
test_neg = np.load(os.path.join(args_opt.result_path, 'neg_test.npy'))
# Load parameters from checkpoint into network
param_dict = load_checkpoint(args_opt.checkpoint_file)
print(type(param_dict))
print(param_dict)
print(type(param_dict['regression_weights']))
print(param_dict['regression_weights'])
# load_param_into_net(net, param_dict)
pred = np.fromfile('./result_Files/repos_0.bin', np.float32)
if args_opt.dataset_name == 'bitcoin-otc':
pred = pred.reshape(5881, 64)
else:
pred = pred.reshape(3783, 64)
auc, f1 = score_model(pred, test_pos, test_neg, param_dict['regression_weights'].asnumpy(),
param_dict['regression_bias'].asnumpy())
print("Test set results:", "auc=", "{:.5f}".format(auc), "f1=", "{:.5f}".format(f1))
if __name__ == '__main__':
get_acc()
| 48.140187 | 117 | 0.644729 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,205 | 0.428072 |
8a790aaa3beecccbae1e5fe2d0bb1478dbadd597 | 1,841 | py | Python | VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt5.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 3 | 2018-11-27T06:30:23.000Z | 2021-05-30T15:56:32.000Z | VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt5.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 1 | 2018-11-15T02:00:31.000Z | 2021-12-06T02:20:32.000Z | VENV/lib/python3.6/site-packages/PyInstaller/hooks/hook-PyQt5.py | workingyifei/display-pattern-generator | b27be84c6221fa93833f283109870737b05bfbf6 | [
"MIT"
] | 1 | 2020-11-06T18:46:35.000Z | 2020-11-06T18:46:35.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2005-2017, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import (
get_module_attribute, is_module_satisfies, qt_menu_nib_dir, get_module_file_attribute,
collect_data_files)
from PyInstaller.compat import getsitepackages, is_darwin, is_win
# On Windows system PATH has to be extended to point to the PyQt5 directory.
# The PySide directory contains Qt dlls. We need to avoid including different
# version of Qt libraries when there is installed another application (e.g. QtCreator)
if is_win:
from PyInstaller.utils.win32.winutils import extend_system_path
extend_system_path([os.path.join(x, 'PyQt5') for x in getsitepackages()])
extend_system_path([os.path.join(os.path.dirname(get_module_file_attribute('PyQt5')),
'Qt', 'bin')])
# In the new consolidated mode any PyQt depends on _qt
hiddenimports = ['sip', 'PyQt5.Qt']
# Collect just the qt.conf file.
datas = [x for x in collect_data_files('PyQt5', False, os.path.join('Qt', 'bin')) if
x[0].endswith('qt.conf')]
# For Qt<5.4 to work on Mac OS X it is necessary to include `qt_menu.nib`.
# This directory contains some resource files necessary to run PyQt or PySide
# app.
if is_darwin:
# Version of the currently installed Qt 5.x shared library.
qt_version = get_module_attribute('PyQt5.QtCore', 'QT_VERSION_STR')
if is_module_satisfies('Qt < 5.4', qt_version):
datas = [(qt_menu_nib_dir('PyQt5'), '')]
| 42.813953 | 90 | 0.669745 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,053 | 0.571972 |
8a7ecd71a92cf19cd5b6422ac30a671d4195653c | 1,358 | py | Python | experiments/bst/setup.py | bigchaindb/privacy-protocols | d220f642c7c056e5ec179b47a8d0863dbc373d9d | [
"CC-BY-4.0"
] | 68 | 2017-08-02T14:22:59.000Z | 2022-02-19T05:27:42.000Z | experiments/bst/setup.py | bigchaindb/privacy-protocols | d220f642c7c056e5ec179b47a8d0863dbc373d9d | [
"CC-BY-4.0"
] | 6 | 2017-08-05T18:30:14.000Z | 2017-08-22T19:54:53.000Z | experiments/bst/setup.py | bigchaindb/privacy-protocols | d220f642c7c056e5ec179b47a8d0863dbc373d9d | [
"CC-BY-4.0"
] | 15 | 2017-08-22T16:04:26.000Z | 2022-03-13T10:36:02.000Z | """bst: BigchainDB Sharing Tools"""
from setuptools import setup, find_packages
install_requires = [
'base58~=0.2.2',
'PyNaCl~=1.1.0',
'bigchaindb-driver',
'click==6.7',
'colorama',
]
setup(
name='bst',
version='0.1.0',
description='bst: BigchainDB Sharing Tools',
long_description=(
'A collection of scripts with different patterns to share'
'private data on BigchainDB.'),
url='https://github.com/vrde/bst/',
author='Alberto Granzotto',
author_email='alberto@bigchaindb.com',
license='AGPLv3',
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Database',
'Topic :: Database :: Database Engines/Servers',
'Topic :: Software Development',
'Natural Language :: English',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
],
packages=find_packages(),
entry_points={
'console_scripts': [
'bst=bst.cli:main'
],
},
install_requires=install_requires
)
| 26.115385 | 74 | 0.594993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 806 | 0.59352 |
8a8aa73cf4c767bf7b906925d1382b404b94f301 | 1,834 | py | Python | Google/google_books/scrape_google_books.py | dimitryzub/blog-posts-archive | 0978aaa0c9f0142d6f996b81ce391930c5e3be35 | [
"CC0-1.0"
] | null | null | null | Google/google_books/scrape_google_books.py | dimitryzub/blog-posts-archive | 0978aaa0c9f0142d6f996b81ce391930c5e3be35 | [
"CC0-1.0"
] | null | null | null | Google/google_books/scrape_google_books.py | dimitryzub/blog-posts-archive | 0978aaa0c9f0142d6f996b81ce391930c5e3be35 | [
"CC0-1.0"
] | null | null | null | from parsel import Selector
import requests, json, re
params = {
"q": "richard branson",
"tbm": "bks",
"gl": "us",
"hl": "en"
}
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.87 Safari/537.36",
}
html = requests.get("https://www.google.com/search", params=params, headers=headers, timeout=30)
selector = Selector(text=html.text)
books_results = []
# https://regex101.com/r/mapBs4/1
book_thumbnails = re.findall(r"s=\\'data:image/jpg;base64,(.*?)\\'", str(selector.css("script").getall()), re.DOTALL)
for book_thumbnail, book_result in zip(book_thumbnails, selector.css(".Yr5TG")):
title = book_result.css(".DKV0Md::text").get()
link = book_result.css(".bHexk a::attr(href)").get()
displayed_link = book_result.css(".tjvcx::text").get()
snippet = book_result.css(".cmlJmd span::text").get()
author = book_result.css(".fl span::text").get()
author_link = f'https://www.google.com/search{book_result.css(".N96wpd .fl::attr(href)").get()}'
date_published = book_result.css(".fl+ span::text").get()
preview_link = book_result.css(".R1n8Q a.yKioRe:nth-child(1)::attr(href)").get()
more_editions_link = book_result.css(".R1n8Q a.yKioRe:nth-child(2)::attr(href)").get()
books_results.append({
"title": title,
"link": link,
"displayed_link": displayed_link,
"snippet": snippet,
"author": author,
"author_link": author_link,
"date_published": date_published,
"preview_link": preview_link,
"more_editions_link": f"https://www.google.com{more_editions_link}" if more_editions_link is not None else None,
"thumbnail": bytes(bytes(book_thumbnail, "ascii").decode("unicode-escape"), "ascii").decode("unicode-escape")
})
| 39.869565 | 135 | 0.657579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 773 | 0.421483 |
8a8bbdd35a1d135f6e6a32befca7b762678940d4 | 327 | py | Python | Python/Higher-Or-Lower/hol/__init__.py | AustinTSchaffer/DailyProgrammer | b16d9babb298ac5e879c514f9c4646b99c6860a8 | [
"MIT"
] | 1 | 2020-07-28T17:07:35.000Z | 2020-07-28T17:07:35.000Z | Python/Higher-Or-Lower/hol/__init__.py | AustinTSchaffer/DailyProgrammer | b16d9babb298ac5e879c514f9c4646b99c6860a8 | [
"MIT"
] | 5 | 2021-04-06T18:25:29.000Z | 2021-04-10T15:13:28.000Z | Python/Higher-Or-Lower/hol/__init__.py | AustinTSchaffer/DailyProgrammer | b16d9babb298ac5e879c514f9c4646b99c6860a8 | [
"MIT"
] | null | null | null | r"""
Contains classes and methods that can be used when simulating the game
Higher-or-Lower and performing statistical analysis on different games.
"""
from hol import (
cards,
constants,
)
from hol._hol import (
generate_all_games,
should_pick_higher,
is_a_winning_game,
generate_win_statistics,
)
| 17.210526 | 71 | 0.737003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.46789 |
8a995f399ed25fbe111acb3f8ad5749b538eef0a | 433 | py | Python | python/re_user.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | [
"Unlicense"
] | 1 | 2017-10-14T04:23:45.000Z | 2017-10-14T04:23:45.000Z | python/re_user.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | [
"Unlicense"
] | null | null | null | python/re_user.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: liwei@jike.com
import re
from urlparse import urlparse
def parse1():
p = re.compile(r"/(?P<uid>\d+)/(?P<mid>\w+)")
o = urlparse("http://weibo.com/2827699110/yz62AlEjF")
m = p.search(o.path)
print m.group('uid')
print m.group('mid')
def parse2():
exc_type_str = "<type 'exceptions.IndexError'>"
parse1()
| 22.789474 | 57 | 0.637413 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.517321 |
8a9d019bec9e50c7c8d759ea60e658149d43ef2a | 2,561 | py | Python | audiomentations/core/utils.py | jeongyoonlee/audiomentations | 7f0112ae310989430e0ef7eb32c4116114810966 | [
"MIT"
] | 1 | 2021-02-03T19:12:04.000Z | 2021-02-03T19:12:04.000Z | audiomentations/core/utils.py | jeongyoonlee/audiomentations | 7f0112ae310989430e0ef7eb32c4116114810966 | [
"MIT"
] | null | null | null | audiomentations/core/utils.py | jeongyoonlee/audiomentations | 7f0112ae310989430e0ef7eb32c4116114810966 | [
"MIT"
] | 1 | 2021-07-08T07:33:10.000Z | 2021-07-08T07:33:10.000Z | import os
from pathlib import Path
import numpy as np
AUDIO_FILENAME_ENDINGS = (".aiff", ".flac", ".m4a", ".mp3", ".ogg", ".opus", ".wav")
def get_file_paths(
root_path, filename_endings=AUDIO_FILENAME_ENDINGS, traverse_subdirectories=True
):
"""Return a list of paths to all files with the given filename extensions in a directory.
Also traverses subdirectories by default.
"""
file_paths = []
for root, dirs, filenames in os.walk(root_path):
filenames = sorted(filenames)
for filename in filenames:
input_path = os.path.abspath(root)
file_path = os.path.join(input_path, filename)
if filename.lower().endswith(filename_endings):
file_paths.append(Path(file_path))
if not traverse_subdirectories:
# prevent descending into subfolders
break
return file_paths
def calculate_rms(samples):
"""Given a numpy array of audio samples, return its Root Mean Square (RMS)."""
return np.sqrt(np.mean(np.square(samples), axis=-1))
def calculate_desired_noise_rms(clean_rms, snr):
"""
Given the Root Mean Square (RMS) of a clean sound and a desired signal-to-noise ratio (SNR),
calculate the desired RMS of a noise sound to be mixed in.
Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20
:param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0
:param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60
:return:
"""
a = float(snr) / 20
noise_rms = clean_rms / (10 ** a)
return noise_rms
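# Worked example (editor's sketch): a 20 dB SNR means the noise RMS must sit a
# factor of 10**(20/20) == 10 below the clean RMS, so 0.5 -> 0.05.
def _desired_noise_rms_example():
    noise_rms = calculate_desired_noise_rms(clean_rms=0.5, snr=20)
    assert abs(noise_rms - 0.05) < 1e-9
    return noise_rms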
def convert_decibels_to_amplitude_ratio(decibels):
return 10 ** (decibels / 20)
def is_waveform_multichannel(samples):
"""
Return bool that answers the question: Is the given ndarray a multichannel waveform or not?
:param samples: numpy ndarray
:return:
"""
return len(samples.shape) > 1
def is_spectrogram_multichannel(spectrogram):
"""
Return bool that answers the question: Is the given ndarray a multichannel spectrogram?
    :param spectrogram: numpy ndarray
:return:
"""
return len(spectrogram.shape) > 2 and spectrogram.shape[-1] > 1
def convert_float_samples_to_int16(y):
"""Convert floating-point numpy array of audio samples to int16."""
if not issubclass(y.dtype.type, np.floating):
raise ValueError("input samples not floating-point")
return (y * np.iinfo(np.int16).max).astype(np.int16)
| 31.617284 | 132 | 0.689184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,193 | 0.465834 |
8a9ed7740bcb98fbae13ca6bc7e08c9cb1a32fd1 | 4,384 | py | Python | semantic-segmentation/deeplabv3plus/dataset_utils.py | shikisawamura/nnabla-examples | baf4e4cc620dedbf4368683325c0fb868676850d | [
"Apache-2.0"
] | 1 | 2020-08-03T12:49:25.000Z | 2020-08-03T12:49:25.000Z | semantic-segmentation/deeplabv3plus/dataset_utils.py | takuseno/nnabla-examples | 070d25078ad3d5458744dbfd390cdd926e20e573 | [
"Apache-2.0"
] | null | null | null | semantic-segmentation/deeplabv3plus/dataset_utils.py | takuseno/nnabla-examples | 070d25078ad3d5458744dbfd390cdd926e20e573 | [
"Apache-2.0"
] | 1 | 2020-04-25T06:11:28.000Z | 2020-04-25T06:11:28.000Z | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
from scipy.misc import imread
from args import get_args
import matplotlib.pyplot as plt
def get_color():
# RGB format
return np.array([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [120, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128], [224, 224, 192]])
def encode_label(label):
'''
    Convert pixel values to the corresponding class numbers. Assumes the input label is a 3-dim (h, w, c) colour array whose channel order matches the palette returned by get_color().
'''
h, w, c = label.shape
new_label = np.zeros((h, w, 1), dtype=np.int32)
cls_to_clr_map = get_color()
for i in range(cls_to_clr_map.shape[0]):
#new_label[(label == cls_to_clr_map[i])[:,:,0]] = i
#new_label[np.argwhere((label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))]=i
        # debug print disabled: it ran once per class for every label image
        # print(np.where((label.astype(np.int32) == [120, 0, 128]).all(axis=2)))
if i == 21:
new_label[np.where(
(label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))] = 255
else:
new_label[np.where(
(label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))] = i
return new_label
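# Editor's sketch: encoding a tiny 1x2 label patch. The two colours are taken
# from get_color() above (index 0 and index 1 of the palette); the expected
# output is [0, 1].
def _encode_label_demo():
    patch = np.array([[[0, 0, 0], [128, 0, 0]]], dtype='float32')  # shape (1, 2, 3)
    encoded = encode_label(patch)
    return encoded.reshape(-1).tolist()  # [0, 1]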
# this method should generate train-image.txt and train-label.txt
def generate_path_files(data_dir, train_file, val_file):
ti = open('train_image.txt', 'w')
tl = open('train_label.txt', 'w')
vi = open('val_image.txt', 'w')
vl = open('val_label.txt', 'w')
rootdir = data_dir
train_text_file = open(train_file, "r")
lines = [line[:-1] for line in train_text_file]
for line in lines:
if os.path.exists(data_dir+'JPEGImages/'+line+'.jpg'):
ti.write(data_dir+'JPEGImages/'+line+'.jpg' + '\n')
assert (os.path.isfile(data_dir+'SegmentationClass/encoded/'+line +
'.npy')), "No matching label file for image : " + line + '.jpg'
tl.write(data_dir+'SegmentationClass/encoded/'+line + '.npy' + '\n')
val_text_file = open(val_file, "r")
lines = [line[:-1] for line in val_text_file]
for line in lines:
if os.path.exists(data_dir+'JPEGImages/'+line+'.jpg'):
vi.write(data_dir+'JPEGImages/'+line+'.jpg' + '\n')
assert (os.path.isfile(data_dir+'SegmentationClass/encoded/'+line +
'.npy')), "No matching label file for image : " + line + '.jpg'
vl.write(data_dir+'SegmentationClass/encoded/'+line + '.npy' + '\n')
ti.close()
tl.close()
vi.close()
vl.close()
def main():
'''
Arguments:
train-file = txt file containing randomly selected image filenames to be taken as training set.
val-file = txt file containing randomly selected image filenames to be taken as validation set.
data-dir = dataset directory
Usage: python dataset_utils.py --train-file="" --val-file="" --data_dir=""
'''
args = get_args()
data_dir = args.data_dir
if not os.path.exists(data_dir+'SegmentationClass/' + 'encoded/'):
os.makedirs(data_dir+'SegmentationClass/' + 'encoded/')
for filename in os.listdir(data_dir+'SegmentationClass/'):
if os.path.isdir(data_dir+'SegmentationClass/' + filename):
continue
label = imread(data_dir+'SegmentationClass/' +
filename).astype('float32')
label = encode_label(label)
np.save(data_dir+'SegmentationClass/' + 'encoded/' +
filename.split('.')[0] + '.npy', label)
generate_path_files(args.data_dir, args.train_file, args.val_file)
if __name__ == '__main__':
main()
| 38.79646 | 334 | 0.619297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,866 | 0.425639 |
8aa2d7e8d015afdc94844a8b1cce4b350015d579 | 3,637 | py | Python | Python/Examples/Macros/SettingsAxesOptimization.py | archformco/RoboDK-API | b3d0cad6a83f505811e2be273453ccb4579324f1 | [
"MIT"
] | 161 | 2018-03-23T01:27:08.000Z | 2022-03-23T12:18:35.000Z | Python/Examples/Macros/SettingsAxesOptimization.py | OxideDevX/RoboDK-API | 50357c38b2fcf58cf82d9b7bf61021cb900fd358 | [
"MIT"
] | 26 | 2018-11-19T10:18:58.000Z | 2022-03-28T18:37:11.000Z | Python/Examples/Macros/SettingsAxesOptimization.py | OxideDevX/RoboDK-API | 50357c38b2fcf58cf82d9b7bf61021cb900fd358 | [
"MIT"
] | 85 | 2018-03-22T19:25:35.000Z | 2022-03-30T04:46:59.000Z | # This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string.
# You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings.
# It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API.
#
# More information about the RoboDK API here:
# https://robodk.com/doc/en/RoboDK-API.html
# For more information visit:
# https://robodk.com/doc/en/PythonAPI/robolink.html
from robolink import * # RoboDK API
# JSON tools
import json
# Start the RoboDK API
RDK = Robolink()
# Ask the user to select a robot arm (6 axis robot wich can have external axes)
robot = RDK.ItemUserPick("Select a robot arm",ITEM_TYPE_ROBOT_ARM)
# Default optimization settings test template
AxesOptimSettings = {
# Optimization parameters:
"Active": 1, # Use generic axes optimization: 0=Disabled or 1=Enabled
"Algorithm": 2, # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead
"MaxIter": 650, # Max. number of iterations
"Tol": 0.0016, # Tolerance to stop iterations
# Absolute Reference joints (double):
"AbsJnt_1": 104.17,
"AbsJnt_2": 11.22,
"AbsJnt_3": 15.97,
"AbsJnt_4": -87.48,
"AbsJnt_5": -75.36,
"AbsJnt_6": 63.03,
"AbsJnt_7": 174.13,
"AbsJnt_8": 173.60,
"AbsJnt_9": 0,
# Using Absolute reference joints (0: No, 1: Yes):
"AbsOn_1": 1,
"AbsOn_2": 1,
"AbsOn_3": 1,
"AbsOn_4": 1,
"AbsOn_5": 1,
"AbsOn_6": 1,
"AbsOn_7": 1,
"AbsOn_8": 1,
"AbsOn_9": 1,
# Weight for absolute reference joints (double):
"AbsW_1": 100,
"AbsW_2": 100,
"AbsW_3": 100,
"AbsW_4": 89,
"AbsW_5": 90,
"AbsW_6": 92,
"AbsW_7": 92,
"AbsW_8": 96,
"AbsW_9": 50,
# Using for relative joint motion smoothing (0: No, 1: Yes):
"RelOn_1": 1,
"RelOn_2": 1,
"RelOn_3": 1,
"RelOn_4": 1,
"RelOn_5": 1,
"RelOn_6": 1,
"RelOn_7": 1,
"RelOn_8": 1,
"RelOn_9": 1,
# Weight for relative joint motion (double):
"RelW_1": 5,
"RelW_2": 47,
"RelW_3": 44,
"RelW_4": 43,
"RelW_5": 36,
"RelW_6": 47,
"RelW_7": 53,
"RelW_8": 59,
"RelW_9": 0,
}
# Update one value, for example, make it active:
ToUpdate = {}
ToUpdate["Active"] = 1
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)
# Example to make a partial or full update
count = 1
while True:
for i in range(7):
# Partial update
ToUpdate = {}
ToUpdate["AbsJnt_" + str(i+1)] = (count+i)*4
ToUpdate["AbsOn_" + str(i+1)] = count % 2
ToUpdate["AbsW_" + str(i+1)] = (count+i)
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)
# Full update
#OptimAxes_TEST["RefJoint_" + str(i+1)] = (count+i)*4
#OptimAxes_TEST["RefWeight_" + str(i+1)] = (count+i)
#OptimAxes_TEST["RefOn_" + str(i+1)] = count % 2
# Full update
#print(robot.setParam("OptimAxes", str(AxesOptimSettings)))
count = count + 1
# Read settings
json_data = robot.setParam("OptimAxes")
json_object = json.loads(json_data)
print(json.dumps(json_object, indent=4))
pause(0.2)
# Example to read the current axes optimization settings:
while True:
json_data = robot.setParam("OptimAxes")
json_object = json.loads(json_data)
print(json.dumps(json_object, indent=4))
pause(0.2)
| 28.414063 | 133 | 0.62854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,118 | 0.582348 |
8aaa6ef648c6ab0a8f38e3df5ebf0a4f712b233a | 2,313 | py | Python | infrastructure-provisioning/src/general/api/install_libs.py | roolrd/incubator-datalab | 2045207ecd1b381193f1a1ec143cc968716ad989 | [
"Apache-2.0"
] | 66 | 2020-10-03T08:36:48.000Z | 2022-03-20T23:16:20.000Z | infrastructure-provisioning/src/general/api/install_libs.py | roolrd/incubator-datalab | 2045207ecd1b381193f1a1ec143cc968716ad989 | [
"Apache-2.0"
] | 48 | 2019-02-28T12:11:33.000Z | 2020-09-15T08:27:08.000Z | infrastructure-provisioning/src/general/api/install_libs.py | roolrd/incubator-datalab | 2045207ecd1b381193f1a1ec143cc968716ad989 | [
"Apache-2.0"
] | 44 | 2019-01-14T10:31:55.000Z | 2020-09-22T17:53:33.000Z | #!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import os
import sys
import subprocess
if __name__ == "__main__":
success = True
try:
subprocess.run('cd /root; fab install-libs', shell=True, check=True)
except:
success = False
reply = dict()
reply['request_id'] = os.environ['request_id']
if success:
reply['status'] = 'ok'
else:
reply['status'] = 'err'
reply['response'] = dict()
try:
with open("/root/result.json") as f:
reply['response']['result'] = json.loads(f.read())
except:
reply['response']['result'] = {"error": "Failed to open result.json"}
reply['response']['log'] = "/var/log/datalab/{0}/{0}_{1}_{2}.log".format(os.environ['conf_resource'],
os.environ['project_name'],
os.environ['request_id'])
with open("/response/{}_{}_{}.json".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id']), 'w') as response_file:
response_file.write(json.dumps(reply))
try:
subprocess.run('chmod 666 /response/*', shell=True, check=True)
except:
success = False
if not success:
sys.exit(1) | 35.584615 | 105 | 0.565932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,321 | 0.57112 |
8aab4acf40735c2dc3547887c3be02d0b2808eff | 1,584 | py | Python | model_zoo/official/nlp/bert_thor/src/evaluation_config.py | GuoSuiming/mindspore | 48afc4cfa53d970c0b20eedfb46e039db2a133d5 | [
"Apache-2.0"
] | 55 | 2020-12-17T10:26:06.000Z | 2022-03-28T07:18:26.000Z | model_zoo/official/nlp/bert_thor/src/evaluation_config.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 1 | 2020-12-29T06:46:38.000Z | 2020-12-29T06:46:38.000Z | model_zoo/official/nlp/bert_thor/src/evaluation_config.py | forwhat461/mindspore | 59a277756eb4faad9ac9afcc7fd526e8277d4994 | [
"Apache-2.0"
] | 14 | 2021-01-29T02:39:47.000Z | 2022-03-23T05:00:26.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
config settings, will be used in finetune.py
"""
from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from .bert_model import BertConfig
cfg = edict({
'task': 'NER',
'num_labels': 41,
'data_file': '',
'schema_file': None,
'finetune_ckpt': '',
'use_crf': False,
'clue_benchmark': False,
})
bert_net_cfg = BertConfig(
batch_size=8 if not cfg.clue_benchmark else 1,
seq_length=512,
vocab_size=30522,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
use_relative_positions=False,
input_mask_from_dataset=True,
token_type_ids_from_dataset=True,
dtype=mstype.float32,
compute_type=mstype.float16,
)
| 28.8 | 78 | 0.693813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 802 | 0.506313 |
8aad8de20813d57dc973493fe2b63ad495089392 | 549 | py | Python | setup.py | swfrench/nginx-access-tailer | 5e060396ca749935c622e8e9c50b659b39e3675b | [
"BSD-3-Clause"
] | null | null | null | setup.py | swfrench/nginx-access-tailer | 5e060396ca749935c622e8e9c50b659b39e3675b | [
"BSD-3-Clause"
] | null | null | null | setup.py | swfrench/nginx-access-tailer | 5e060396ca749935c622e8e9c50b659b39e3675b | [
"BSD-3-Clause"
] | null | null | null | """TODO."""
from setuptools import setup
setup(
name='nginx-access-tailer',
version='0.1',
author='swfrench',
url='https://github.com/swfrench/nginx-tailer',
packages=['nginx_access_tailer',],
license='BSD three-clause license',
entry_points={
'console_scripts': ['nginx-access-tailer = nginx_access_tailer.__main__:main'],
},
install_requires=[
'python-gflags >= 3.1.1',
'google-cloud-monitoring >= 0.25.0',
],
test_suite='nose.collector',
tests_require=['nose', 'mock'],
)
| 24.954545 | 87 | 0.626594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.540984 |
8aaee662db93c29bfc4e01c664b5f8c132a76382 | 1,331 | py | Python | setup.py | richardARPANET/persistent-celery-beat-scheduler | d2cbdd12394eec282ccb97ac5ff894353c2e4ffd | [
"Apache-2.0"
] | 4 | 2018-04-04T13:03:08.000Z | 2018-04-16T18:50:45.000Z | setup.py | richardARPANET/persistent-celery-beat-scheduler | d2cbdd12394eec282ccb97ac5ff894353c2e4ffd | [
"Apache-2.0"
] | null | null | null | setup.py | richardARPANET/persistent-celery-beat-scheduler | d2cbdd12394eec282ccb97ac5ff894353c2e4ffd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*
import os
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
setup(
name='persistent-celery-beat-scheduler',
version='0.1.1.dev0',
packages=find_packages('src', exclude=('tests',)),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
description=(
'Celery Beat Scheduler that stores the scheduler data in Redis.'
),
author='Richard O\'Dwyer',
author_email='richard@richard.do',
license='Apache 2',
long_description='https://github.com/richardasaurus/persistent-celery-beat-scheduler',
install_requires=install_requires,
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
)
| 31.690476 | 90 | 0.643877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 710 | 0.533434 |
8ab2d6d56bce4e65f9e2921fdc0ec8fdc7ecb7fb | 855 | py | Python | venv/Lib/site-packages/patsy/test_regressions.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 710 | 2015-01-07T20:08:59.000Z | 2022-03-08T14:30:13.000Z | venv/Lib/site-packages/patsy/test_regressions.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 142 | 2015-01-07T02:20:27.000Z | 2021-11-15T04:23:02.000Z | venv/Lib/site-packages/patsy/test_regressions.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 101 | 2015-01-15T16:35:12.000Z | 2022-02-19T06:50:02.000Z | # This file is part of Patsy
# Copyright (C) 2013 Nathaniel Smith <njs@pobox.com>
# See file LICENSE.txt for license information.
# Regression tests for fixed bugs (when not otherwise better covered somewhere
# else)
from patsy import (EvalEnvironment, dmatrix, build_design_matrices,
PatsyError, Origin)
def test_issue_11():
# Give a sensible error message for level mismatches
# (At some points we've failed to put an origin= on these errors)
env = EvalEnvironment.capture()
data = {"X" : [0,1,2,3], "Y" : [1,2,3,4]}
formula = "C(X) + Y"
new_data = {"X" : [0,0,1,2,3,3,4], "Y" : [1,2,3,4,5,6,7]}
info = dmatrix(formula, data)
try:
build_design_matrices([info.design_info], new_data)
except PatsyError as e:
assert e.origin == Origin(formula, 0, 4)
else:
assert False
| 34.2 | 78 | 0.645614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 351 | 0.410526 |
8ab404c67e6f07e674ae9c5b07f6e6e0e0f914ac | 7,764 | py | Python | skimage/io/_plugins/pil_plugin.py | smheidrich/scikit-image | e9cf8b850c4c2800cc221be6f1dfff6a2a32a4eb | [
"BSD-3-Clause"
] | 3 | 2019-02-28T16:05:36.000Z | 2020-04-03T17:29:07.000Z | Lib/site-packages/skimage/io/_plugins/pil_plugin.py | caiyongji/Anaconda-py36.5-tensorflow-built-env | f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2 | [
"PSF-2.0"
] | 26 | 2020-03-24T18:07:06.000Z | 2022-03-12T00:12:27.000Z | Lib/site-packages/skimage/io/_plugins/pil_plugin.py | caiyongji/Anaconda-py36.5-tensorflow-built-env | f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2 | [
"PSF-2.0"
] | 3 | 2019-12-31T23:21:40.000Z | 2020-04-03T17:29:08.000Z | __all__ = ['imread', 'imsave']
import numpy as np
from PIL import Image
from ...util import img_as_ubyte, img_as_uint
def imread(fname, dtype=None, img_num=None, **kwargs):
"""Load an image from file.
Parameters
----------
fname : str or file
File name or file-like-object.
dtype : numpy dtype object or string specifier
Specifies data type of array elements.
img_num : int, optional
Specifies which image to read in a file with multiple images
(zero-indexed).
kwargs : keyword pairs, optional
Addition keyword arguments to pass through.
Notes
-----
Files are read using the Python Imaging Library.
See PIL docs [1]_ for a list of supported formats.
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
if isinstance(fname, str):
with open(fname, 'rb') as f:
im = Image.open(f)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
else:
im = Image.open(fname)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
def pil_to_ndarray(image, dtype=None, img_num=None):
"""Import a PIL Image object to an ndarray, in memory.
Parameters
----------
Refer to ``imread``.
"""
try:
# this will raise an IOError if the file is not readable
image.getdata()[0]
except IOError as e:
site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
pillow_error_message = str(e)
error_message = ('Could not load "%s" \n'
'Reason: "%s"\n'
'Please see documentation at: %s'
% (image.filename, pillow_error_message, site))
raise ValueError(error_message)
frames = []
grayscale = None
i = 0
while 1:
try:
image.seek(i)
except EOFError:
break
frame = image
if img_num is not None and img_num != i:
image.getdata()[0]
i += 1
continue
if image.format == 'PNG' and image.mode == 'I' and dtype is None:
dtype = 'uint16'
if image.mode == 'P':
if grayscale is None:
grayscale = _palette_is_grayscale(image)
if grayscale:
frame = image.convert('L')
else:
if image.format == 'PNG' and 'transparency' in image.info:
frame = image.convert('RGBA')
else:
frame = image.convert('RGB')
elif image.mode == '1':
frame = image.convert('L')
elif 'A' in image.mode:
frame = image.convert('RGBA')
elif image.mode == 'CMYK':
frame = image.convert('RGB')
if image.mode.startswith('I;16'):
shape = image.size
dtype = '>u2' if image.mode.endswith('B') else '<u2'
if 'S' in image.mode:
dtype = dtype.replace('u', 'i')
frame = np.fromstring(frame.tobytes(), dtype)
frame.shape = shape[::-1]
else:
frame = np.array(frame, dtype=dtype)
frames.append(frame)
i += 1
if img_num is not None:
break
if hasattr(image, 'fp') and image.fp:
image.fp.close()
if img_num is None and len(frames) > 1:
return np.array(frames)
elif frames:
return frames[0]
elif img_num:
raise IndexError('Could not find image #%s' % img_num)
def _palette_is_grayscale(pil_image):
"""Return True if PIL image in palette mode is grayscale.
Parameters
----------
pil_image : PIL image
PIL Image that is in Palette mode.
Returns
-------
is_grayscale : bool
True if all colors in image palette are gray.
"""
assert pil_image.mode == 'P'
# get palette as an array with R, G, B columns
palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
# Not all palette colors are used; unused colors have junk values.
start, stop = pil_image.getextrema()
valid_palette = palette[start:stop + 1]
# Image is grayscale if channel differences (R - G and G - B)
# are all zero.
return np.allclose(np.diff(valid_palette), 0)
def ndarray_to_pil(arr, format_str=None):
"""Export an ndarray to a PIL object.
Parameters
----------
Refer to ``imsave``.
"""
if arr.ndim == 3:
arr = img_as_ubyte(arr)
mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
elif format_str in ['png', 'PNG']:
mode = 'I;16'
mode_base = 'I'
if arr.dtype.kind == 'f':
arr = img_as_uint(arr)
elif arr.max() < 256 and arr.min() >= 0:
arr = arr.astype(np.uint8)
mode = mode_base = 'L'
else:
arr = img_as_uint(arr)
else:
arr = img_as_ubyte(arr)
mode = 'L'
mode_base = 'L'
try:
array_buffer = arr.tobytes()
except AttributeError:
array_buffer = arr.tostring() # Numpy < 1.9
if arr.ndim == 2:
im = Image.new(mode_base, arr.T.shape)
try:
im.frombytes(array_buffer, 'raw', mode)
except AttributeError:
im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7
else:
image_shape = (arr.shape[1], arr.shape[0])
try:
im = Image.frombytes(mode, image_shape, array_buffer)
except AttributeError:
im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7
return im
def imsave(fname, arr, format_str=None, **kwargs):
"""Save an image to disk.
Parameters
----------
fname : str or file-like object
Name of destination file.
arr : ndarray of uint8 or float
Array (image) to save. Arrays of data-type uint8 should have
values in [0, 255], whereas floating-point arrays must be
in [0, 1].
format_str: str
Format to save as, this is defaulted to PNG if using a file-like
object; this will be derived from the extension if fname is a string
kwargs: dict
Keyword arguments to the Pillow save function (or tifffile save
function, for Tiff files). These are format dependent. For example,
Pillow's JPEG save function supports an integer ``quality`` argument
with values in [1, 95], while TIFFFile supports a ``compress``
integer argument with values in [0, 9].
Notes
-----
Use the Python Imaging Library.
See PIL docs [1]_ for a list of other supported formats.
    All images besides single channel PNGs are converted using `img_as_ubyte`.
    Single Channel PNGs have the following behavior:
    - Integer values in [0, 255] and Boolean types -> img_as_ubyte
    - Floating point and other integers -> img_as_uint
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
# default to PNG if file-like object
if not isinstance(fname, str) and format_str is None:
format_str = "PNG"
# Check for png in filename
if (isinstance(fname, str)
and fname.lower().endswith(".png")):
format_str = "PNG"
arr = np.asanyarray(arr)
if arr.dtype.kind == 'b':
arr = arr.astype(np.uint8)
if arr.ndim not in (2, 3):
raise ValueError("Invalid shape for image array: %s" % (arr.shape, ))
if arr.ndim == 3:
if arr.shape[2] not in (3, 4):
raise ValueError("Invalid number of channels in image array.")
img = ndarray_to_pil(arr, format_str=format_str)
img.save(fname, format=format_str, **kwargs)
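# Editor's sketch of the single-channel PNG behaviour documented above: a float
# image is written as a 16-bit PNG and read back as uint16. The file name is
# hypothetical and this helper is not part of the original plugin.
def _imsave_roundtrip_demo(path='example_gray.png'):
    gray = np.linspace(0, 1, 16).reshape(4, 4)  # float64 in [0, 1]
    imsave(path, gray)                          # stored via img_as_uint
    restored = imread(path)
    return restored.dtype                       # expected: dtype('uint16')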
| 29.861538 | 93 | 0.579341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,314 | 0.426842 |
8ac004a4f19bb41d9cfa8a39529011d30c5a08dc | 5,455 | py | Python | main.py | jonodrew/matchex | 531e7cd1c328cb9dc34b601a06648bd2c3e709e6 | [
"MIT"
] | null | null | null | main.py | jonodrew/matchex | 531e7cd1c328cb9dc34b601a06648bd2c3e709e6 | [
"MIT"
] | null | null | null | main.py | jonodrew/matchex | 531e7cd1c328cb9dc34b601a06648bd2c3e709e6 | [
"MIT"
] | null | null | null | from __future__ import division
from timeit import default_timer as timer
import csv
import numpy as np
import itertools
from munkres import Munkres, print_matrix, make_cost_matrix
import sys
from classes import *
from functions import *
from math import sqrt
import Tkinter as tk
import tkFileDialog as filedialog
root = tk.Tk()
root.withdraw()
p_file = filedialog.askopenfilename(title='Please select the posting file')
c_file = filedialog.askopenfilename(title='Please select the candidate file')
"""for use with /users/java_jonathan/postings_lge.csv and
/Users/java_jonathan/candidates_lge.csv"""
# p_file = raw_input("Please enter the path for the postings file: ")
# p_file = p_file.strip()
# c_file = raw_input("Please enter the path for the candidate file: ")
# c_file = c_file.strip()
start = timer()
with open(p_file,'r') as f:
#with open('/Users/Jonathan/Google Drive/CPD/Python/postings.csv','r') as f:
reader = csv.reader(f)
postingsAll = list(reader)
with open(c_file,'r') as f:
reader = csv.reader(f)
candidatesAll = list(reader)
"""create empty lists to fill with lists of lists output by iterating function
below"""
names = []
totalMatrix = []
for row in candidatesAll:
    candidate = Candidate(*row)
    names.append(candidate.name)
    n = 0
    for row in postingsAll:
        posting = Posting(*row)
totalMatrix.append(matchDept(posting,candidate) + matchAnchor(posting,candidate)
+matchLocation(posting,candidate) + matchCompetency(posting,candidate) +
matchSkill(posting,candidate)+matchCohort(posting,candidate))
n += 1
l = len(names)
names.extend([0] * (n-l))
totalMatrix.extend([0] * (n**2 - len(totalMatrix)))
totalMatrix = np.asarray(totalMatrix)
totalMatrix = np.reshape(totalMatrix,(n,-1))
#at this point the matrix is structured as candidates down and jobs across
totalMatrix = np.transpose(totalMatrix)
#now it's switched!
totalMatrix = np.subtract(np.amax(totalMatrix),totalMatrix)
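# Munkres solves a minimisation (cost) problem, so subtracting every score from
# the matrix maximum turns "higher is better" suitability scores into costs.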
totalMatrix = np.array(totalMatrix)
minSuitability = 18
check = []
result = []
m = Munkres()
indexes = m.compute(totalMatrix)
#print_matrix(totalMatrix, msg='Lowest cost through this matrix:')
total = 0.0
unhappy_candidates = 0
medium_candidates = 0
tenpc_candidates = 0
qs_candidates = 0
vs_candidates = 0
f = open('output.txt', 'w')
for row, column in indexes:
if column < l:
value = totalMatrix[row][column]
if value > minSuitability*0.9:
tenpc_candidates += 1
elif value > minSuitability*0.75:
medium_candidates += 1
elif value > minSuitability/2:
unhappy_candidates += 1
elif value > minSuitability*0.25:
qs_candidates += 1
elif value > minSuitability*0.1:
vs_candidates += 1
total += value
check.append(column+1)
result.append((row,column))
f.write('For candidate %s: \nOptimal position: %d (score %s)\n'
% (names[column], column+1, value))
else:
pass
globalSatisfaction = 100*(1-(total/(l*minSuitability)))
print('Global satisfaction: %.2f%%' % globalSatisfaction)
print('Candidates who are more than 90%% suitable: %d' % vs_candidates)
print('Candidates who are more than 75%% suitable: %d' % qs_candidates)
print('Candidates who are more than 50%% suitable: %d' % (l-unhappy_candidates))
print('Candidates who are more than 75%% unsuitable: %d' % medium_candidates)
print('Candidates who are more than 90%% unsuitable: %d' % tenpc_candidates)
#output from excel:
correct = [1,3,5,9,10,2,4,8,6,7]
#this function tests output above against Excel:
#test(correct,check)
topMatrix = topFive(names,totalMatrix)
#print(topMatrix)
np.savetxt('/Users/java_jonathan/test.csv',topMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
np.savetxt('/Users/java_jonathan/test2.csv',totalMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
end = timer()
print(end-start)
"""
#posting = [Posting(*postingsAll)]
#print(posting[0].anchor)
#print(posting)
#print(candidatesAll)
#print(postingsAll)
#print(postingsAll[0].name)
#print(preferences)
#print(postings)
#split up files into relative blocks
postCode = [lists[0] for lists in postings]
postDept = [lists[1] for lists in postings]
postAnchor = [lists[2] for lists in postings]
postSkills = [lists[3:5] for lists in postings]
postLocation = [lists[5] for lists in postings]
postCompetencies = [lists[7:10] for lists in postings]
postSecurity = [lists[10] for lists in postings]
#with open('/Users/Jonathan/Google Drive/CPD/Python/candidates.csv','r') as f:
#gives first column ie candidate a
a=totalMatrix[:,[0]]
#b = totalMatrix[:,[0]]
#print(a)
#converts 1D matrix to list for ease
a = np.array(a).tolist()
#print(a)
#creates list called output containing rank of score
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
#creates tuples of rank, job and appends to list
jobRank = []
# for rank, b in zip(output, postCode):
# jobScore = (rank,b)
# list(jobScore)
# jobRank.append(jobScore)
# print(jobRank)
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
# #print(a)
# jobRank = sorted(jobRank, reverse=False)
# print(jobRank)
# print('For candidate a, the best position is %s') % (jobRank[0][1])
# print(candidate[0].skills)
"""
| 30.646067 | 88 | 0.698075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,710 | 0.496792 |
8ac00891cba917dcea99bd7701a43788bba03334 | 3,142 | py | Python | pip_info/setup.py | 95616ARG/SyReNN | 19abf589e84ee67317134573054c648bb25c244d | [
"MIT"
] | 36 | 2019-08-19T06:17:52.000Z | 2022-03-11T09:02:40.000Z | pip_info/setup.py | 95616ARG/SyReNN | 19abf589e84ee67317134573054c648bb25c244d | [
"MIT"
] | 8 | 2020-04-09T20:59:04.000Z | 2022-03-11T23:56:50.000Z | pip_info/setup.py | 95616ARG/SyReNN | 19abf589e84ee67317134573054c648bb25c244d | [
"MIT"
] | 4 | 2021-01-13T11:17:55.000Z | 2021-06-28T19:36:04.000Z | """Setup script for PySyReNN.
Adapted from:
https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
"""
import codecs
import os
import re
from setuptools import setup, find_packages
###################################################################
NAME = "pysyrenn"
PACKAGES = [
"syrenn_proto",
"pysyrenn",
"pysyrenn.frontend",
"pysyrenn.helpers",
]
META_PATH = "__metadata__.py"
KEYWORDS = ["class", "attribute", "boilerplate"]
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = ["torch"]
with open("requirements.txt") as requirements:
reading = False
for line in requirements.readlines():
if line.startswith("# PYSYRENN"):
reading = True
elif line.startswith("# END"):
reading = False
elif line.startswith("#"):
pass
elif reading:
INSTALL_REQUIRES.append(line.strip().split("==")[0])
###################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Build an absolute path from *parts* and and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=find_meta("uri"),
version=find_meta("version"),
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
keywords=KEYWORDS,
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=PACKAGES,
package_dir={"": "."},
package_data={"": ["pysyrenn/**/*.py"]},
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
)
| 30.803922 | 77 | 0.595799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,515 | 0.482177 |
8ac046daf66291ca73b420ce81a183abc787e157 | 51 | py | Python | neptune/generated/swagger_client/path_constants.py | jiji-online/neptune-cli | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | [
"Apache-2.0"
] | null | null | null | neptune/generated/swagger_client/path_constants.py | jiji-online/neptune-cli | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | [
"Apache-2.0"
] | null | null | null | neptune/generated/swagger_client/path_constants.py | jiji-online/neptune-cli | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | [
"Apache-2.0"
] | null | null | null | REST_PATH = u""
WS_PATH = u"/api/notifications/v1"
| 17 | 34 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.529412 |
8ad1153bc4951b73c09bcd9a5a044f2aeefb38fb | 13,832 | py | Python | gym/gym/benchmarks/__init__.py | youngwoon/DnC-RL-Tensorflow | 02dc2750fe301a01e3bd68b1e56fc7fd754c2f3f | [
"MIT"
] | 9 | 2019-02-01T22:45:57.000Z | 2022-01-08T16:13:24.000Z | gym/gym/benchmarks/__init__.py | youngwoon/DnC-RL-Tensorflow | 02dc2750fe301a01e3bd68b1e56fc7fd754c2f3f | [
"MIT"
] | null | null | null | gym/gym/benchmarks/__init__.py | youngwoon/DnC-RL-Tensorflow | 02dc2750fe301a01e3bd68b1e56fc7fd754c2f3f | [
"MIT"
] | 1 | 2020-04-07T20:09:48.000Z | 2020-04-07T20:09:48.000Z | # EXPERIMENTAL: all may be removed soon
from gym.benchmarks import scoring
from gym.benchmarks.registration import benchmark_spec, register_benchmark, registry, register_benchmark_view # imports used elsewhere
register_benchmark(
id='Atari200M',
scorer=scoring.TotalReward(),
name='Atari200M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
},
])
register_benchmark(
id='Atari40M',
scorer=scoring.TotalReward(),
name='Atari40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
}
])
register_benchmark(
id='AtariExploration40M',
scorer=scoring.TotalReward(),
name='AtariExploration40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'FreewayNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.1,
'reward_ceiling': 31.0,
},
{
'env_id': 'GravitarNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 245.5,
'reward_ceiling': 1000.0,
},
{
'env_id': 'MontezumaRevengeNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 25.0,
'reward_ceiling': 10000.0,
},
{
'env_id': 'PitfallNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -348.8,
'reward_ceiling': 1000.0,
},
{
'env_id': 'PrivateEyeNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 662.8,
'reward_ceiling': 100.0,
},
{
'env_id': 'SolarisNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 2047.2,
'reward_ceiling': 5000.0,
},
{
'env_id': 'VentureNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 18.0,
'reward_ceiling': 100.0,
}
])
register_benchmark(
id='ClassicControl2-v0',
name='ClassicControl2',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v0',
'trials': 1,
'max_timesteps': 2000,
},
{'env_id': 'Pendulum-v0',
'trials': 1,
'max_timesteps': 1000,
},
])
register_benchmark(
id='ClassicControl-v0',
name='ClassicControl',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': 0.0,
'reward_ceiling': 500.0,
},
{'env_id': 'Acrobot-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MountainCar-v0',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -200.0,
'reward_ceiling': -100.0,
},
{'env_id': 'Pendulum-v0',
'trials': 3,
'max_timesteps': 200000,
'reward_floor': -1400.0,
'reward_ceiling': 0.0,
},
])
### Autogenerated by tinkerbell.benchmark.convert_benchmark.py
register_benchmark(
id='Mujoco10M-v0',
name='Mujoco10M',
view_group="Control",
description='Mujoco benchmark with 10M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'Ant-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Hopper-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Humanoid-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'HumanoidStandup-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Walker2d-v1',
'trials': 1,
'max_timesteps': 1000000,
}
])
register_benchmark(
id='Mujoco1M-v0',
name='Mujoco1M',
view_group="Control",
description='Mujoco benchmark with 1M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'HalfCheetah-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -280.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'Hopper-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 16.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'InvertedDoublePendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 53.0,
'reward_ceiling': 10000.0,
},
{'env_id': 'InvertedPendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 5.6,
'reward_ceiling': 1000.0,
},
{'env_id': 'Reacher-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -43.0,
'reward_ceiling': -0.5,
},
{'env_id': 'Swimmer-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 0.23,
'reward_ceiling': 500.0,
},
{'env_id': 'Walker2d-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 1.6,
'reward_ceiling': 5500.0,
}
])
register_benchmark(
id='MinecraftEasy-v0',
name='MinecraftEasy',
view_group="Minecraft",
description='Minecraft easy benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftBasic-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -2200.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftDefaultFlat1-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MinecraftTrickyArena1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -1000.0,
'reward_ceiling': 2800.0,
},
{'env_id': 'MinecraftEating1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -300.0,
'reward_ceiling': 300.0,
},
])
register_benchmark(
id='MinecraftMedium-v0',
name='MinecraftMedium',
view_group="Minecraft",
description='Minecraft medium benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftCliffWalking1-v0',
'trials': 2,
'max_timesteps': 400000,
'reward_floor': -100.0,
'reward_ceiling': 100.0,
},
{'env_id': 'MinecraftVertical-v0',
'trials': 2,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 8040.0,
},
{'env_id': 'MinecraftMaze1-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftMaze2-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftHard-v0',
name='MinecraftHard',
view_group="Minecraft",
description='Minecraft hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftObstacles-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 2080.0,
},
{'env_id': 'MinecraftSimpleRoomMaze-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 4160.0,
},
{'env_id': 'MinecraftAttic-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1040.0,
},
{'env_id': 'MinecraftComplexityUsage-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftVeryHard-v0',
name='MinecraftVeryHard',
view_group="Minecraft",
description='Minecraft very hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftMedium-v0',
'trials': 2,
'max_timesteps': 1800000,
'reward_floor': -10000.0,
'reward_ceiling': 16280.0,
},
{'env_id': 'MinecraftHard-v0',
'trials': 2,
'max_timesteps': 2400000,
'reward_floor': -10000.0,
'reward_ceiling': 32640.0,
},
])
register_benchmark(
id='MinecraftImpossible-v0',
name='MinecraftImpossible',
view_group="Minecraft",
description='Minecraft impossible benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftDefaultWorld1-v0',
'trials': 2,
'max_timesteps': 6000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
bandit_tasks = []
for n_arms in [5, 10, 50]:
for n_episodes in [10, 100, 500]:
bandit_tasks.append({
'env_id': 'BernoulliBandit-{k}.arms-{n}.episodes-v0'.format(k=n_arms, n=n_episodes),
'trials': 1,
'max_timesteps': 10 ** 9,
'reward_floor': 0,
'reward_ceiling': n_episodes,
})
register_benchmark(
id='BernoulliBandit-v0',
name='BernoulliBandit',
description='Multi-armed Bernoulli bandits',
scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
tasks=bandit_tasks
)
tabular_mdp_tasks = []
for n_states in [10]:
for n_actions in [5]:
for episode_length in [10]:
for n_episodes in [10, 25, 50, 75, 100]:
tabular_mdp_tasks.append({
'env_id': 'RandomTabularMDP-{s}.states-{a}.actions-{t}.timesteps-{n}.episodes-v0'.format(
s=n_states, a=n_actions, t=episode_length, n=n_episodes,
),
'trials': 1,
'max_timesteps': 10 ** 9,
'reward_floor': 0,
'reward_ceiling': episode_length * n_episodes * 2,
})
register_benchmark(
id='RandomTabularMDP-v0',
name='RandomTabularMDP',
description='Random tabular MDPs',
scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
tasks=tabular_mdp_tasks
)
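# Rough illustration (added here; not part of the original registration code) of
# how `reward_floor` and `reward_ceiling` are typically used: an episode return
# is normalized to [0, 1] before trial scores are averaged. The helper below is
# only a sketch of that normalization, not the scorer's actual implementation.
def _normalized_score_sketch(episode_return, reward_floor, reward_ceiling):
    span = reward_ceiling - reward_floor
    return min(max((episode_return - reward_floor) / span, 0.0), 1.0)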
| 28.286299 | 135 | 0.510049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,577 | 0.403195 |
8ad19946c7489c1b3a99e589e195e1b73244786f | 9,538 | py | Python | hypnettorch/data/timeseries/preprocess_audioset.py | pennfranc/hypnettorch | 69d4c455028289ebe3d040af0955d909a9fef3ae | [
"Apache-2.0"
] | 31 | 2021-10-20T19:38:41.000Z | 2022-03-28T08:23:32.000Z | hypnettorch/data/timeseries/preprocess_audioset.py | pennfranc/hypnettorch | 69d4c455028289ebe3d040af0955d909a9fef3ae | [
"Apache-2.0"
] | 2 | 2022-02-14T08:25:43.000Z | 2022-03-26T18:10:52.000Z | hypnettorch/data/timeseries/preprocess_audioset.py | pennfranc/hypnettorch | 69d4c455028289ebe3d040af0955d909a9fef3ae | [
"Apache-2.0"
] | 5 | 2021-11-04T10:10:29.000Z | 2022-03-21T09:00:22.000Z | #!/usr/bin/env python3
# Copyright 2020 Benjamin Ehret
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# title :data/timeseries/preprocess_audioset.py
# author :be
# contact :behret@ethz.ch
# created :31/03/2020
# version :1.0
# python_version :3.7
"""
Script to structure the audioset dataset, which can then be used via
:class:`data.timeseries.audioset_data.AudiosetData`.
The result of this script is available at
https://www.dropbox.com/s/07dfeeuf5aq4w1h/audioset_data_balanced?dl=0
If you want to recreate or modify this dataset, download the audioset data from
https://research.google.com/audioset/download.html
and extract the tar.gz into the following folder:
``datasets/sequential/audioset/audioset_download``.
Subsequently executing this script will create a pickle file containing the 100
class subset of audioset used in this study.
The dataset is stored in tensorflow files. Since we work with pytorch and there
is no utility to read tensorflow files, we extract the data and save them as
numpy arrays in a pickle file.
Furthermore the data are preprocessed to fit our continual learning experiments.
The original dataset provides three subsets with different compositions of
samples and classes. Since we only work with a subset of classes and samples,
we load all available data and then filter and structure them according to our
criteria.
We use the same criteria as Kemker et al. Classes and samples are restricted in
the following way:
Classes:
- no restriction according to ontology file (parsed from ontology.json)
- no parent / child relationship (parsed from ontology.json)
- confidence level > 70% (data was copied from website into txt file)
- number of samples: we only take classes that have more samples than
a certain threshold
Samples:
- since samples can have multiple labels, we only use samples which
only belong to one of the classes we use
- we exclude samples that don't have the full length of 10 seconds
The chosen classes and samples are then split into train and test data and
saved to a pickle file.
"""
import numpy as np
import pickle
import tensorflow as tf
import os
import json
from warnings import warn
warn('The script was created for one time usage and has to be adapted when ' +
'reusing it. All paths specified here are absolute.')
# Tensorflow eager mode needs to be enabled for dataset mapping to work!
tf.enable_eager_execution()
# Set paths and parameters
data_dir = '../../datasets/sequential/audioset/'
download_dir = os.path.join(data_dir,'audioset_download')
fpath_conf_data = os.path.join(data_dir, 'confidence_data.csv')
fpath_label_inds = os.path.join(data_dir, 'class_labels_indices.csv')
fpath_ontology = os.path.join(data_dir, 'ontology.json')
target_path = os.path.join(data_dir, 'audioset_data_balanced.pickle')
n_classes = 100
n_sample = 1000
test_frac = 0.20
### Load data by serializing files and applying decode function.
def decode(serialized_example):
"""Decode data from TFRecord files.
Args:
serialized_example: serialized_example as created by
tf.data.TFRecordDataset
Returns:
(tuple): Tuple containing:
- **audio** (numpy.ndarray): Array of shape (10,128) representing one
sample with 10 timesteps and 128 features
- **label** (numpy.ndarray): Array of shape (1,) containing the class
of the corresponding sample
"""
sequence_features = {
'audio_embedding': tf.FixedLenSequenceFeature([], tf.string),
}
context_features = {
'start_time_seconds': tf.FixedLenFeature([], tf.float32),
'labels': tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized_example,
sequence_features=sequence_features,
context_features=context_features
)
audio = tf.decode_raw(sequence_parsed['audio_embedding'], tf.uint8)
label = tf.cast(context_parsed['labels'], tf.int64)
return audio, label
# Apply decode function to all dataset entries using map function.
# Take files from all three data sets since we repartition anyway.
fpaths = []
for path, subdirs, files in os.walk(download_dir):
for name in files:
if 'tfrecord' in name:
fpaths.append(os.path.join(path, name))
# Create dataset and decode
dataset = tf.data.TFRecordDataset(fpaths)
dataset = dataset.map(decode)
# Extract data to lists
x = []
y = []
for d in dataset:
x.append(d[0].numpy())
y.append(tf.sparse.to_dense(tf.sparse.reorder(d[1])).numpy())
### Filter classes as described above.
# Parse confidence values
conf_data = {}
with open(fpath_conf_data) as f:
for line in f:
tokens = line.split()
# parse confidence
c = 0
for t in tokens:
            if t.find('%') != -1:
c = int(t[:-1])
# parse class name
n = ''
for t in tokens:
if t.find('%') == -1 and t != '-':
if n == '':
n = t
else:
n = n+' '+t
else:
break
conf_data.update({n:c})
# Parse class numbers from label csv file
l = -1
csv_data = {}
with open(fpath_label_inds) as f:
for line in f:
if l == -1:
l += 1
continue
tokens = line.split('"')
n = tokens[1]
csv_data.update({n:l})
l +=1
# Parse ontology info from json file
with open(fpath_ontology, 'r') as f:
json_data = json.load(f)
# Put all data into a single list.
all_data = []
for j in json_data:
if j['name'] in conf_data.keys():
class_info = {
'name' : j['name'],
'restricted' : j['restrictions'] != [],
'has_child' : j['child_ids'] != [],
'conf' : conf_data[j['name']],
'id' : csv_data[j['name']]
}
all_data.append(class_info)
# Filter classes
classes = []
for c in all_data:
if not c['restricted'] and not c['has_child'] and c['conf'] >= 70:
classes.append(c['id'])
### Filter the samples.
# Find samples that belong to only one of the potential classes.
# We also exclude some samples that don't have data for the full 10 seconds.
# First discard labels that are not in the set of potential classes
y_fil = []
for i in range(len(y)):
y_fil.append( np.intersect1d(y[i],classes))
# Find samples with one label
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]
# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0]
# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)
# Count number of valid samples for potential classes
y_single = np.asarray([y_fil[i][0] for i in valid_idx])
num_samples = [len(np.where(y_single == i)[0]) for i in classes]
# Take the n classes with the highest number of samples
n_sample_cutoff = np.sort(num_samples)[-n_classes]
class_idx = np.where(np.asarray(num_samples) >= n_sample_cutoff)[0]
our_classes = [classes[i] for i in class_idx]
### Filter the data again according to the chosen classes
y_fil = []
for i in range(len(y)):
y_fil.append( np.intersect1d(y[i],our_classes))
# Find samples that belong to only one of the potential classes
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]
# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0]
# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)
# Restructure data and relabel the classes to be between 0 and n_classes
y_data = [y_fil[i][0] for i in valid_idx]
y_data = [np.where(np.asarray(our_classes) == i)[0][0] for i in y_data]
y_data = np.asarray(y_data)
x_data = [x[i] for i in valid_idx]
x_data = np.stack(x_data)
### Split into test and train and restrict the number of samples per class
np.random.seed(42)
n_train = int(n_sample * (1-test_frac))
n_test = int(n_sample * test_frac)
train_ind = []
test_ind = []
for i in range(n_classes):
sample_idx = np.where(y_data == i)[0]
n_sample_class = len(sample_idx)
rand_idx = np.arange(n_sample_class)
np.random.shuffle(rand_idx)
train_ind.extend(sample_idx[rand_idx[0:n_train]])
test_ind.extend(sample_idx[rand_idx[n_train:n_sample]])
train_ind = np.asarray(train_ind)
test_ind = np.asarray(test_ind)
sub_sample_idx = np.hstack((train_ind,test_ind))
x_data_sub = x_data[sub_sample_idx,:,:]
y_data_sub = y_data[sub_sample_idx]
train_ind = np.arange(0,len(train_ind))
test_ind = np.arange(len(train_ind),len(train_ind)+len(test_ind))
### Save data
with open(target_path, 'wb') as f:
pickle.dump([x_data_sub, y_data_sub, train_ind, test_ind], f)
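# Minimal sketch (not part of the original script) of how the resulting pickle
# could be read back later; the unpacking below simply mirrors the dump order above:
#
#   with open(target_path, 'rb') as f:
#       x_data_sub, y_data_sub, train_ind, test_ind = pickle.load(f)
#   x_train, y_train = x_data_sub[train_ind], y_data_sub[train_ind]
#   x_test, y_test = x_data_sub[test_ind], y_data_sub[test_ind]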
| 32.889655 | 80 | 0.68463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,020 | 0.526316 |
76d2dd0a16c26b25219d0d5220bf5e490de12769 | 1,627 | py | Python | run.py | Bioconductor/bioc_git_transition | 9ca29f9e8058b755163e12bf9324ec1063d0182d | [
"MIT"
] | 16 | 2017-03-15T18:00:35.000Z | 2018-07-30T14:44:53.000Z | run.py | Bioconductor/bioc_git_transition | 9ca29f9e8058b755163e12bf9324ec1063d0182d | [
"MIT"
] | 40 | 2017-03-29T20:04:25.000Z | 2019-10-21T16:56:15.000Z | run.py | Bioconductor/bioc_git_transition | 9ca29f9e8058b755163e12bf9324ec1063d0182d | [
"MIT"
] | 4 | 2017-05-08T11:39:07.000Z | 2017-08-17T14:18:03.000Z | """Bioconductor run git transition code.
This module assembles the classes for the SVN --> Git transition
so that they can be run in a sequential manner.
It runs the following aspects of the Bioconductor transition.
Note: Update the SVN dump
1. Run Bioconductor Software package transition
2. Run Bioconductor Experiment Data package transition
3. Run Workflow package transition
4. Run Manifest file transition
5. Run Rapid update of master (trunk) and RELEASE_3_5 branches on
software packages
Manual tasks which need to be done:
1. Copy over bare repos to repositories/packages
2. Copy manifest bare git repo to repositories/admin
"""
import src.run_transition as rt
import src.svn_dump_update as sdu
import logging
import time
logging.basicConfig(filename='transition.log',
format='%(levelname)s %(asctime)s %(message)s',
level=logging.DEBUG)
def svn_dump_update(config_file):
sdu.svn_root_update(config_file)
sdu.svn_experiment_root_update(config_file)
return
def run(config_file):
rt.run_software_transition(config_file, new_svn_dump=True)
rt.run_experiment_data_transition(config_file, new_svn_dump=True)
rt.run_workflow_transition(config_file, new_svn_dump=True)
rt.run_manifest_transition(config_file, new_svn_dump=True)
return
if __name__ == '__main__':
start_time = time.time()
config_file = "./settings.ini"
svn_dump_update(config_file)
run(config_file)
# TODO: Run updates after dump update
svn_dump_update(config_file)
rt.run_updates(config_file)
logging.info("--- %s seconds ---" % (time.time() - start_time))
| 30.12963 | 69 | 0.754149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 770 | 0.473264 |
76d6a858fdb2f760a40ddaceed8b3a0b06e85a87 | 14,566 | py | Python | layouts/layout_simulation_procedure.py | KEHUIYAO/coral-sampling-tool | 731cc22fbf5e4045e894b894547ad52c270e3fb1 | [
"MIT"
] | 5 | 2022-03-29T04:41:22.000Z | 2022-03-29T12:17:35.000Z | layouts/layout_simulation_procedure.py | KEHUIYAO/coral-sampling-tool | 731cc22fbf5e4045e894b894547ad52c270e3fb1 | [
"MIT"
] | null | null | null | layouts/layout_simulation_procedure.py | KEHUIYAO/coral-sampling-tool | 731cc22fbf5e4045e894b894547ad52c270e3fb1 | [
"MIT"
] | null | null | null | import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
def generate_simulation_procedure():
return html.Div([
# instruction button to notify the user how to use the simulation tool
        dcc.Markdown(children='''There are three panels on the right: Survey, Transect Visualization and Power Calculation. The Survey tab contains a figure which shows all the DRM historical survey locations by year on the map. You can also use the **Select Files** button to select new survey data and update the figure. To begin with, you first click the **Start Simulation** button, then you will be asked to select a region to survey in the figure using the map selection tools on the top of the figure. Notice that the map selection tool bar will only appear when you hover your mouse over the figure. The selected region represents the location you want to conduct your new survey. After that, the app will help you estimate the proportion cover of the coral inside this region based on the historical survey data, and you are required to select a point process from which the coral will be simulated inside the region. If you have questions about how the data will be generated under different point processes, you can check out the **Point Process Introduction** tab. After selecting a point process, you then need to specify the parameters that characterize this point process. Additionally, other parts such as the transect, disease prevalence, and coral size can also be customized.'''),
html.Button('Start Simulation', id='button_select_region', n_clicks=0),
# the instruction related to the above button
dcc.Markdown(children='''
        Select a region on the right figure. The map selection tool bar will only appear when you hover your mouse over the figure. Box Select and Lasso Select are the most commonly used. Click the Box Select or Lasso Select button, then click and drag on the figure.
''', id='text_select_region', style={"display": "none"}),
# show the rough prop_cover density estimate based on the selected sites
html.Div([], id='prop_cover_estimate', style={'display': 'none'}),
# the hidden div which stores the prop cover estimation
dcc.Store(id='store_prop_cover_estimation'),
# which process is used to generate data
dcc.Markdown(id='text_dropdown_select_process', children='''Select which point process will the coral be simulated from.''', style={"display": "none"}),
generate_dropdown_selection(),
# based on selected process, let user specify the parameter of the process
dcc.Markdown(id='text_input_process_parameters', children='''
        Specify the parameters of the chosen point process under which the coral is simulated. Also specify other inputs like disease prevalence and the transect. For how different parameters change the look of a certain point process, you can check out the **Point Process Introduction** section. There is a playground at the bottom. For a given point process, you can adjust the parameters to see how the simulation data changes spatially. Finally, if you find that a simulation under a particular combination of parameters is quite realistic, you can use the **port** function to copy those parameters into the fields below.''', style={"display": "none"}),
# user-input area
# html.Div(id='input_process_parameters', style={"display": "none"}),
generate_user_input(),
# empty line
html.Br(),
# button to simulate the corals or calculate the power of the method
html.Div([
html.Button('Simulate once',
id='button_start_simulation',
n_clicks=0,
),
html.Button(
'Calculate power',
id='button_power_calculation',
n_clicks=0
)
],id='show_two_buttons',style={'display':'none'})
,
# dbc.Spinner(html.Div(id='loading-output'), color='primary'),
# html.Div([dbc.Spinner(color='primary')]),
# instruction for power calculation
dcc.Markdown(
id='text_power_calculation_instruction',
children='''
Calculate power''',
style={'display': 'none'}
),
html.Br()
],className='col-sm-5')
def generate_dropdown_selection():
"return a Div containing the dropdown selection box"
return dcc.Dropdown(
id='dropdown_select_process',
style={"display": "none"},
options=[
{'label': 'Homogeneous Poisson process', 'value': 1},
{'label': 'Inhomogeneous Poisson process', 'value': 2},
{'label': 'Cluster process', 'value': 3},
# {'label': 'Strauss process', 'value': 4}
],
# set the initial value=0 to hide the user input interface
value=0)
def generate_user_input():
"return a Div containing users' input interface"
input_n_toolkits = html.Div(html.Div([html.A('Number of transects:', className='col-sm-4'),
dcc.Input(
type='number',
placeholder=2,
value = 2,
id='input_n_toolkits',
className='col-sm-4'
)
], className='row'), id='input_n_toolkits_container', style={'display': 'none'})
# slider
# input_n_toolkits = html.Div(html.Div([
# html.A("Number of transects",className='col-sm-4'),
# dcc.Slider(min=1,
# max=5,
# step=1,
# value=2,
# marks={i: '{}'.format(i) for i in range(1, 6)},
# id='input_n_toolkits',
# className='col-sm-4')
# ], className='row'), id='input_n_toolkits_container',
# className='row',
# style={'display': 'none'})
input_disease_prevalence = html.Div(html.Div([html.A('disease prevalence: ', id='input_disease_prevalence_tooltip', className='col-sm-4'),
dcc.Input(
type='number',
placeholder=0.1,
value = 0.1,
step=0.1,
min=0,
max=1,
id='input_disease_prevalence',
className='col-sm-4'
)
], className='row'), id='input_disease_prevalence_container', style={'display': 'none'})
input_disease_prevalence_tooltip = dbc.Tooltip('the proportion of corals which get infected by a disease', target='input_disease_prevalence_tooltip')
# text or number input
input_fun_lambda = html.Div(html.Div([html.A('proportion cover function:', className='col-sm-4'), dcc.Input(
id="input_fun_lambda",
type='text',
placeholder="1000 * np.exp(-(((x - 50) / 50) ** 2 + ((y - 50) / 50) ** 2) / 0.5 ** 2)",
value="1000 * np.exp(-(((x - 50) / 50) ** 2 + ((y - 50) / 50) ** 2) / 0.5 ** 2)",
className='col-sm-4'
)],className='row'),id='show_input_fun_lambda',style={'display':'none'})
input_parent_prop = html.Div(html.Div([html.A('parent corals / total corals:', className='col-sm-4'), dcc.Input(
id="input_parent_prop",
type='number',
placeholder=0.01,
value=0.01,
step=0.01,
className='col-sm-4'
)],className='row'),id='show_input_parent_prop',style={'display':'none'})
input_parent_range = html.Div(html.Div([html.A('parent range:', className='col-sm-4'), dcc.Input(
id="input_parent_range",
type='number',
placeholder=5,
value=5,
className='col-sm-4'
)],className='row'),id='show_input_parent_range',style={'display':'none'})
input_strauss_beta = dcc.Input(
id="input_strauss_beta",
type='number',
placeholder="strauss_beta",
style={'display': 'none'}
)
input_strauss_gamma = dcc.Input(
id="input_strauss_gamma",
type='number',
placeholder="strauss_gamma",
style={'display': 'none'}
)
input_strauss_R = dcc.Input(
id="input_strauss_R",
type='number',
placeholder="strauss_R",
style={'display': 'none'}
)
input_transect_length = html.Div(html.Div([html.A('transect width (m): ', className='col-sm-4'),
dcc.Input(
type='number',
placeholder=25,
value=25,
id='dcc_input_transect_length',
className='col-sm-4'
)
], className='row'), id='input_transect_length', style={'display': 'none'})
input_transect_width = html.Div(html.Div([html.A('transect length (m): ', className='col-sm-4'),
dcc.Input(
type='number',
placeholder=6,
value = 6,
id='dcc_input_transect_width',
className='col-sm-4'
)
], className='row'), id='input_transect_width', style={'display': 'none'})
line_intercept_ratio = html.Div(html.Div([html.A('transect width / plot width', className='col-sm-4'),
dcc.Input(
type='number',
placeholder=1/5,
value = 1/5,
step=0.1,
id='dcc_line_intercept_ratio',
className='col-sm-4')
],className='row'), id='line_intercept_ratio', style={'display': 'none'})
coral_size = html.Div(html.Div([html.A('coral size (m^2): ', id='coral_size_tooltip',className='col-sm-4'),
dcc.Input(
type='number',
placeholder=0.0068,
value = 0.0068,
step=0.0001,
id='coral_size',
className='col-sm-4'
)
],className='row' ),
id='coral_size_input',
style={'display': 'none'})
coral_size_tooltip = dbc.Tooltip('the average size of an individual coral, measured in m^3', target='coral_size_tooltip')
coral_size_std = html.Div(html.Div([html.A('coral size standard error: ', id='coral_size_std_tooltip', className='col-sm-4'),
dcc.Input(
type='number',
placeholder=0.001,
value = 0.001,
step=0.001,
id='coral_size_std',
className='col-sm-4'
)], className='row')
, id='coral_size_std_input', style={'display': 'none'})
coral_size_std_tooltip = dbc.Tooltip('the standard deviation of the average size of an individual coral', target='coral_size_std_tooltip')
prop_cover = html.Div(html.Div([html.A('proportion cover: ', className='col-sm-4', id='prop_cover_tooltip'),
dcc.Input(
type='number',
placeholder=0,
value = 0,
step=0.1,
min=0,
max=1,
id='prop_cover',
className='col-sm-4'
)
],className='row'), id='prop_cover_input', style={'display': 'none'})
prop_cover_tooltip = dbc.Tooltip('Proportion cover of coral. If it equals 0, its estimation based on the historical data will be used in the simulation', target='prop_cover_tooltip')
num_of_replications = html.Div(html.Div([html.A('number of replications', className='col-sm-4'),
dcc.Input(
type='number',
placeholder=10,
value = 10,
step=1,
min=1,
id='num_of_replications',
className='col-sm-4'
)
],className='row'), id='number_of_replications_input', style={'display': 'none'})
return html.Div([
input_n_toolkits,
prop_cover,
prop_cover_tooltip,
input_fun_lambda,
coral_size,
coral_size_tooltip,
coral_size_std,
coral_size_std_tooltip,
input_disease_prevalence,
input_disease_prevalence_tooltip,
input_parent_prop,
input_parent_range,
input_strauss_beta,
input_strauss_gamma,
input_strauss_R,
input_transect_length,
input_transect_width,
line_intercept_ratio,
num_of_replications
], id='input_process_parameters') | 49.376271 | 1,301 | 0.506316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,650 | 0.456543 |
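# Usage sketch (an assumption, not shown in this module): the layout factory is
# typically attached to a Dash app elsewhere, e.g.
#
#   import dash
#   app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
#   app.layout = generate_simulation_procedure()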
76d787aa0fb3effb59ce8288a064c7de0d40a573 | 524 | py | Python | configs/HDR/hdr/retinanet_r50_fpn_1x_coco_hdr_minmax_glob_gamma_2.py | ismailkocdemir/mmdetection | 4ac7e76dc66be7c97a8ca2c5f8a8e71434e3d823 | [
"Apache-2.0"
] | null | null | null | configs/HDR/hdr/retinanet_r50_fpn_1x_coco_hdr_minmax_glob_gamma_2.py | ismailkocdemir/mmdetection | 4ac7e76dc66be7c97a8ca2c5f8a8e71434e3d823 | [
"Apache-2.0"
] | null | null | null | configs/HDR/hdr/retinanet_r50_fpn_1x_coco_hdr_minmax_glob_gamma_2.py | ismailkocdemir/mmdetection | 4ac7e76dc66be7c97a8ca2c5f8a8e71434e3d823 | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../retinanet_r50_fpn_1x_coco.py',
'../../_base_/datasets/hdr_detection_minmax_glob_gamma.py',
]
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.0005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None) # dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[10])
runner = dict(
type='EpochBasedRunner', max_epochs=20)
| 26.2 | 88 | 0.694656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.45229 |
76e62dfaead6e340b719c28d88044ea601c31718 | 1,309 | py | Python | setup.py | awesome-archive/webspider | 072e9944db8fe05cbb47f8ea6d1a327c2a8929b1 | [
"MIT"
] | null | null | null | setup.py | awesome-archive/webspider | 072e9944db8fe05cbb47f8ea6d1a327c2a8929b1 | [
"MIT"
] | null | null | null | setup.py | awesome-archive/webspider | 072e9944db8fe05cbb47f8ea6d1a327c2a8929b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import find_packages, setup
from app import __version__
# get the dependencies and installs
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'requirements.txt')) as f:
all_requirements = f.read().split('\n')
setup(
name='webspider',
version=__version__,
license='MIT',
author='heguozhu',
author_email='heguozhu@zhihu.com',
description='lagou.com spider',
url='git@github.com:GuozhuHe/webspider.git',
packages=find_packages(exclude=['tests']),
package_data={'webspider': ['README.md']},
zip_safe=False,
install_requires=all_requirements,
entry_points={
'console_scripts': [
'web = app.web_app:main',
'production_web = app.quickly_cmd:run_web_app_by_gunicorn',
'crawl_lagou_data = app.tasks:crawl_lagou_data',
'crawl_jobs_count = app.tasks.jobs_count:crawl_lagou_jobs_count',
'celery_jobs_count_worker = app.quickly_cmd:run_celery_jobs_count_worker',
'celery_lagou_data_worker = app.quickly_cmd:run_celery_lagou_data_worker',
'celery_beat = app.quickly_cmd:run_celery_beat',
            'celery_flower = app.quickly_cmd:run_celery_flower',
],
}
)
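# Usage sketch (an assumption, not part of the original setup.py): after an
# editable install, each console script above becomes a shell command, e.g.
#
#   pip install -e .
#   web            # starts the web application
#   celery_beat    # starts the periodic task scheduler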
| 34.447368 | 86 | 0.6822 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 690 | 0.52712 |
76f7e1b302002b518c986240747a14b0f7bf282f | 4,291 | py | Python | src/manifest.py | silent1mezzo/lightsaber | e470be7fb84b810fe846ff0ede78d06bf69cd5e3 | [
"MIT"
] | 13 | 2020-08-12T12:04:19.000Z | 2022-03-12T03:53:07.000Z | src/manifest.py | silent1mezzo/lightsaber | e470be7fb84b810fe846ff0ede78d06bf69cd5e3 | [
"MIT"
] | 46 | 2020-09-03T06:00:18.000Z | 2022-03-25T10:03:53.000Z | src/manifest.py | silent1mezzo/lightsaber | e470be7fb84b810fe846ff0ede78d06bf69cd5e3 | [
"MIT"
] | 3 | 2021-08-11T19:12:37.000Z | 2021-11-09T15:19:59.000Z | MANIFEST = {
"hilt": {
"h1": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (110, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (141, 141, 141), # 8d8d8d
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal/Salvaged materials",
},
"h2": {
"offsets": {"blade": 20, "button": {"x": (8, 8), "y": (100, 105)}},
"colours": {
"primary": (112, 112, 112), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (212, 175, 55), # 000000
},
"length": 24,
"materials": "Alloy metal and carbon composite",
},
"h3": {
"offsets": {"blade": 0, "button": {"x": (10, 10), "y": (100, 118)}},
"colours": {
"primary": (157, 157, 157), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h4": {
"offsets": {"blade": 7, "button": {"x": (8, 9), "y": (92, 100)}},
"colours": {
"primary": (0, 0, 0), # 000000
"secondary": (157, 157, 157), # 9d9d9d
"tertiary": (180, 97, 19), # b46113
},
"length": 13,
"materials": "Alloy metal",
},
"h5": {
"offsets": {"blade": 0, "button": {"x": (8, 8), "y": (92, 105)}},
"colours": {
"primary": (111, 111, 111), # 6f6f6f
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h6": {
"offsets": {"blade": 2, "button": {"x": (8, 9), "y": (112, 113)}},
"colours": {
"primary": (120, 120, 120), # 787878
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 22,
"materials": "Alloy metal/Salvaged materials",
},
"h7": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (105, 113)}},
"colours": {
"primary": (192, 192, 192), # c0c0c0
"secondary": (255, 215, 0), # ffd700
"tertiary": (0, 0, 0), # 000000
},
"length": 22,
"materials": "Alloy metal and Gold",
},
"h8": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (100, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (180, 97, 19), # b46113
"tertiary": (0, 0, 0), # 000000
},
"length": 24,
"materials": "Alloy metal/Copper",
},
},
"blade": {
"b1": {"colour": "Red", "crystal": "Adegan crystal", "type": "Sith"},
"b2": {"colour": "Blue", "crystal": "Zophis crystal", "type": "Jedi"},
"b3": {"colour": "Green", "crystal": "Nishalorite stone", "type": "Jedi"},
"b4": {"colour": "Yellow", "crystal": "Kimber stone", "type": "Jedi"},
"b5": {"colour": "White", "crystal": "Dragite gem", "type": "Jedi"},
"b6": {"colour": "Purple", "crystal": "Krayt dragon pearl", "type": "Jedi"},
"b7": {"colour": "Blue/Green", "crystal": "Dantari crystal", "type": "Jedi"},
"b8": {
"colour": "Orange",
"crystal": ["Ilum crystal", "Ultima Pearl"],
"type": "Sith",
},
"b9": {
"colour": "Black",
"crystal": "Obsidian",
"type": ["Jedi", "Mandalorian"],
},
},
"pommel": {
"p1": {"length": 5,},
"p2": {"length": 14,},
"p3": {"length": 3,},
"p4": {"length": 8,},
"p5": {"length": 5,},
"p6": {"length": 5,},
"p7": {"length": 8,},
},
# These are lightsabers for a specific Jedi or Sith. Should use their name instead of
"unique_urls": {""},
}
| 37.313043 | 89 | 0.381496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,843 | 0.429504 |
76fb80b4170accbe860db8c0999717d64544977e | 5,741 | py | Python | ament_tools/setup_arguments.py | richmattes/ament_tools | 2a25cdcc273fcd73e81e8a47fe892a0b5963307d | [
"Apache-2.0"
] | 1 | 2020-05-19T14:33:49.000Z | 2020-05-19T14:33:49.000Z | ros2_mod_ws/install/lib/python3.7/site-packages/ament_tools/setup_arguments.py | mintforpeople/robobo-ros2-ios-port | 1a5650304bd41060925ebba41d6c861d5062bfae | [
"Apache-2.0"
] | null | null | null | ros2_mod_ws/install/lib/python3.7/site-packages/ament_tools/setup_arguments.py | mintforpeople/robobo-ros2-ios-port | 1a5650304bd41060925ebba41d6c861d5062bfae | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import distutils.core
import os
try:
import setuptools
except ImportError:
pass
import subprocess
import sys
from threading import Lock
from ament_tools.build_type import get_command_prefix
from ament_tools.helper import quote_shell_command
setup_lock = None
def get_setup_arguments_with_context(build_type, context):
"""
Capture the arguments of the setup() function in the setup.py file.
To provide a custom environment when introspecting the setup() function
a separate Python interpreter is being used which can have an extended
PYTHONPATH etc.
:param build_type: the build type
:param context: the context
:type context: :py:class:`ament_tools.context.Context`
:returns: a dictionary containing the arguments of the setup() function
"""
prefix = get_command_prefix(
'%s__setup' % build_type, context.build_space,
context.build_dependencies)
ament_tools_path = os.path.dirname(os.path.dirname(__file__))
setuppy = os.path.join(context.source_space, 'setup.py')
if os.name == 'nt':
ament_tools_path = ament_tools_path.replace(os.sep, os.altsep)
setuppy = setuppy.replace(os.sep, os.altsep)
code_lines = [
'import sys',
"sys.path.insert(0, '%s')" % ament_tools_path,
'from ament_tools.setup_arguments import get_setup_arguments',
"print(repr(get_setup_arguments('%s')))" % setuppy]
# invoke get_setup_arguments() in a separate interpreter
cmd = prefix + [sys.executable, '-c', ';'.join(code_lines)]
cmd = quote_shell_command(cmd)
result = subprocess.run(
cmd, stdout=subprocess.PIPE, shell=True, check=True)
output = result.stdout.decode()
return ast.literal_eval(output)
def get_setup_arguments(setup_py_path):
"""
Capture the arguments of the setup() function in the setup.py file.
The function is being run within the current Python interpreter.
Therefore the processed setup.py file can not have any additional
dependencies not available in the current environment.
:param setup_py_path: the path to the setup.py file
:returns: a dictionary containing the arguments of the setup() function
"""
global setup_lock
if not setup_lock:
setup_lock = Lock()
assert os.path.basename(setup_py_path) == 'setup.py'
# prevent side effects in other threads
with setup_lock:
# change to the directory containing the setup.py file
old_cwd = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(setup_py_path)))
try:
data = {}
mock_setup = create_mock_setup_function(data)
# replace setup() function of distutils and setuptools
# in order to capture its arguments
try:
distutils_setup = distutils.core.setup
distutils.core.setup = mock_setup
try:
setuptools_setup = setuptools.setup
setuptools.setup = mock_setup
except NameError:
pass
# evaluate the setup.py file
with open('setup.py', 'r') as h:
exec(h.read())
finally:
distutils.core.setup = distutils_setup
try:
setuptools.setup = setuptools_setup
except NameError:
pass
return data
finally:
os.chdir(old_cwd)
def create_mock_setup_function(data):
"""
Create a mock function to capture its arguments.
It can replace either distutils.core.setup or setuptools.setup.
:param data: a dictionary which is updated with the captured arguments
:returns: a function to replace disutils.core.setup and setuptools.setup
"""
def setup(*args, **kwargs):
if args:
raise RuntimeError(
'setup() function invoked with positional arguments')
if 'name' not in kwargs:
raise RuntimeError(
"setup() function invoked without the keyword argument 'name'")
data.update(kwargs)
return setup
def get_data_files_mapping(data_files):
"""
Transform the data_files structure into a dictionary.
:param data_files: either a list of source files or
a list of tuples where the first element is the destination path and
the second element is a list of source files
:returns: a dictionary mapping the source file to a destination file
"""
mapping = {}
for data_file in data_files:
if isinstance(data_file, tuple):
assert len(data_file) == 2
dest = data_file[0]
assert not os.path.isabs(dest)
sources = data_file[1]
assert isinstance(sources, list)
for source in sources:
assert not os.path.isabs(source)
mapping[source] = os.path.join(dest, os.path.basename(source))
else:
assert not os.path.isabs(data_file)
mapping[data_file] = os.path.basename(data_file)
return mapping
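# Illustrative example (added for clarity, not part of the original module):
#
#   get_data_files_mapping([('share/pkg', ['res/a.txt', 'res/b.txt']), 'README.md'])
#   -> {'res/a.txt': 'share/pkg/a.txt',
#       'res/b.txt': 'share/pkg/b.txt',
#       'README.md': 'README.md'}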
| 35.006098 | 79 | 0.656854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,655 | 0.462463 |
0a00e63d1006dbef16f6c53de45b2f52bfe52dea | 7,268 | py | Python | model/resnet.py | DrMMZ/RetinaNet | 0b8491076f2ad344e101f724a2f5b8305adb2d52 | [
"MIT"
] | 7 | 2021-07-07T02:59:58.000Z | 2021-12-09T04:48:49.000Z | model/resnet.py | DrMMZ/ResFPN | 3acd6c629419a9f66da5386f3fd3deb9e8c929ff | [
"MIT"
] | 3 | 2021-11-25T07:21:03.000Z | 2022-01-17T18:56:29.000Z | model/resnet.py | DrMMZ/RetinaNet | 0b8491076f2ad344e101f724a2f5b8305adb2d52 | [
"MIT"
] | 2 | 2021-12-09T01:48:36.000Z | 2022-01-08T15:54:58.000Z | """
Residual Networks (ResNet)
"""
# adapted from
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
import tensorflow as tf
def identity_block(
input_tensor,
filters,
stage,
block,
train_bn=False
):
"""
Builds an identity shortcut in a bottleneck building block of a ResNet.
Parameters
----------
input_tensor : tf tensor, [batch_size, height, width, channels]
An input tensor.
filters : list, positive integers
The number of filters in 3 conv layers at the main path, where
last number is equal to input_tensor's channels.
stage : integer
A number in [2,5] used for generating layer names.
block : string
A lowercase letter, used for generating layer names.
train_bn : boolean, optional
Whether one should normalize the layer input by the mean and variance
over the current batch. The default is False, i.e., use the moving
average of mean and variance to normalize the layer input.
Returns
-------
output_tensor : tf tensor, [batch_size, height, width, channels]
The output tensor same shape as input_tensor.
"""
num_filters_1, num_filters_2, num_filters_3 = filters
conv_prefix = 'res' + str(stage) + block + '_branch'
bn_prefix = 'bn' + str(stage) + block + '_branch'
x = tf.keras.layers.Conv2D(
num_filters_1, (1,1), name=conv_prefix + '2a')(input_tensor)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2a')(x, training=train_bn)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
num_filters_2, (3,3), padding='same', name=conv_prefix + '2b')(x)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2b')(x, training=train_bn)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
num_filters_3, (1,1), name=conv_prefix + '2c')(x)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2c')(x, training=train_bn)
x = tf.keras.layers.Add()([input_tensor, x])
output_tensor = tf.keras.layers.Activation(
'relu', name='res' + str(stage) + block + '_out')(x)
return output_tensor
def conv_block(
input_tensor,
filters,
stage,
block,
strides=(2, 2),
train_bn=False
):
"""
Builds a projection shortcut in a bottleneck block of a ResNet.
Parameters
----------
input_tensor : tf tensor, [batch_size, height, width, channels]
An input tensor.
filters : list, positive integers
The number of filters in 3 conv layers at the main path.
stage : integer
A number in [2,5] used for generating layer names.
block : string
A lowercase letter, used for generating layer names.
strides : tuple, integers, optional
The conv layer strides. The default is (2, 2).
train_bn : boolean, optional
Whether one should normalize the layer input by the mean and variance
over the current batch. The default is False, i.e., use the moving
average of mean and variance to normalize the layer input.
Returns
-------
output_tensor : tf tensor
[batch_size, height//strides, width//strides, num_filters_3] where
num_filters_3 is the last number in filters, the output tensor.
"""
num_filters_1, num_filters_2, num_filters_3 = filters
conv_prefix = 'res' + str(stage) + block + '_branch'
bn_prefix = 'bn' + str(stage) + block + '_branch'
x = tf.keras.layers.Conv2D(
num_filters_1, (1,1), strides, name=conv_prefix + '2a')(input_tensor)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2a')(x, training=train_bn)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
num_filters_2, (3,3), padding='same', name=conv_prefix + '2b')(x)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2b')(x, training=train_bn)
x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Conv2D(
num_filters_3, (1,1), name=conv_prefix + '2c')(x)
x = tf.keras.layers.BatchNormalization(
name=bn_prefix + '2c')(x, training=train_bn)
shortcut = tf.keras.layers.Conv2D(
num_filters_3, (1,1), strides, name=conv_prefix + '1')(input_tensor)
shortcut = tf.keras.layers.BatchNormalization(
name=bn_prefix + '1')(shortcut, training=train_bn)
x = tf.keras.layers.Add()([shortcut, x])
output_tensor = tf.keras.layers.Activation(
'relu', name='res' + str(stage) + block + '_out')(x)
return output_tensor
def backbone_resnet(input_image, architecture, stage5=True, train_bn=False):
"""
Builds a backbone ResNet.
Parameters
----------
input_image : tf tensor, [batch_size, height, width, channels]
An input tensor.
architecture : string
The ResNet architecture in {'resnet50', 'resnet101'}.
stage5 : boolean, optional
Whether create stage5 of network. The default is True.
train_bn : boolean, optional
Whether one should normalize the layer input by the mean and variance
over the current batch. The default is False, i.e., use the moving
average of mean and variance to normalize the layer input.
Returns
-------
outputs : list
Feature maps at each stage.
"""
    assert architecture in ['resnet50', 'resnet101'], \
        'Only support ResNet50/101'
# stage 1
x = tf.keras.layers.ZeroPadding2D((3,3))(input_image)
x = tf.keras.layers.Conv2D(64, (7,7), (2,2), name='conv1')(x)
x = tf.keras.layers.BatchNormalization(name='bn_conv1')(x, training=train_bn)
x = tf.keras.layers.Activation('relu')(x)
C1 = x = tf.keras.layers.MaxPooling2D((3,3), (2,2), padding='same')(x)
# stage 2
x = conv_block(
x, [64,64,256], stage=2, block='a', strides=(1,1), train_bn=train_bn)
x = identity_block(x, [64,64,256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(
x, [64,64,256], stage=2, block='c', train_bn=train_bn)
# stage 3
x = conv_block(x, [128,128,512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, [128,128,512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, [128,128,512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(
x, [128,128,512], stage=3, block='d', train_bn=train_bn)
# stage 4
x = conv_block(x, [256,256,1024], stage=4, block='a', train_bn=train_bn)
num_blocks = {'resnet50':5, 'resnet101':22}[architecture]
for i in range(num_blocks):
x = identity_block(
x, [256,256,1024], stage=4, block=chr(98+i), train_bn=train_bn)
C4 = x
# stage 5
if stage5:
x = conv_block(x, [512,512,2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(
x, [512,512,2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(
x, [512,512,2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
return [C1, C2, C3, C4, C5]
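if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); the input size is
    # an arbitrary assumption chosen only for illustration.
    images = tf.keras.Input(shape=(224, 224, 3))
    C1, C2, C3, C4, C5 = backbone_resnet(images, 'resnet50', stage5=True)
    model = tf.keras.Model(inputs=images, outputs=[C1, C2, C3, C4, C5])
    model.summary()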
| 35.802956 | 81 | 0.624381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,116 | 0.428729 |
0a066d9e3ce3fc69b55dd82dd4922f5e05e9b7a2 | 2,167 | py | Python | take_snapshot.py | ITCave/sniff-for-changes-in-directory | 59a06c1ca85033273845e8266038bfeacfc9f64d | [
"MIT"
] | null | null | null | take_snapshot.py | ITCave/sniff-for-changes-in-directory | 59a06c1ca85033273845e8266038bfeacfc9f64d | [
"MIT"
] | null | null | null | take_snapshot.py | ITCave/sniff-for-changes-in-directory | 59a06c1ca85033273845e8266038bfeacfc9f64d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Filename : take_snapshot.py
# @Date : 2019-07-15-13-44
# @Project: ITC-sniff-for-changes-in-directory
# @Author: Piotr Wołoszyn
# @Website: http://itcave.eu
# @Email: contact@itcave.eu
# @License: MIT
# @Copyright (C) 2019 ITGO Piotr Wołoszyn
# Generic imports
import os
import pickle
import re
import argparse
from datetime import datetime
def clear_path_string(s):
"""
Simple function that removes chars that are not allowed in file names
:param s: path_string
:return: cleaned_path_string
"""
return (re.sub('[^a-zA-Z]+', '#', s)).lower()
def sniff(sniff_path):
"""
Walks the path and stores information about directory content
:param sniff_path: relative or absolute path
:return: void
"""
sniff_path = str(sniff_path).lower()
# Variable in which information will be stored
dir_store = {}
# Recursive loop that walks through all of the subdirectories
for subdir, dirs, files in os.walk(sniff_path):
if subdir not in dir_store:
dir_store[subdir] = {}
dir_store[subdir]['subdirs'] = dirs
dir_store[subdir]['files'] = files
dir_store[subdir]['file_details'] = {}
for file in files:
f_path = os.path.join(subdir, file)
            # The information that will be stored for each of the files - in this case the last file modification date
# Important: it's cross-platform relevant!
modified_date = os.path.getmtime(f_path)
dir_store[subdir]['file_details'][file] = (modified_date,)
# Name of a file in which data will be stored
dump_name = clear_path_string(sniff_path) + '_' + datetime.now().strftime('%Y%m%d%H%M%S')
# Save pickled data
with open(dump_name + '.pkl', 'wb') as output:
pickle.dump(dir_store, output, pickle.HIGHEST_PROTOCOL)
print("Directory Snapshot taken:", dump_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Directory Sniffer')
parser.add_argument('path', help='Path to the directory that you want to take a snapshot of')
args = parser.parse_args()
sniff(args.path)
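# Example usage (an assumption, not part of the original script):
#
#   python take_snapshot.py /path/to/directory
#
# The resulting .pkl file can later be loaded with pickle.load() to compare two
# snapshots of the same directory.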
| 28.513158 | 113 | 0.662206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,089 | 0.502075 |
0a14fdb015437094dc2620963de3edb83ccea376 | 1,706 | py | Python | backend/ibutsu_server/controllers/health_controller.py | rsnyman/ibutsu-server | 3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc | [
"MIT"
] | 10 | 2020-07-07T07:00:00.000Z | 2022-03-30T12:21:44.000Z | backend/ibutsu_server/controllers/health_controller.py | rsnyman/ibutsu-server | 3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc | [
"MIT"
] | 133 | 2020-07-06T20:10:45.000Z | 2022-03-31T15:19:19.000Z | backend/ibutsu_server/controllers/health_controller.py | rsnyman/ibutsu-server | 3d190a3ab2f3cd206b7c5509ba21f95ce5bbdfcc | [
"MIT"
] | 9 | 2020-07-06T17:33:29.000Z | 2022-03-07T00:08:00.000Z | from flask import current_app
from sqlalchemy.exc import InterfaceError
from sqlalchemy.exc import OperationalError
try:
from ibutsu_server.db.model import Result
IS_CONNECTED = True
except ImportError:
IS_CONNECTED = False
def get_health(token_info=None, user=None):
"""Get a health report
:rtype: Health
"""
return {"status": "OK", "message": "Service is running"}
def get_database_health(token_info=None, user=None):
"""Get a health report for the database
:rtype: Health
"""
response = ({"status": "Pending", "message": "Fetching service status"}, 200)
# Try to connect to the database, and handle various responses
try:
if not IS_CONNECTED:
response = ({"status": "Error", "message": "Incomplete database configuration"}, 500)
else:
Result.query.first()
response = ({"status": "OK", "message": "Service is running"}, 200)
except OperationalError:
response = ({"status": "Error", "message": "Unable to connect to the database"}, 500)
except InterfaceError:
response = ({"status": "Error", "message": "Incorrect connection configuration"}, 500)
except Exception as e:
response = ({"status": "Error", "message": str(e)}, 500)
return response
def get_health_info(token_info=None, user=None):
"""Get the information about this server
:rtype: HealthInfo
"""
return {
"frontend": current_app.config.get("FRONTEND_URL", "http://localhost:3000"),
"backend": current_app.config.get("BACKEND_URL", "http://localhost:8080"),
"api_ui": current_app.config.get("BACKEND_URL", "http://localhost:8080") + "/api/ui/",
}
| 32.188679 | 97 | 0.649472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 732 | 0.429074 |
0a1c4786888ba534eda7784354ef48e759ceac1e | 40 | py | Python | version.py | XioNoX/ansible-junos-stdlib-old | 92f33b3bbe6d2cc36d9f2028bb7c792f25ddce80 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | version.py | XioNoX/ansible-junos-stdlib-old | 92f33b3bbe6d2cc36d9f2028bb7c792f25ddce80 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | version.py | XioNoX/ansible-junos-stdlib-old | 92f33b3bbe6d2cc36d9f2028bb7c792f25ddce80 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | VERSION = "1.4.0"
DATE = "2016-Sept-21"
| 13.333333 | 21 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.525 |
0a1e3877d30a492ceb0b5445e7d1d835bd228d55 | 7,409 | py | Python | hw3 cnn and vis/gradcam.py | mtang1001/ML-Exploration | 6fec422eca127210e948945e6d15526947bfae8e | [
"Apache-2.0"
] | null | null | null | hw3 cnn and vis/gradcam.py | mtang1001/ML-Exploration | 6fec422eca127210e948945e6d15526947bfae8e | [
"Apache-2.0"
] | null | null | null | hw3 cnn and vis/gradcam.py | mtang1001/ML-Exploration | 6fec422eca127210e948945e6d15526947bfae8e | [
"Apache-2.0"
] | null | null | null | import torch
import torchvision
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image
from captum.attr import GuidedGradCam, GuidedBackprop
from captum.attr import LayerActivation, LayerConductance, LayerGradCam
from data_utils import *
from image_utils import *
from captum_utils import *
import numpy as np
from visualizers import GradCam
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
X, y, class_names = load_imagenet_val(num=5)
# FOR THIS SECTION ONLY, we need to use gradients. We introduce a new model we will use explicitly for GradCAM for this.
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
gc = GradCam()
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
# Guided Back-Propagation
gbp_result = gc.guided_backprop(X_tensor,y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gbp_result.shape[0]):
plt.subplot(1, 5, i + 1)
img = gbp_result[i]
img = rescale(img)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_backprop.png')
# GradCam
# GradCAM. We have given you which module(=layer) that we need to capture gradients from, which you can see in conv_module variable below
gc_model = torchvision.models.squeezenet1_1(pretrained=True)
for param in gc_model.parameters():
param.requires_grad = True
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gradcam_val = gradcam_result[i]
img = X[i] + (matplotlib.cm.jet(gradcam_val)[:,:,:3]*255)
img = img / np.max(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/gradcam.png')
# As a final step, we can combine GradCam and Guided Backprop to get Guided GradCam.
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0).requires_grad_(True)
y_tensor = torch.LongTensor(y)
gradcam_result = gc.grad_cam(X_tensor, y_tensor, gc_model)
gbp_result = gc.guided_backprop(X_tensor, y_tensor, gc_model)
plt.figure(figsize=(24, 24))
for i in range(gradcam_result.shape[0]):
gbp_val = gbp_result[i]
gradcam_val = np.expand_dims(gradcam_result[i], axis=2)
# Pointwise multiplication and normalization of the gradcam and guided backprop results (2 lines)
img = gradcam_val * gbp_val
img = np.expand_dims(img.transpose(2, 0, 1), axis=0)
img = np.float32(img)
img = torch.from_numpy(img)
img = deprocess(img)
plt.subplot(1, 5, i + 1)
plt.imshow(img)
plt.title(class_names[y[i]])
plt.axis('off')
plt.gcf().tight_layout()
plt.savefig('visualization/guided_gradcam.png')
# **************************************************************************************** #
# Captum
model = torchvision.models.squeezenet1_1(pretrained=True)
# We don't want to train the model, so tell PyTorch not to compute gradients
# with respect to model parameters.
for param in model.parameters():
param.requires_grad = False
# Convert X and y from numpy arrays to Torch Tensors
X_tensor = torch.cat([preprocess(Image.fromarray(x)) for x in X], dim=0)
y_tensor = torch.LongTensor(y)
conv_module = model.features[12]
##############################################################################
# TODO: Compute/Visualize GuidedBackprop and Guided GradCAM as well. #
# visualize_attr_maps function from captum_utils.py is useful for #
# visualizing captum outputs #
# Use conv_module as the convolution layer for gradcam #
##############################################################################
# Computing Guided GradCam
ggc = GuidedGradCam(model, conv_module)
attribution_gcc = compute_attributions(ggc, X_tensor, target = y_tensor)
# print(X_tensor.shape, y_tensor.shape, attribution_gcc.shape)
visualize_attr_maps('visualization/GuidedGradCam.png', X, y, class_names, [attribution_gcc], ['Guided_Grad_Cam'])
# Computing Guided BackProp
gbp = GuidedBackprop(model)
attribution_gbp = compute_attributions(gbp, X_tensor, target = y_tensor)
visualize_attr_maps('visualization/GuidedBackpropCam.png', X, y, class_names, [attribution_gbp], ['Guided_Backprop_Cam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
# Try out different layers and see observe how the attributions change
layer = model.features[3]
# Example visualization for using layer visualizations
# layer_act = LayerActivation(model, layer)
# layer_act_attr = compute_attributions(layer_act, X_tensor)
# layer_act_attr_sum = layer_act_attr.mean(axis=1, keepdim=True)
##############################################################################
# TODO: Visualize Individual Layer Gradcam and Layer Conductance (similar #
# to what we did for the other captum sections, using our helper methods), #
# but with some preprocessing calculations. #
# #
# You can refer to the LayerActivation example above and you should be #
# using 'layer' given above for this section #
# #
# Also note that, you would need to customize your 'attr_preprocess' #
# parameter that you send along to 'visualize_attr_maps' as the default #
# 'attr_preprocess' is written to only to handle multi channel attributions. #
# #
# For layer gradcam look at the usage of the parameter relu_attributions #
##############################################################################
# Layer gradcam aggregates across all channels
from captum.attr import LayerAttribution
N, C, H, W = X_tensor.shape
LC = LayerConductance(model, layer)
LC_attr = compute_attributions(LC, X_tensor, target = y_tensor)
LC_attr_sum = LC_attr.mean(axis = 1, keepdim = True)
LC_attr_int = LayerAttribution.interpolate(LC_attr_sum, (H,W) )
LC_attr_int = LC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps('visualization/LayerConductance.png', X, y, class_names, [LC_attr_int], ['LayerConductance'])
LGC = LayerGradCam(model, layer)
LGC_attr = compute_attributions(LGC, X_tensor, target = y_tensor)
LGC_attr_sum = LGC_attr.mean(axis = 1, keepdim = True)
LGC_attr_int = LayerAttribution.interpolate(LGC_attr_sum, (H,W))
LGC_attr_int = LGC_attr_int.repeat(1, 3, 1, 1)
visualize_attr_maps ('visualization/LayerGradCam.png', X, y, class_names, [LGC_attr_int], ['LayerGradCam'])
##############################################################################
# END OF YOUR CODE #
##############################################################################
| 41.623596 | 137 | 0.626535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,626 | 0.489405 |
0a1e494933ae306f17bb20205df33acd66dcd6cb | 3,713 | py | Python | src/genotypes.py | k8lion/admmdarts | 4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776 | ["Apache-2.0"] | null | null | null | src/genotypes.py | k8lion/admmdarts | 4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776 | ["Apache-2.0"] | null | null | null | src/genotypes.py | k8lion/admmdarts | 4953e401cb74ba9f8da3ed0b9d4c5e88da9fc776 | ["Apache-2.0"] | null | null | null |
from collections import namedtuple
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
PRIMITIVES = [
'none',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
CRBPRIMITIVES = [
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
NASNet = Genotype(
normal=[
('sep_conv_5x5', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 0),
('sep_conv_3x3', 0),
('avg_pool_3x3', 1),
('skip_connect', 0),
('avg_pool_3x3', 0),
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('skip_connect', 1),
],
normal_concat=[2, 3, 4, 5, 6],
reduce=[
('sep_conv_5x5', 1),
('sep_conv_7x7', 0),
('max_pool_3x3', 1),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('sep_conv_5x5', 0),
('skip_connect', 3),
('avg_pool_3x3', 2),
('sep_conv_3x3', 2),
('max_pool_3x3', 1),
],
reduce_concat=[4, 5, 6],
)
AmoebaNet = Genotype(
normal=[
('avg_pool_3x3', 0),
('max_pool_3x3', 1),
('sep_conv_3x3', 0),
('sep_conv_5x5', 2),
('sep_conv_3x3', 0),
('avg_pool_3x3', 3),
('sep_conv_3x3', 1),
('skip_connect', 1),
('skip_connect', 0),
('avg_pool_3x3', 1),
],
normal_concat=[4, 5, 6],
reduce=[
('avg_pool_3x3', 0),
('sep_conv_3x3', 1),
('max_pool_3x3', 0),
('sep_conv_7x7', 2),
('sep_conv_7x7', 0),
('avg_pool_3x3', 1),
('max_pool_3x3', 0),
('max_pool_3x3', 1),
('conv_7x1_1x7', 0),
('sep_conv_3x3', 5),
],
reduce_concat=[3, 4, 6]
)
DARTS_V1 = Genotype(
normal=[('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 0), ('sep_conv_3x3', 1), ('skip_connect', 0),
('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('skip_connect', 2)], normal_concat=[2, 3, 4, 5],
reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 0), ('max_pool_3x3', 0),
('skip_connect', 2), ('skip_connect', 2), ('avg_pool_3x3', 0)], reduce_concat=[2, 3, 4, 5])
DARTS_V2 = Genotype(
normal=[('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 0), ('sep_conv_3x3', 1), ('sep_conv_3x3', 1),
('skip_connect', 0), ('skip_connect', 0), ('dil_conv_3x3', 2)], normal_concat=[2, 3, 4, 5],
reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0),
('skip_connect', 2), ('skip_connect', 2), ('max_pool_3x3', 1)], reduce_concat=[2, 3, 4, 5])
DARTS = DARTS_V2
BATH = Genotype(
normal=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('max_pool_3x3', 0), ('sep_conv_5x5', 2), ('dil_conv_5x5', 0),
('max_pool_3x3', 2), ('sep_conv_3x3', 2), ('sep_conv_3x3', 0)], normal_concat=range(2, 6),
reduce=[('max_pool_3x3', 1), ('max_pool_3x3', 0), ('max_pool_3x3', 1), ('sep_conv_5x5', 2), ('skip_connect', 3),
('avg_pool_3x3', 2), ('sep_conv_3x3', 4), ('dil_conv_5x5', 1)], reduce_concat=range(2, 6))
BATH2 = Genotype(
normal=[('max_pool_3x3', 1), ('skip_connect', 0), ('skip_connect', 2), ('max_pool_3x3', 1), ('skip_connect', 1),
('skip_connect', 2), ('max_pool_3x3', 1), ('max_pool_3x3', 0)], normal_concat=range(2, 6),
reduce=[('max_pool_3x3', 0), ('max_pool_3x3', 1), ('skip_connect', 0), ('dil_conv_3x3', 1), ('skip_connect', 1),
('skip_connect', 0), ('dil_conv_5x5', 0), ('sep_conv_3x3', 4)], reduce_concat=range(2, 6))
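# Illustrative sketch (hypothetical helper, not an assertion about how this
# repository consumes genotypes): each Genotype stores (op_name, predecessor)
# pairs for the normal and reduction cells, two pairs per intermediate node.
def _print_genotype_ops(genotype=DARTS):
    """Print which op each edge of the normal cell applies (illustrative only)."""
    for i, (op_name, prev_node) in enumerate(genotype.normal):
        print('node', i // 2 + 2, 'applies', op_name, 'to the output of node', prev_node)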
| 34.700935 | 116 | 0.546458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,711 | 0.460813 |
0a277a87fbb9f9430d9ecdf658e9964b1157dc17 | 3,951 | py | Python | advanced-workflows/task-graphs-lab/exercise/plugins/lab/plugin/workflows.py | jrzeszutek/cloudify-training-labs | 5477750d269cb703ce47e35a1c13749fc88f3f6f | ["Apache-2.0"] | 6 | 2015-07-06T01:10:08.000Z | 2016-12-21T15:42:07.000Z | advanced-workflows/task-graphs-lab/exercise/plugins/lab/plugin/workflows.py | jrzeszutek/cloudify-training-labs | 5477750d269cb703ce47e35a1c13749fc88f3f6f | ["Apache-2.0"] | 4 | 2015-08-25T06:32:36.000Z | 2016-09-07T07:01:34.000Z | advanced-workflows/task-graphs-lab/exercise/plugins/lab/plugin/workflows.py | jrzeszutek/cloudify-training-labs | 5477750d269cb703ce47e35a1c13749fc88f3f6f | ["Apache-2.0"] | 14 | 2015-03-28T05:45:58.000Z | 2017-02-14T02:22:09.000Z |
'''Copyright Gigaspaces, 2017, All Rights Reserved'''
from cloudify.plugins import lifecycle
OP_START = 'hacker.interfaces.lifecycle.start'
OP_STOP = 'hacker.interfaces.lifecycle.stop'
OP_SS_C = 'hacker.interfaces.lifecycle.create_snapshots'
OP_SS_D = 'hacker.interfaces.lifecycle.delete_snapshots'
REQUIRED_OPS = set([OP_START, OP_SS_C, OP_SS_D, OP_STOP])
def build_instance_sequence(instance, operation,
state_start=None, state_end=None):
'''
Builds sequenced subgraph tasks for an instance
.. note::
The sequence will not be built if the instance provided
does not have a node with an operation defined in the
operation parameter.
:param `CloudifyWorkflowNodeInstance` instance:
Node instance to execute tasks against
:param str operation:
Node (lifecycle) operation to execute
:param str state_start:
Verb to describe operation start
    :param str state_end:
Verb to describe operation finish
'''
tasks = list()
# Only build the sequence if the node operation exists
if operation not in instance.node.operations:
return tasks
# Add task starting state
if state_start:
tasks.append(instance.send_event('%s host' % state_start))
tasks.append(instance.set_state(state_start.lower()))
# Add task operation
tasks.append(instance.execute_operation(operation))
# Add task ended state
if state_end:
tasks.append(instance.send_event('%s host' % state_end))
tasks.append(instance.set_state(state_end.lower()))
return tasks
def build_instance_subgraph(instance, graph):
'''
Builds a subgraph for an instance
:param `CloudifyWorkflowNodeInstance` instance:
Node instance to execute tasks against
:param `TaskDependencyGraph` graph:
Task graph to create sequences from
'''
# Init a "stop instance" subgraph
sg_stop = graph.subgraph('stop_subgraph')
seq_stop = sg_stop.sequence()
seq_stop.add(*build_instance_sequence(
instance, OP_STOP, 'Stopping', 'Stopped'))
# Init a "recreate snapshots" subgraph
sg_snap = graph.subgraph('snapshot_subgraph')
seq_snap = sg_snap.sequence()
if OP_SS_D in instance.node.operations:
seq_snap.add(*build_instance_sequence(instance, OP_SS_D))
if OP_SS_C in instance.node.operations:
seq_snap.add(*build_instance_sequence(instance, OP_SS_C))
# Init a "start instance" subgraph
sg_start = graph.subgraph('stop_subgraph')
seq_start = sg_start.sequence()
seq_start.add(*build_instance_sequence(
instance, OP_START, 'Starting', 'Started'))
# Create subgraph dependencies
graph.add_dependency(sg_snap, sg_stop)
graph.add_dependency(sg_start, sg_snap)
def refresh_snapshots(ctx, **_):
'''
Executes a complex, graph-based set of lifecycle events
to stop all host (compute) instances, delete all
existing instance snapshots, take new snapshots
of all attached volumes, and start the instances
back up when complete.
'''
graph = ctx.graph_mode()
# Find all compute hosts and build a sequence graph
for node in ctx.nodes:
if not REQUIRED_OPS.issubset(node.operations):
ctx.logger.warn(
'Skipping refresh_snapshots workflow for node "%s" because '
'it does not have all required operations defined' % node.id)
continue
# Iterate over each node instance
for instance in node.instances:
if not lifecycle.is_host_node(instance):
ctx.logger.warn(
'Skipping refresh_snapshots workflow for node instance '
'"%s" because it is not a compute host' % instance.id)
continue
build_instance_subgraph(instance, graph)
# Execute the sequences
return graph.execute()
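# Illustrative sketch (assumptions, not taken from this lab): the workflow
# above would typically be exposed in a blueprint and triggered from the CLI
# along these lines:
#
#   workflows:
#     refresh_snapshots:
#       mapping: lab.plugin.workflows.refresh_snapshots
#
#   $ cfy executions start refresh_snapshots -d <deployment-id>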
| 37.628571 | 77 | 0.679069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,948 | 0.49304 |
0a2ad964a50ee086e447a623b3863c7fbb9ef26a | 1,977 | py | Python | src/com/python/email/send_mail.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | null | null | null | src/com/python/email/send_mail.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | null | null | null | src/com/python/email/send_mail.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | null | null | null |
'''
Created on Aug 10, 2016
@author: Administrator
'''
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.multipart import MIMEBase
from email.utils import parseaddr, formataddr
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
from_addr = 'leeo1124@163.com'#input('From: ')
password = input('Password: ')
to_addr = '450475851@qq.com'#input('To: ')
smtp_server = 'smtp.163.com'#input('SMTP server: ')
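# Illustrative alternative (assumption, not in the original script): NetEase/163
# also accepts SMTP over implicit TLS on port 465, in which case the plain
# port-25 connection below could be replaced with:
# server = smtplib.SMTP_SSL(smtp_server, 465)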
# Send a plain-text email:
# msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
# Send an HTML email:
# msg = MIMEText('<html><body><h1>Hello</h1>' +
#     '<p>send by <a href="http://www.python.org">Python</a>...</p>' +
#     '</body></html>', 'html', 'utf-8')
# Send an email with an attachment:
# The email (message) object:
msg = MIMEMultipart()
msg['From'] = _format_addr('Python爱好者 <%s>' % from_addr)
msg['To'] = _format_addr('管理员 <%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的问候……', 'utf-8').encode()
# The message body is a MIMEText part:
msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
# Adding an attachment means adding a MIMEBase part; read an image from the local disk:
with open('D:/pythonWorkspace/pthonDemo/src/com/python/email/test.jpg', 'rb') as f:
    # Set the attachment's MIME type and filename (PNG here):
mime = MIMEBase('image', 'png', filename='test.png')
    # Add the required header fields:
mime.add_header('Content-Disposition', 'attachment', filename='test.png')
mime.add_header('Content-ID', '<0>')
mime.add_header('X-Attachment-Id', '0')
    # Read in the attachment content:
mime.set_payload(f.read())
    # Encode the payload with Base64:
encoders.encode_base64(mime)
    # Attach it to the MIMEMultipart message:
msg.attach(mime)
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
 | 29.073529 | 83 | 0.676277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,115 | 0.505211 |
0a33cb634cfe076d601a3145a01487981499f068 | 22,712 | py | Python | Scripts/calc_Utilities.py | zmlabe/ThicknessSensitivity | 6defdd897a61d7d1a02f34a9f4ec92b2b17b3075 | ["MIT"] | 1 | 2017-10-22T02:22:14.000Z | 2017-10-22T02:22:14.000Z | Scripts/calc_Utilities.py | zmlabe/ThicknessSensitivity | 6defdd897a61d7d1a02f34a9f4ec92b2b17b3075 | ["MIT"] | null | null | null | Scripts/calc_Utilities.py | zmlabe/ThicknessSensitivity | 6defdd897a61d7d1a02f34a9f4ec92b2b17b3075 | ["MIT"] | 4 | 2018-04-05T17:55:36.000Z | 2022-03-31T07:05:01.000Z |
"""
Functions are useful utilities for SITperturb experiments
Notes
-----
Author : Zachary Labe
Date : 13 August 2017
Usage
-----
[1] calcDecJan(varx,vary,lat,lon,level,levsq)
[2] calcDecJanFeb(varx,vary,lat,lon,level,levsq)
[3] calc_indttest(varx,vary)
[4] calc_weightedAve(var,lats)
[5] calc_spatialCorr(varx,vary,lats,lons,weight)
[6] calc_RMSE(varx,vary,lats,lons,weight)
[7] calc_spatialCorrHeight(varx,vary,lats,lons,weight)
[8] calc_spatialCorrHeightLev(varx,vary,lats,lons,weight,levelq)
"""
def calcDecJan(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_dj : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_dj : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_dj,vary_dj = calcDecJan(varx,vary,lat,lon,level,levsq)
"""
print('\n>>> Using calcDecJan function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_dj = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djappendf = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_dj = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_dj = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djappendh = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djappendf = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
varx_dj[counter,:,:] = np.nanmean(np.reshape(djappendh,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_dj[counter,:,:] = np.nanmean(np.reshape(djappendf,
(2,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
else:
print(ValueError('Selected wrong height - (surface or profile!)!'))
print('Completed: Organized data by months (ON,DJ,FM)!')
print('*Completed: Finished calcDecJan function!')
return varx_dj,vary_dj
###############################################################################
###############################################################################
###############################################################################
def calcDecJanFeb(varx,vary,lat,lon,level,levsq):
"""
Function calculates average for December-January-February
Parameters
----------
varx : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
vary : 4d array or 5d array
[year,month,lat,lon] or [year,month,lev,lat,lon]
lat : 1d numpy array
latitudes
lon : 1d numpy array
longitudes
level : string
Height of variable (surface or profile)
levsq : integer
number of levels
Returns
-------
varx_djf : 3d array or 4d array
[year,lat,lon] or [year,lev,lat,lon]
vary_djf : 3d array
[year,lat,lon] or [year,lev,lat,lon]
Usage
-----
varx_djf,vary_djf = calcDecJanFeb(varx,vary,lat,lon,level,levsq)
"""
print('\n>>> Using calcDecJan function!')
### Import modules
import numpy as np
### Reshape for 3d variables
if level == 'surface':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12),
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((varx.shape[0]-1,lat.shape[0],lon.shape[0]))
vary_djf = np.empty((vary.shape[0]-1,lat.shape[0],lon.shape[0]) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:],varxravel[12+i,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:],varyravel[12+i,:,:])
djfappendh = np.append(djfappendh1,varxravel[13+i,:,:])
djfappendf = np.append(djfappendf1,varyravel[13+i,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,int(lat.shape[0]),int(lon.shape[0]))),
axis=0)
### Reshape for 4d variables
elif level == 'profile':
varxravel = np.reshape(varx.copy(),
(int(varx.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varyravel = np.reshape(vary.copy(),
(int(vary.shape[0]*12.),levsq,
int(lat.shape[0]),int(lon.shape[0])))
varx_djf = np.empty((int(varx.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])))
vary_djf = np.empty((int(vary.shape[0]-1),levsq,
int(lat.shape[0]),int(lon.shape[0])) )
for i in range(0,varxravel.shape[0]-12,12):
counter = 0
if i >= 12:
counter = i//12
djfappendh1 = np.append(varxravel[11+i,:,:,:],
varxravel[12+i,:,:,:])
djfappendf1 = np.append(varyravel[11+i,:,:,:],
varyravel[12+i,:,:,:])
djfappendh = np.append(djfappendh1,
varxravel[13+i,:,:,:])
djfappendf = np.append(djfappendf1,
varyravel[13+i,:,:,:])
varx_djf[counter,:,:] = np.nanmean(np.reshape(djfappendh,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
vary_djf[counter,:,:] = np.nanmean(np.reshape(djfappendf,
(3,levsq,int(lat.shape[0]),
int(lon.shape[0]))),axis=0)
else:
print(ValueError('Selected wrong height - (surface or profile!)!'))
print('Completed: Organized data by months (DJF)!')
print('*Completed: Finished calcDecJanFeb function!')
return varx_djf,vary_djf
###############################################################################
###############################################################################
###############################################################################
def calc_indttest(varx,vary):
"""
Function calculates statistical difference for 2 independent
sample t-test
Parameters
----------
varx : 3d array
vary : 3d array
Returns
-------
stat = calculated t-statistic
pvalue = two-tailed p-value
Usage
-----
    stat,pvalue = calc_indttest(varx,vary)
"""
print('\n>>> Using calc_ttest function!')
### Import modules
import numpy as np
import scipy.stats as sts
### 2-independent sample t-test
stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')
### Significant at 95% confidence level
pvalue[np.where(pvalue >= 0.05)] = np.nan
pvalue[np.where(pvalue < 0.05)] = 1.
print('*Completed: Finished calc_ttest function!')
return stat,pvalue
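### Illustrative example (hypothetical, synthetic data; not called anywhere)
def _example_indttest():
    """Minimal synthetic check of calc_indttest on two random samples."""
    import numpy as np
    varx = np.random.randn(30,10,10)
    vary = np.random.randn(30,10,10) + 0.5
    stat,pvalue = calc_indttest(varx,vary)
    ### pvalue is nan wherever the difference is not significant at 95% confidence
    return stat,pvalue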
###############################################################################
###############################################################################
###############################################################################
def calc_weightedAve(var,lats):
"""
Area weights sit array 5d [ens,year,month,lat,lon] into [ens,year,month]
Parameters
----------
var : 5d,4d,3d array of a gridded variable
lats : 2d array of latitudes
Returns
-------
meanvar : weighted average for 3d,2d,1d array
Usage
-----
meanvar = calc_weightedAve(var,lats)
"""
print('\n>>> Using calc_weightedAve function!')
### Import modules
import numpy as np
### Calculate weighted average for various dimensional arrays
if var.ndim == 5:
meanvar = np.empty((var.shape[0],var.shape[1],var.shape[2]))
for ens in range(var.shape[0]):
for i in range(var.shape[1]):
for j in range(var.shape[2]):
varq = var[ens,i,j,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[ens,i,j] = np.nansum(varmask*areamask) \
/np.sum(areamask)
elif var.ndim == 4:
meanvar = np.empty((var.shape[0],var.shape[1]))
for i in range(var.shape[0]):
for j in range(var.shape[1]):
varq = var[i,j,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[i,j] = np.nansum(varmask*areamask)/np.sum(areamask)
elif var.ndim == 3:
meanvar = np.empty((var.shape[0]))
for i in range(var.shape[0]):
varq = var[i,:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar[i] = np.nansum(varmask*areamask)/np.sum(areamask)
elif var.ndim == 2:
meanvar = np.empty((var.shape[0]))
varq = var[:,:]
mask = np.isfinite(varq) & np.isfinite(lats)
varmask = varq[mask]
areamask = np.cos(np.deg2rad(lats[mask]))
meanvar = np.nansum(varmask*areamask)/np.sum(areamask)
else:
print(ValueError('Variable has the wrong dimensions!'))
print('Completed: Weighted variable average!')
print('*Completed: Finished calc_weightedAve function!')
return meanvar
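### Illustrative example (hypothetical, synthetic data; not called anywhere)
def _example_weightedAve():
    """Area-weighted mean of a synthetic [year,lat,lon] field of ones (returns 1.0s)."""
    import numpy as np
    lons = np.arange(0,360,10.)
    lats1d = np.arange(-85,90,10.)
    lon2,lat2 = np.meshgrid(lons,lats1d)
    var = np.ones((5,lat2.shape[0],lat2.shape[1]))
    return calc_weightedAve(var,lat2)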
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorr(varx,vary,lats,lons,weight):
"""
Calculates spatial correlation from pearson correlation coefficient
Parameters
----------
varx : 2d array
vary : 2d array
lats : 1d array
lons : 1d array of latitude
weight : string (yes or no)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorr(varx,vary,lats,lons)
"""
print('\n>>> Using calc_spatialCorr function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 40)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
        ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorr function!')
return corrcoef
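### Illustrative example (hypothetical, synthetic data; not called anywhere)
def _example_spatialCorr():
    """Weighted pattern correlation of two similar synthetic fields (high r)."""
    import numpy as np
    lons = np.arange(0,360,10.)
    lats = np.arange(-85,90,10.)
    lon2,lat2 = np.meshgrid(lons,lats)
    varx = np.cos(np.deg2rad(lat2))
    vary = varx + 0.1*np.random.randn(*varx.shape)
    ### note: weight='yes' first masks both fields to latitudes north of 40N
    return calc_spatialCorr(varx,vary,lats,lons,'yes')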
###############################################################################
###############################################################################
###############################################################################
def calc_RMSE(varx,vary,lats,lons,weight):
"""
Calculates root mean square weighted average
Parameters
----------
varx : 2d array
vary : 2d array
lons : 1d array of latitude
weight : string (yes or no)
Returns
-------
rmse : 1d array
Usage
-----
rmse = calc_RMSE(varx,vary,lats,lons)
"""
print('\n>>> Using calc_RMSE function!')
### Import modules
import numpy as np
from sklearn.metrics import mean_squared_error
if weight == 'yes': # Computed weighted correlation coefficient
### mask
mask = 'yes'
if mask == 'yes':
latq = np.where(lats > 40)[0]
lats = lats[latq]
varx = varx[latq,:]
vary = vary[latq,:]
print('MASKING LATITUDES!')
### Create 2d meshgrid for weights
lon2,lat2 = np.meshgrid(lons,lats)
### Create 2d array of weights based on latitude
gw = np.cos(np.deg2rad(lat2))
### Calculate rmse
sq_err = (varx - vary)**2
rmse = np.sqrt((np.sum(sq_err*gw))/np.sum(gw))
elif weight == 'no':
### Root mean square error from sklearn (not weighted)
rmse = np.sqrt(mean_squared_error(varx.ravel(),vary.ravel()))
print('Completed: Computed NON-weighted correlation!')
else:
        ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_RMSE function!')
return rmse
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorrHeight(varx,vary,levs,lons,weight):
"""
Calculates spatial correlation from pearson correlation coefficient for
grids over vertical height (17 pressure coordinate levels)
Parameters
----------
varx : 2d array
vary : 2d array
levs : 1d array of levels
lons : 1d array of latitude
weight : string (yes or no)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons)
"""
print('\n>>> Using calc_spatialCorrHeight function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### Create 2d meshgrid for weights
lon2,lev2 = np.meshgrid(lons,levs)
### Create 2d array of weights based on latitude
gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5,
0.5,0.5,0.5,0.7,0.7,0.7,1.])
gw,gw2 = np.meshgrid(lons,gwq)
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average (17 P Levels)!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance (17 P Levels)!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation (17 P Levels)!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorrHeight function!')
return corrcoef
###############################################################################
###############################################################################
###############################################################################
def calc_spatialCorrHeightLev(varx,vary,levs,lons,weight,levelq):
"""
Calculates spatial correlation from pearson correlation coefficient for
grids over vertical height (17 pressure coordinate levels). Change the
weighting for different level correlations
Parameters
----------
varx : 2d array
vary : 2d array
levs : 1d array of levels
lons : 1d array of latitude
weight : string (yes or no)
levelq : string (all, tropo, strato)
Returns
-------
corrcoef : 1d array of correlation coefficient (pearson r)
Usage
-----
corrcoef = calc_spatialCorrHeight(varx,vary,lats,lons,levels)
"""
print('\n>>> Using calc_spatialCorrHeightLev function!')
### Import modules
import numpy as np
if weight == 'yes': # Computed weighted correlation coefficient
### Create 2d meshgrid for weights
lon2,lev2 = np.meshgrid(lons,levs)
if levelq == 'all':
### Create 2d array of weights based on latitude
gwq = np.array([0.25,0.25,0.25,0.25,0.25,0.25,0.4,0.5,0.5,0.5,
0.5,0.5,0.5,0.7,0.7,0.7,1.])
gw,gw2 = np.meshgrid(lons,gwq)
elif levelq == 'tropo':
gwq = np.array([1.0,1.0,1.0,1.0,0.5,0.5,0.5,0.2,0.2,0.,0.,0.,
0.,0.,0.,0.,0.])
gw,gw2 = np.meshgrid(lons,gwq)
elif levelq == 'strato':
gwq = np.array([0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.5,1.,1.,1.,1.
,1.,1.])
gw,gw2 = np.meshgrid(lons,gwq)
def m(x, w):
"""Weighted Mean"""
wave = np.sum(x * w) / np.sum(w)
print('Completed: Computed weighted average (17 P Levels)!')
return wave
def cov(x, y, w):
"""Weighted Covariance"""
wcov = np.sum(w * (x - m(x, w)) * (y - m(y, w))) / np.sum(w)
print('Completed: Computed weighted covariance (17 P Levels)!')
return wcov
def corr(x, y, w):
"""Weighted Correlation"""
wcor = cov(x, y, w) / np.sqrt(cov(x, x, w) * cov(y, y, w))
print('Completed: Computed weighted correlation (17 P Levels)!')
return wcor
corrcoef = corr(varx,vary,gw)
elif weight == 'no':
### Correlation coefficient from numpy function (not weighted)
corrcoef= np.corrcoef(varx.ravel(),vary.ravel())[0][1]
print('Completed: Computed NON-weighted correlation!')
else:
ValueError('Wrong weighted argument in function!')
print('*Completed: Finished calc_SpatialCorrHeightLev function!')
    return corrcoef
 | 36.514469 | 95 | 0.468739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9,562 | 0.421011 |
0a3cda3b610042fefd30969a702f9d925c74876f | 4,421 | py | Python | ttl2json.py | the-norman-sicily-project/genealogical-trees | 32fa4f25861ae34543b0a6b95e54842c0018331b | ["MIT"] | 1 | 2021-05-18T20:39:30.000Z | 2021-05-18T20:39:30.000Z | ttl2json.py | the-norman-sicily-project/genealogical-trees | 32fa4f25861ae34543b0a6b95e54842c0018331b | ["MIT"] | null | null | null | ttl2json.py | the-norman-sicily-project/genealogical-trees | 32fa4f25861ae34543b0a6b95e54842c0018331b | ["MIT"] | null | null | null |
#!/usr/bin/env python3
import sys
import json
import rdflib
import rdflib.plugins.sparql as sparql
RELS_TO_DRAW = ['isWifeOf', 'isMotherOf', 'isFatherOf', 'isHusbandOf', 'isSpouseOf']
RELS_TO_INFER = ['hasGrandParent', 'isGrandParentOf', 'hasGreatGrandParent',
'isGreatGrandParentOf', 'isUncleOf', 'hasUncle',
'isGreatUncleOf', 'hasGreatUncle', 'isAuntOf', 'hasAunt',
'isGreatAuntOf', 'hasGreatAunt',
'isBrotherOf', 'isSisterOf', 'isSiblingOf',
'isFirstCousinOf', 'isSecondCousinOf', 'isThirdCousinOf']
RELS_OF_INTEREST = RELS_TO_DRAW + RELS_TO_INFER
try:
workpath = sys.argv[1]
except IndexError:
sys.exit("No path defined!")
try:
recursion_limit = int(sys.argv[2])
except IndexError:
recursion_limit = 0
if recursion_limit > 0:
sys.setrecursionlimit(recursion_limit)
g = rdflib.Graph()
g.parse(workpath, format="turtle")
fhkb_str = "http://www.example.com/genealogy.owl#"
schema_str = "https://schema.org/"
FHKB = rdflib.Namespace(fhkb_str)
SCHEMA_ORG = rdflib.Namespace(schema_str)
def dump(uriref):
if uriref.__contains__('#'):
return uriref.split('#')[-1]
return uriref.split('/')[-1]
graph = {}
graph['nodes'] = []
graph['edges'] = []
nodes = {}
q = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
}
ORDER BY ?person""")
for rel in RELS_OF_INTEREST:
pred = rdflib.URIRef("{}{}".format(fhkb_str, rel))
relation_query_results = g.query(q, initBindings={'pred': pred})
for (subj, pred, obj) in relation_query_results:
graph['edges'].append(
{
'data': {
'group': 'edges',
'id': f'{dump(subj)}-{dump(pred)}-{dump(obj)}',
'source': dump(subj),
'target': dump(obj),
'type': dump(pred)
}
})
q_details = sparql.prepareQuery(
"""PREFIX fhkb:<http://www.example.com/genealogy.owl#>
SELECT ?person ?pred ?obj
WHERE {
?person a fhkb:Person ;
?pred ?obj .
FILTER NOT EXISTS {
?person ?testPred ?obj .
VALUES ?testPred {
fhkb:isWifeOf
fhkb:isMotherOf
fhkb:isFatherOf
fhkb:isHusbandOf
fhkb:isSpouseOf
fhkb:hasGrandParent
fhkb:isGrandParentOf
fhkb:hasGreatGrandParent
fhkb:isGreatGrandParentOf
fhkb:isUncleOf
fhkb:hasUncle
fhkb:isGreatUncleOf
fhkb:hasGreatUncle
fhkb:isAuntOf
fhkb:hasAunt
fhkb:isGreatAuntOf
fhkb:hasGreatAunt
fhkb:isBrotherOf
fhkb:isSisterOf
fhkb:isSiblingOf
fhkb:isFirstCousinOf
fhkb:isSecondCousinOf
fhkb:isThirdCousinOf
fhkb:hasRelation
fhkb:isPartnerIn
fhkb:isMalePartnerIn
fhkb:isFemalePartnerIn
fhkb:isBloodrelationOf
}
}
}
ORDER BY ?person"""
)
person_query_results = g.query(q_details)
for (subj, pred, obj) in person_query_results:
node = nodes.get(dump(subj), {
'data': {
'label': '',
'degree': 0,
'size': 10,
'alternateNames': [],
'honorificPrefixes': [],
'honorificSuffixes': [],
'images': [],
'id': dump(subj),
}})
if pred == FHKB.Sex:
node['data'][dump(pred)] = dump(obj)
elif pred.startswith(SCHEMA_ORG):
if dump(pred) == 'honorificSuffix':
node['data']['honorificSuffixes'].append(obj)
elif dump(pred) == 'honorificPrefix':
node['data']['honorificPrefixes'].append(obj)
elif dump(pred) == 'alternateName':
node['data']['alternateNames'].append(obj)
elif dump(pred) == 'image':
node['data']['images'].append(obj)
else:
node['data'][dump(pred)] = obj
elif pred == rdflib.RDFS.label:
node['data']['label'] = obj
else:
continue
nodes[dump(subj)] = node
graph['nodes'] = list(nodes.values())
print(json.dumps(graph, indent=0))
sys.exit(0)
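# Illustrative invocation (hypothetical file names): the script takes a Turtle
# file plus an optional recursion limit and writes Cytoscape-style JSON
# (nodes + edges) to stdout, e.g.:
#   python3 ttl2json.py family.ttl 20000 > graph.json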
| 28.339744 | 84 | 0.555078 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,106 | 0.476363 |
0a3e6de6fa0adef7035c5c9d0aedbcc9e7f13b79 | 791 | py | Python | electrum/version.py | c4pt000/electrum-radiocoin | 7cb5f618a9aa8cd03d60191624a0e57cc24646d2 | ["MIT"] | null | null | null | electrum/version.py | c4pt000/electrum-radiocoin | 7cb5f618a9aa8cd03d60191624a0e57cc24646d2 | ["MIT"] | null | null | null | electrum/version.py | c4pt000/electrum-radiocoin | 7cb5f618a9aa8cd03d60191624a0e57cc24646d2 | ["MIT"] | null | null | null |
ELECTRUM_VERSION = '4.1.5-radc' # version of the client package
APK_VERSION = '4.1.5.0' # read by buildozer.spec
PROTOCOL_VERSION = '1.4' # protocol version requested
# The hash of the mnemonic seed must begin with this
SEED_PREFIX = '01' # Standard wallet
SEED_PREFIX_SW = '100' # Segwit wallet
SEED_PREFIX_2FA = '101' # Two-factor authentication
SEED_PREFIX_2FA_SW = '102' # Two-factor auth, using segwit
def seed_prefix(seed_type):
if seed_type == 'standard':
return SEED_PREFIX
elif seed_type == 'segwit':
return SEED_PREFIX_SW
elif seed_type == '2fa':
return SEED_PREFIX_2FA
elif seed_type == '2fa_segwit':
return SEED_PREFIX_2FA_SW
raise Exception(f"unknown seed_type: {seed_type}")
| 34.391304 | 67 | 0.668774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.427307 |
0a4491bed67c4627a06dabc6e88940ee8f57226d | 14,777 | py | Python | ResNet/dropblock.py | whj363636/CamDrop | f8af8c200665145f112b59348f60fc4cf80f04ec | ["MIT"] | null | null | null | ResNet/dropblock.py | whj363636/CamDrop | f8af8c200665145f112b59348f60fc4cf80f04ec | ["MIT"] | null | null | null | ResNet/dropblock.py | whj363636/CamDrop | f8af8c200665145f112b59348f60fc4cf80f04ec | ["MIT"] | 1 | 2021-11-06T11:22:49.000Z | 2021-11-06T11:22:49.000Z |
# -*- coding: utf-8 -*-
# File: dropblock.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import six
# from tensorpack.tfutils.compat import tfv1 as tf # this should be avoided first in model code
from tensorpack.tfutils.tower import get_current_tower_context
from tensorpack.models import GlobalAvgPooling, FullyConnected
import tensorflow as tf
__all__ = ['dropblock', 'dropblock2','dropblock3','dropblock4'] # 1: paper baseline; 2: group dropout; 3: group soft-dropout; 4: Uout group dropout
def dropblock(net, keep_prob, dropblock_size, gap_w=None, label=None, G=None, CG=None, data_format='channels_first'):
"""DropBlock: a regularization method for convolutional neural networks.
DropBlock is a form of structured dropout, where units in a contiguous
region of a feature map are dropped together. DropBlock works better than
dropout on convolutional layers due to the fact that activation units in
convolutional layers are spatially correlated.
See https://arxiv.org/pdf/1810.12890.pdf for details.
Args:
net: `Tensor` input tensor.
is_training: `bool` for whether the model is training.
keep_prob: `float` or `Tensor` keep_prob parameter of DropBlock. "None"
means no DropBlock.
dropblock_size: `int` size of blocks to be dropped by DropBlock.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A version of input tensor with DropBlock applied.
Raises:
      ValueError: if width and height of the input tensor are not equal.
"""
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
_, width, height, _ = net.get_shape().as_list()
else:
_, _, width, height = net.get_shape().as_list()
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
dropblock_size = min(dropblock_size, width)
  # seed_drop_rate is the gamma parameter of DropBlock.
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (
width - dropblock_size + 1)**2
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0)
valid_block_center = tf.expand_dims(
valid_block_center, -1 if data_format == 'channels_last' else 0)
randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
(1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(
block_pattern,
axis=[1, 2] if data_format == 'channels_last' else [2, 3],
keepdims=True)
else:
if data_format == 'channels_last':
ksize = [1, dropblock_size, dropblock_size, 1]
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = -tf.nn.max_pool(
-block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME',
data_format='NHWC' if data_format == 'channels_last' else 'NCHW')
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(
tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(
block_pattern, net.dtype)
return net
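# Illustrative usage sketch (hypothetical wrapper; the tensor name is a
# placeholder and nothing below is called by this module):
def _dropblock_usage_example(feature_map):
  """Apply DropBlock with keep_prob=0.9 and 7x7 blocks to an NCHW tensor."""
  return dropblock(feature_map, keep_prob=0.9, dropblock_size=7,
                   data_format='channels_first')
# For a 14x14 map with keep_prob=0.9 and dropblock_size=7, the seeding rate is
# gamma = 0.1*14**2/(7**2*(14-7+1)**2) ~= 0.00625, i.e. about 0.4 seed
# positions per channel on average before the max-pool expands them to blocks.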
def dropblock2(net, keep_prob, dropblock_size, G=None, CG=None, data_format='channels_first'):
"""
mimic GN
"""
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
N, height, width, C = net.get_shape().as_list()
else:
N, C, height, width = net.get_shape().as_list()
N = tf.shape(net)[0]
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
if G == None: G = C // CG
if CG == None: CG = C // G
net = tf.reshape(net, [N, G, CG, height, width])
dropblock_size = min(dropblock_size, width)
  # seed_drop_rate is the gamma parameter of DropBlock.
# seed_drop_rate = (1.0 - keep_prob) * width**2 * G**2 / (C * dropblock_size**2) / (C * (width - dropblock_size + 1)**2)
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0) # for depth
valid_block_center = tf.expand_dims(valid_block_center, 0) # for batch
valid_block_center = tf.expand_dims(valid_block_center, 0) # for channel
randnoise = tf.random_uniform([N, G, 1, width, height], dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast(
(1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(block_pattern, axis=[2, 3, 4], keepdims=True)
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = tf.reduce_max(-block_pattern, reduction_indices=[2])
block_pattern = -tf.nn.max_pool(block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME', data_format='NCHW')
block_pattern = tf.expand_dims(block_pattern, 2)
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype)
net = tf.reshape(net, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height, width])
return net
def CamDrop(net, keep_prob, dropblock_size, flag=None, label=None, G=None, CG=None, data_format='channels_first'):
'''CamDrop'''
def _get_cam(net, label, flag, dropblock_size, data_format='channels_first'):
'''
net: [N, C, H, W]
gap_w : [gap_C, num_of_class]
'''
if data_format == 'channels_last':
N, height, width, C = net.get_shape().as_list()
else:
N, C, height, width = net.get_shape().as_list()
N = tf.shape(net)[0]
gap_w = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'linear/W') if flag > 0 else None
if not gap_w is None:
gap_w = tf.convert_to_tensor(gap_w, tf.float32)
gap_C, num = tf.squeeze(gap_w, 0).get_shape().as_list() # [gap_C, num]
gap_w = tf.reshape(gap_w, [C, gap_C//C, num])
gap_w = tf.reduce_mean(gap_w, reduction_indices=[1]) # [C, num]
label = tf.gather(tf.transpose(gap_w), label) # [N, C]
# spatial
weights = tf.expand_dims(label, 2) # [N, C, 1]
net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width])
cam = tf.matmul(weights, net, transpose_a=True) # [N, 1, width*height]
# spt_mask = tf.not_equal(cam, tf.reduce_max(cam, reduction_indices=[2], keepdims=True))
# cam = tf.reshape(cam, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height, width])
# cam = tf.nn.avg_pool(cam, ksize=[1, 1, dropblock_size, dropblock_size], strides=[1, 1, 1, 1], padding='VALID', data_format='NCHW')
# left_or_top = (dropblock_size-1) // 2
# right_or_bot = left_or_top if dropblock_size % 2 == 1 else dropblock_size-left_or_top-1
# cam = tf.pad(cam, [[0, 0], [0, 0], [left_or_top, right_or_bot], [left_or_top, right_or_bot]])
# cam = tf.reshape(cam, [N, height*width, 1]) if data_format == 'channels_last' else tf.reshape(cam, [N, 1, height*width])
k = tf.cast(height*width/dropblock_size**2, tf.int32)
topk, _ = tf.math.top_k(cam, k=k) # [N, 1, k]
topk = tf.gather(topk, indices=[k-1], axis=-1) # [N, 1, 1]
spt_mask = (cam < topk)
spt_mask = tf.reshape(spt_mask, [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(spt_mask, [N, 1, height, width])
# channel
k = tf.cast(C/8, tf.int32)
topk, _ = tf.math.top_k(label, k=k+1) # [N, k]
topk = tf.gather(topk, indices=k, axis=1) # [N, 1]
topk = tf.expand_dims(topk, 1) # [N, C, 1]
chan_mask = (label < topk)
chan_mask = tf.expand_dims(chan_mask, 2) # [N, C, 1]
chan_mask = tf.expand_dims(chan_mask, 2) # [N, C, 1, 1]
cam_mask = tf.logical_or(spt_mask, chan_mask)
# chan_mask = tf.reshape(tf.nn.softmax(cam), [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(tf.nn.softmax(cam), [N*C, height*width])
# chan_mask = tf.reshape(cam, [N*C, height*width]) if data_format == 'channels_last' else tf.reshape(cam, [N*C, height*width])
# chan_mask = tf.reshape(tf.nn.sigmoid(cam), [N, height, width, 1]) if data_format == 'channels_last' else tf.reshape(tf.nn.sigmoid(cam), [N, 1, height, width])
else:
cam_mask = False
return cam_mask
# def _get_gradcam(net, cost=None, gap_w=None, data_format='channels_first'):
# # Conv layer tensor [?,2048,10,10]
# def _compute_gradients(tensor, var_list):
# grads = tf.gradients(tensor, var_list)
# return [grad if grad is not None else tf.zeros_like(var)
# for var, grad in zip(var_list, grads)]
# # grads = tf.gradients(cost, net)[0]
# if not gap_w is None:
# # Normalizing the gradients
# if data_format == 'channels_last':
# N, height, width, C = net.get_shape().as_list()
# else:
# N, C, height, width = net.get_shape().as_list()
# N = tf.shape(net)[0]
# grads = _compute_gradients(cost, [net])[0]
# norm_grads = tf.divide(grads, tf.sqrt(tf.reduce_mean(tf.square(grads), reduction_indices=[2,3], keepdims=True)) + tf.constant(1e-5))
# weights = tf.reduce_mean(norm_grads, reduction_indices=[2,3]) # [N, C]
# weights = tf.expand_dims(weights, 2) # [N, C, 1]
# net = tf.reshape(net, [N, height*width, C]) if data_format == 'channels_last' else tf.reshape(net, [N, C, height*width])
# # cam_mean = 1 + tf.matmul(net, weights, transpose_a=True) # [N, width*height, 1]
# cam_mean = tf.maximum(tf.matmul(weights, net, transpose_a=True), 0) # [N, 1, width*height]
# cam_chan = tf.maximum(tf.multiply(net, weights), 0) # [N, C, width*height]
# cam = cam_mean*cam_chan
# # Passing through ReLU
# cam = cam / tf.reduce_max(cam, reduction_indices=[1,2], keepdims=True)
# cam = tf.reshape(cam, [N, height, width, C]) if data_format == 'channels_last' else tf.reshape(cam, [N, C, height, width])
# else:
# cam = 0.
# return cam
# def _gumbel_softmax(logits, tau, shape, seed_drop_rate, eps=1e-20):
# if logits == False:
# return logits
# U = tf.random_uniform(tf.shape(logits), minval=0, maxval=1)
# y = logits - tf.log(-tf.log(U + eps) + eps)
# cam_mask = tf.nn.softmax(y / tau)
# topk, _ = tf.math.top_k(cam_mask, k=tf.cast(seed_drop_rate*shape[-1], tf.int32)) # [N, 1]
# topk = tf.gather(topk, indices=tf.cast(seed_drop_rate*shape[-1], tf.int32)-1, axis=1)
# topk = tf.expand_dims(topk, 1) # [N, C, 1]
# cam_mask = (cam_mask < topk)
# # cam_mask = tf.cast(tf.equal(cam_mask, tf.reduce_max(cam_mask, reduction_indices=[1], keepdims=True)), tf.float32)
# cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1]
# cam_mask = tf.expand_dims(cam_mask, 2) # [N, C, 1, 1]
# return cam_mask
ctx = get_current_tower_context()
is_training = bool(ctx.is_training)
if not is_training or keep_prob is None:
return net
tf.logging.info('Applying DropBlock: dropblock_size {}, net.shape {}'.format(dropblock_size, net.shape))
if data_format == 'channels_last':
_, width, height, C = net.get_shape().as_list()
else:
_, C, width, height = net.get_shape().as_list()
if width != height:
raise ValueError('Input tensor with width!=height is not supported.')
N = tf.shape(net)[0]
dropblock_size = min(dropblock_size, width)
# seed_drop_rate is the gamma parameter of DropBlcok.
seed_drop_rate = (1.0 - keep_prob) * width**2 / dropblock_size**2 / (width - dropblock_size + 1)**2
cam_mask = _get_cam(net, label, flag, dropblock_size, data_format)
# Forces the block to be inside the feature map.
w_i, h_i = tf.meshgrid(tf.range(width), tf.range(width))
valid_block_center = tf.logical_and(
tf.logical_and(w_i >= int(dropblock_size // 2),
w_i < width - (dropblock_size - 1) // 2),
tf.logical_and(h_i >= int(dropblock_size // 2),
h_i < width - (dropblock_size - 1) // 2))
valid_block_center = tf.expand_dims(valid_block_center, 0)
valid_block_center = tf.expand_dims(valid_block_center, -1 if data_format == 'channels_last' else 0)
randnoise = tf.random_uniform(tf.shape(net), dtype=tf.float32)
block_pattern = (1 - tf.cast(valid_block_center, dtype=tf.float32) + tf.cast((1 - seed_drop_rate), dtype=tf.float32) + randnoise) >= 1
block_pattern = tf.logical_or(block_pattern, cam_mask)
block_pattern = tf.cast(block_pattern, dtype=tf.float32)
if dropblock_size == width:
block_pattern = tf.reduce_min(
block_pattern,
axis=[1, 2] if data_format == 'channels_last' else [2, 3],
keepdims=True)
else:
if data_format == 'channels_last':
ksize = [1, dropblock_size, dropblock_size, 1]
else:
ksize = [1, 1, dropblock_size, dropblock_size]
block_pattern = -tf.nn.max_pool(
-block_pattern, ksize=ksize, strides=[1, 1, 1, 1], padding='SAME',
data_format='NHWC' if data_format == 'channels_last' else 'NCHW')
percent_ones = tf.cast(tf.reduce_sum((block_pattern)), tf.float32) / tf.cast(tf.size(block_pattern), tf.float32)
net = net / tf.cast(percent_ones, net.dtype) * tf.cast(block_pattern, net.dtype)
  return net
 | 45.891304 | 166 | 0.663667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,122 | 0.414292 |
0a482fa1649b42a4ec4a6b713bc6b758170e2273 | 12,096 | py | Python | httprunner/compat.py | panyuan209/httprunner | d90f2b9ab06963e8efa1c327975fca5296d6bc39 | ["Apache-2.0"] | null | null | null | httprunner/compat.py | panyuan209/httprunner | d90f2b9ab06963e8efa1c327975fca5296d6bc39 | ["Apache-2.0"] | null | null | null | httprunner/compat.py | panyuan209/httprunner | d90f2b9ab06963e8efa1c327975fca5296d6bc39 | ["Apache-2.0"] | null | null | null |
"""
This module handles compatibility issues between testcase format v2 and v3.
It resolves testcase compatibility issues between HttpRunner v2 and v3.
"""
import os
import sys
from typing import List, Dict, Text, Union, Any
from loguru import logger
from httprunner import exceptions
from httprunner.loader import load_project_meta, convert_relative_project_root_dir
from httprunner.parser import parse_data
from httprunner.utils import sort_dict_by_custom_order
def convert_variables(
raw_variables: Union[Dict, List, Text], test_path: Text
) -> Dict[Text, Any]:
if isinstance(raw_variables, Dict):
return raw_variables
if isinstance(raw_variables, List):
# [{"var1": 1}, {"var2": 2}]
variables: Dict[Text, Any] = {}
for var_item in raw_variables:
if not isinstance(var_item, Dict) or len(var_item) != 1:
raise exceptions.TestCaseFormatError(
f"Invalid variables format: {raw_variables}"
)
variables.update(var_item)
return variables
elif isinstance(raw_variables, Text):
# get variables by function, e.g. ${get_variables()}
project_meta = load_project_meta(test_path)
variables = parse_data(raw_variables, {}, project_meta.functions)
return variables
else:
raise exceptions.TestCaseFormatError(
f"Invalid variables format: {raw_variables}"
)
def _convert_jmespath(raw: Text) -> Text:
if not isinstance(raw, Text):
raise exceptions.TestCaseFormatError(f"Invalid jmespath extractor: {raw}")
# content.xx/json.xx => body.xx
if raw.startswith("content"):
raw = f"body{raw[len('content'):]}"
elif raw.startswith("json"):
raw = f"body{raw[len('json'):]}"
raw_list = []
for item in raw.split("."):
if "-" in item:
# add quotes for field with separator
# e.g. headers.Content-Type => headers."Content-Type"
item = item.strip('"')
raw_list.append(f'"{item}"')
elif item.isdigit():
# convert lst.0.name to lst[0].name
if len(raw_list) == 0:
logger.error(f"Invalid jmespath: {raw}")
sys.exit(1)
last_item = raw_list.pop()
item = f"{last_item}[{item}]"
raw_list.append(item)
else:
raw_list.append(item)
return ".".join(raw_list)
def _convert_extractors(extractors: Union[List, Dict]) -> Dict:
""" convert extract list(v2) to dict(v3)
Args:
extractors: [{"varA": "content.varA"}, {"varB": "json.varB"}]
Returns:
{"varA": "body.varA", "varB": "body.varB"}
"""
v3_extractors: Dict = {}
if isinstance(extractors, List):
# [{"varA": "content.varA"}, {"varB": "json.varB"}]
for extractor in extractors:
if not isinstance(extractor, Dict):
logger.error(f"Invalid extractor: {extractors}")
sys.exit(1)
for k, v in extractor.items():
v3_extractors[k] = v
elif isinstance(extractors, Dict):
# {"varA": "body.varA", "varB": "body.varB"}
v3_extractors = extractors
else:
logger.error(f"Invalid extractor: {extractors}")
sys.exit(1)
for k, v in v3_extractors.items():
v3_extractors[k] = _convert_jmespath(v)
return v3_extractors
def _convert_validators(validators: List) -> List:
for v in validators:
if "check" in v and "expect" in v:
# format1: {"check": "content.abc", "assert": "eq", "expect": 201}
v["check"] = _convert_jmespath(v["check"])
elif len(v) == 1:
# format2: {'eq': ['status_code', 201]}
comparator = list(v.keys())[0]
v[comparator][0] = _convert_jmespath(v[comparator][0])
return validators
def _sort_request_by_custom_order(request: Dict) -> Dict:
custom_order = [
"method",
"url",
"params",
"headers",
"cookies",
"data",
"json",
"files",
"timeout",
"allow_redirects",
"proxies",
"verify",
"stream",
"auth",
"cert",
]
return sort_dict_by_custom_order(request, custom_order)
def _sort_step_by_custom_order(step: Dict) -> Dict:
custom_order = [
"name",
"variables",
"request",
"testcase",
"setup_hooks",
"teardown_hooks",
"extract",
"validate",
"validate_script",
]
return sort_dict_by_custom_order(step, custom_order)
def _ensure_step_attachment(step: Dict) -> Dict:
test_dict = {
"name": step["name"],
}
if "variables" in step:
test_dict["variables"] = step["variables"]
if "setup_hooks" in step:
test_dict["setup_hooks"] = step["setup_hooks"]
if "teardown_hooks" in step:
test_dict["teardown_hooks"] = step["teardown_hooks"]
if "extract" in step:
test_dict["extract"] = _convert_extractors(step["extract"])
if "export" in step:
test_dict["export"] = step["export"]
if "validate" in step:
if not isinstance(step["validate"], List):
raise exceptions.TestCaseFormatError(
f'Invalid teststep validate: {step["validate"]}'
)
test_dict["validate"] = _convert_validators(step["validate"])
if "validate_script" in step:
test_dict["validate_script"] = step["validate_script"]
return test_dict
def ensure_testcase_v3_api(api_content: Dict) -> Dict:
logger.info("convert api in v2 to testcase format v3")
teststep = {
"request": _sort_request_by_custom_order(api_content["request"]),
}
teststep.update(_ensure_step_attachment(api_content))
teststep = _sort_step_by_custom_order(teststep)
config = {"name": api_content["name"]}
extract_variable_names: List = list(teststep.get("extract", {}).keys())
if extract_variable_names:
config["export"] = extract_variable_names
return {
"config": config,
"teststeps": [teststep],
}
def ensure_testcase_v3(test_content: Dict) -> Dict:
logger.info("ensure compatibility with testcase format v2")
v3_content = {"config": test_content["config"], "teststeps": []}
if "teststeps" not in test_content:
logger.error(f"Miss teststeps: {test_content}")
sys.exit(1)
if not isinstance(test_content["teststeps"], list):
logger.error(
f'teststeps should be list type, got {type(test_content["teststeps"])}: {test_content["teststeps"]}'
)
sys.exit(1)
for step in test_content["teststeps"]:
teststep = {}
if "request" in step:
teststep["request"] = _sort_request_by_custom_order(step.pop("request"))
elif "api" in step:
teststep["testcase"] = step.pop("api")
elif "testcase" in step:
teststep["testcase"] = step.pop("testcase")
else:
raise exceptions.TestCaseFormatError(f"Invalid teststep: {step}")
teststep.update(_ensure_step_attachment(step))
teststep = _sort_step_by_custom_order(teststep)
v3_content["teststeps"].append(teststep)
return v3_content
def ensure_cli_args(args: List) -> List:
""" ensure compatibility with deprecated cli args in v2
"""
# remove deprecated --failfast
if "--failfast" in args:
logger.warning(f"remove deprecated argument: --failfast")
args.pop(args.index("--failfast"))
# convert --report-file to --html
if "--report-file" in args:
logger.warning(f"replace deprecated argument --report-file with --html")
index = args.index("--report-file")
args[index] = "--html"
args.append("--self-contained-html")
# keep compatibility with --save-tests in v2
if "--save-tests" in args:
logger.warning(
f"generate conftest.py keep compatibility with --save-tests in v2"
)
args.pop(args.index("--save-tests"))
_generate_conftest_for_summary(args)
return args
def _generate_conftest_for_summary(args: List):
for arg in args:
if os.path.exists(arg):
test_path = arg
# FIXME: several test paths maybe specified
break
else:
logger.error(f"No valid test path specified! \nargs: {args}")
sys.exit(1)
conftest_content = '''# NOTICE: Generated By HttpRunner.
import json
import os
import time
import pytest
from loguru import logger
from httprunner.utils import get_platform, ExtendJSONEncoder
@pytest.fixture(scope="session", autouse=True)
def session_fixture(request):
"""setup and teardown each task"""
logger.info(f"start running testcases ...")
start_at = time.time()
yield
logger.info(f"task finished, generate task summary for --save-tests")
summary = {
"success": True,
"stat": {
"testcases": {"total": 0, "success": 0, "fail": 0},
"teststeps": {"total": 0, "failures": 0, "successes": 0},
},
"time": {"start_at": start_at, "duration": time.time() - start_at},
"platform": get_platform(),
"details": [],
}
for item in request.node.items:
testcase_summary = item.instance.get_summary()
summary["success"] &= testcase_summary.success
summary["stat"]["testcases"]["total"] += 1
summary["stat"]["teststeps"]["total"] += len(testcase_summary.step_datas)
if testcase_summary.success:
summary["stat"]["testcases"]["success"] += 1
summary["stat"]["teststeps"]["successes"] += len(
testcase_summary.step_datas
)
else:
summary["stat"]["testcases"]["fail"] += 1
summary["stat"]["teststeps"]["successes"] += (
len(testcase_summary.step_datas) - 1
)
summary["stat"]["teststeps"]["failures"] += 1
testcase_summary_json = testcase_summary.dict()
testcase_summary_json["records"] = testcase_summary_json.pop("step_datas")
summary["details"].append(testcase_summary_json)
summary_path = r"{{SUMMARY_PATH_PLACEHOLDER}}"
summary_dir = os.path.dirname(summary_path)
os.makedirs(summary_dir, exist_ok=True)
with open(summary_path, "w", encoding="utf-8") as f:
json.dump(summary, f, indent=4, ensure_ascii=False, cls=ExtendJSONEncoder)
logger.info(f"generated task summary: {summary_path}")
'''
project_meta = load_project_meta(test_path)
project_root_dir = project_meta.RootDir
conftest_path = os.path.join(project_root_dir, "conftest.py")
test_path = os.path.abspath(test_path)
logs_dir_path = os.path.join(project_root_dir, "logs")
test_path_relative_path = convert_relative_project_root_dir(test_path)
if os.path.isdir(test_path):
file_foder_path = os.path.join(logs_dir_path, test_path_relative_path)
dump_file_name = "all.summary.json"
else:
file_relative_folder_path, test_file = os.path.split(test_path_relative_path)
file_foder_path = os.path.join(logs_dir_path, file_relative_folder_path)
test_file_name, _ = os.path.splitext(test_file)
dump_file_name = f"{test_file_name}.summary.json"
summary_path = os.path.join(file_foder_path, dump_file_name)
conftest_content = conftest_content.replace(
"{{SUMMARY_PATH_PLACEHOLDER}}", summary_path
)
dir_path = os.path.dirname(conftest_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
with open(conftest_path, "w", encoding="utf-8") as f:
f.write(conftest_content)
logger.info("generated conftest.py to generate summary.json")
def ensure_path_sep(path: Text) -> Text:
""" ensure compatibility with different path separators of Linux and Windows
"""
if "/" in path:
path = os.sep.join(path.split("/"))
if "\\" in path:
path = os.sep.join(path.split("\\"))
return path
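# --- Added usage sketch (not part of the original module) ---
# A minimal illustration of ensure_path_sep; the sample paths are made up and the
# expected output assumes a POSIX platform where os.sep is "/".
if __name__ == "__main__":
    print(ensure_path_sep("testcases\\demo\\testcase.yml"))  # testcases/demo/testcase.yml
    print(ensure_path_sep("testcases/demo/testcase.yml"))    # unchanged on POSIX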
| 30.315789 | 112 | 0.61789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,024 | 0.414385 |
0a498f8f754b453bd4fdad3c6f6282e67b1ff4ac | 1,551 | py | Python | examples/CountLettersInList.py | Ellis0817/Introduction-to-Programming-Using-Python | 1882a2a846162d5ff56d4d56c3940b638ef408bd | [
"MIT"
] | null | null | null | examples/CountLettersInList.py | Ellis0817/Introduction-to-Programming-Using-Python | 1882a2a846162d5ff56d4d56c3940b638ef408bd | [
"MIT"
] | 4 | 2019-11-07T12:32:19.000Z | 2020-07-19T14:04:44.000Z | examples/CountLettersInList.py | Ellis0817/Introduction-to-Programming-Using-Python | 1882a2a846162d5ff56d4d56c3940b638ef408bd | [
"MIT"
] | 5 | 2019-12-04T15:56:55.000Z | 2022-01-14T06:19:18.000Z | import RandomCharacter # Defined in Listing 6.9
def main():
"""Main."""
# Create a list of characters
chars = createList()
# Display the list
print("The lowercase letters are:")
displayList(chars)
# Count the occurrences of each letter
counts = countLetters(chars)
# Display counts
print("The occurrences of each letter are:")
displayCounts(counts)
def createList():
"""Create a list of characters."""
# Create an empty list
chars = []
# Create lowercase letters randomly and add them to the list
for i in range(100):
chars.append(RandomCharacter.getRandomLowerCaseLetter())
# Return the list
return chars
def displayList(chars):
"""Display the list of characters."""
# Display the characters in the list 20 on each line
for i in range(len(chars)):
if (i + 1) % 20 == 0:
print(chars[i])
else:
print(chars[i], end=' ')
def countLetters(chars):
"""Count the occurrences of each letter."""
# Create a list of 26 integers with initial value 0
counts = 26 * [0]
# For each lowercase letter in the list, count it
for i in range(len(chars)):
counts[ord(chars[i]) - ord('a')] += 1
return counts
def displayCounts(counts):
"""Display counts."""
for i in range(len(counts)):
if (i + 1) % 10 == 0:
print(counts[i], chr(i + ord('a')))
else:
print(counts[i], chr(i + ord('a')), end=' ')
print()
main() # Call the main function
| 23.149254 | 64 | 0.597679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 626 | 0.403611 |
0a4ab6a6c7a8f22ae4262d99f43041e035e6b535 | 602 | py | Python | project/settings/production.py | chiehtu/kissaten | a7aad01de569107d5fd5ed2cd781bca6e5750871 | [
"MIT"
] | null | null | null | project/settings/production.py | chiehtu/kissaten | a7aad01de569107d5fd5ed2cd781bca6e5750871 | [
"MIT"
] | null | null | null | project/settings/production.py | chiehtu/kissaten | a7aad01de569107d5fd5ed2cd781bca6e5750871 | [
"MIT"
] | null | null | null | from .base import *
SECRET_KEY = get_env_var('SECRET_KEY')
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = get_env_var('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_env_var('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = ''
USERENA_USE_HTTPS = True
| 18.8125 | 61 | 0.750831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 243 | 0.403654 |
0a5e25995315baeb1a8d9bd6a0b259803f947416 | 1,768 | py | Python | examples/pylab_examples/image_masked.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 16 | 2016-06-14T19:45:35.000Z | 2020-11-30T19:02:58.000Z | examples/pylab_examples/image_masked.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 7 | 2015-05-08T19:36:25.000Z | 2015-06-30T15:32:17.000Z | examples/pylab_examples/image_masked.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 6 | 2015-06-05T03:34:06.000Z | 2022-01-25T09:07:10.000Z | #!/usr/bin/env python
'''imshow with masked array input and out-of-range colors.
The second subplot illustrates the use of BoundaryNorm to
get a filled contour effect.
'''
from pylab import *
from numpy import ma
import matplotlib.colors as colors
delta = 0.025
x = y = arange(-3.0, 3.0, delta)
X, Y = meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10 * (Z2-Z1) # difference of Gaussians
# Set up a colormap:
palette = cm.gray
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('b', 1.0)
# Alternatively, we could use
# palette.set_bad(alpha = 0.0)
# to make the bad region transparent. This is the default.
# If you comment out all the palette.set* lines, you will see
# all the defaults; under and over will be colored with the
# first and last colors in the palette, respectively.
Zm = ma.masked_where(Z > 1.2, Z)
# By setting vmin and vmax in the norm, we establish the
# range to which the regular palette color scale is applied.
# Anything above that range is colored based on palette.set_over, etc.
subplot(1,2,1)
im = imshow(Zm, interpolation='bilinear',
cmap=palette,
norm = colors.Normalize(vmin = -1.0, vmax = 1.0, clip = False),
origin='lower', extent=[-3,3,-3,3])
title('Green=low, Red=high, Blue=bad')
colorbar(im, extend='both', orientation='horizontal', shrink=0.8)
subplot(1,2,2)
im = imshow(Zm, interpolation='nearest',
cmap=palette,
norm = colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
ncolors=256, clip = False),
origin='lower', extent=[-3,3,-3,3])
title('With BoundaryNorm')
colorbar(im, extend='both', spacing='proportional',
orientation='horizontal', shrink=0.8)
show()
| 31.571429 | 70 | 0.673643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 843 | 0.47681 |
0a63b2be4d7b2116c7bb45a2e0a6f93a06e01c5e | 959 | py | Python | other/minimum_edit_distance.py | newvicklee/nlp_algorithms | d2812398d96d345dcb50970bae6ebbf666ea5380 | [
"MIT"
] | null | null | null | other/minimum_edit_distance.py | newvicklee/nlp_algorithms | d2812398d96d345dcb50970bae6ebbf666ea5380 | [
"MIT"
] | null | null | null | other/minimum_edit_distance.py | newvicklee/nlp_algorithms | d2812398d96d345dcb50970bae6ebbf666ea5380 | [
"MIT"
] | null | null | null | """
Minimum edit distance computes the cost it takes to get from one string to another string.
This implementation uses the Levenshtein distance with a cost of 1 for insertions or deletions and a cost of 2 for substitutions.
Resource: https://en.wikipedia.org/wiki/Edit_distance
For example, getting from "intention" to "execution" is a cost of 8.
minimum_edit_distance("intention", "execution")
# 8
"""
def minimum_edit_distance(source, target):
n = len(source)
m = len(target)
D = {}
# Initialization
for i in range(0, n+1):
D[i,0] = i
for j in range(0, m+1):
D[0,j] = j
for i in range(1, n+1):
for j in range(1, m+1):
if source[i-1] == target[j-1]:
D[i,j] = D[i-1, j-1]
else:
D[i,j] = min(
D[i-1, j] + 1,
D[i, j-1] + 1,
D[i-1, j-1] + 2
)
    return D[n, m]  # distance between the full strings
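# --- Added usage sketch (illustration only) ---
# Reproduces the example from the docstring above: insertions/deletions cost 1,
# substitutions cost 2, so "intention" -> "execution" costs 8.
if __name__ == "__main__":
    print(minimum_edit_distance("intention", "execution"))  # 8
    print(minimum_edit_distance("kitten", "sitting"))       # 5 (2 substitutions + 1 insertion)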
| 28.205882 | 129 | 0.535975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 423 | 0.441084 |
0a6d2f3733dce67a2fafd219a662c5c458e102f9 | 1,774 | py | Python | XORCipher/XOREncrypt.py | KarthikGandrala/DataEncryption | 6ed4dffead345bc9f7010ac2ea9afbff958c85af | [
"MIT"
] | 1 | 2021-07-12T06:05:45.000Z | 2021-07-12T06:05:45.000Z | XORCipher/XOREncrypt.py | KarthikGandrala/Encrypt-Your-Data | 6ed4dffead345bc9f7010ac2ea9afbff958c85af | [
"MIT"
] | null | null | null | XORCipher/XOREncrypt.py | KarthikGandrala/Encrypt-Your-Data | 6ed4dffead345bc9f7010ac2ea9afbff958c85af | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Function to encrypt message using key is defined
def encrypt(msg, key):
# Defining empty strings and counters
hexadecimal = ''
iteration = 0
# Running for loop in the range of MSG and comparing the BITS
for i in range(len(msg)):
temp = ord(msg[i]) ^ ord(key[iteration])
# zfill will pad a single letter hex with 0, to make it two letter pair
hexadecimal += hex(temp)[2:].zfill(2)
# Checking if the iterations of the key are 1
iteration += 1
if iteration >= len(key):
# once all of the key's letters are used, repeat the key
iteration = 0
# Returning the final value
return hexadecimal
def decrypt(msg, key):
# Defining hex to uni string to store
hex_to_uni = ''
# Running for loop to the length of message
for i in range(0, len(msg), 2):
# Decoding each individual bytes from hex
hex_to_uni += bytes.fromhex(msg[i:i + 2]).decode('utf-8')
decryp_text = ''
iteration = 0
# For loop running for the length of the hex to unicode string
for i in range(len(hex_to_uni)):
# Comparing each individual bit
temp = ord(hex_to_uni[i]) ^ ord(key[iteration])
# zfill will pad a single letter hex with 0, to make it two letter pair
decryp_text += chr(temp)
iteration += 1
if iteration >= len(key):
# once all of the key's letters are used, repeat the key
iteration = 0
    # Finally return the decrypted text string
return decryp_text
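# --- Added round-trip sketch (illustration only); the message and key below are arbitrary ---
if __name__ == "__main__":
    ciphertext = encrypt("attack at dawn", "key")
    print(ciphertext)                  # hex-encoded ciphertext
    print(decrypt(ciphertext, "key"))  # attack at dawn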
| 23.653333 | 79 | 0.558061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 783 | 0.441375 |
6a5f51cf2ae3a67fb99172b7bd4214f43d0d42bc | 269 | py | Python | python/ordenacao.py | valdirsjr/learning.data | a4b72dfd27f55f2f04120644b73232bf343f71e3 | [
"MIT"
] | null | null | null | python/ordenacao.py | valdirsjr/learning.data | a4b72dfd27f55f2f04120644b73232bf343f71e3 | [
"MIT"
] | null | null | null | python/ordenacao.py | valdirsjr/learning.data | a4b72dfd27f55f2f04120644b73232bf343f71e3 | [
"MIT"
] | null | null | null | numero1 = int(input("Digite o primeiro número: "))
numero2 = int(input("Digite o segundo número: "))
numero3 = int(input("Digite o terceiro número: "))
if (numero1 < numero2 and numero2 < numero3):
print("crescente")
else:
print("não está em ordem crescente") | 38.428571 | 50 | 0.69145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.467153 |
6a61f1e1f810996e1c76609bf6e7fcc907c4da57 | 2,020 | py | Python | lang/py/aingle/test/gen_interop_data.py | AIngleLab/aae | 6e95f89fad60e62bb5305afe97c72f3278d8e04b | [
"Apache-2.0"
] | null | null | null | lang/py/aingle/test/gen_interop_data.py | AIngleLab/aae | 6e95f89fad60e62bb5305afe97c72f3278d8e04b | [
"Apache-2.0"
] | null | null | null | lang/py/aingle/test/gen_interop_data.py | AIngleLab/aae | 6e95f89fad60e62bb5305afe97c72f3278d8e04b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import aingle.codecs
import aingle.datafile
import aingle.io
import aingle.schema
NULL_CODEC = "null"
CODECS_TO_VALIDATE = aingle.codecs.KNOWN_CODECS.keys()
DATUM = {
"intField": 12,
"longField": 15234324,
"stringField": "hey",
"boolField": True,
"floatField": 1234.0,
"doubleField": -1234.0,
"bytesField": b"12312adf",
"nullField": None,
"arrayField": [5.0, 0.0, 12.0],
"mapField": {"a": {"label": "a"}, "bee": {"label": "cee"}},
"unionField": 12.0,
"enumField": "C",
"fixedField": b"1019181716151413",
"recordField": {"label": "blah", "children": [{"label": "inner", "children": []}]},
}
def generate(schema_path, output_path):
with open(schema_path) as schema_file:
interop_schema = aingle.schema.parse(schema_file.read())
for codec in CODECS_TO_VALIDATE:
filename = output_path
if codec != NULL_CODEC:
base, ext = os.path.splitext(output_path)
filename = base + "_" + codec + ext
with aingle.datafile.DataFileWriter(open(filename, "wb"), aingle.io.DatumWriter(), interop_schema, codec=codec) as dfw:
dfw.append(DATUM)
if __name__ == "__main__":
generate(sys.argv[1], sys.argv[2])
| 31.5625 | 127 | 0.681188 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,053 | 0.521287 |
6a6b124cb7b2cd1d6d09ae5b84d5b49e63612508 | 679 | py | Python | test_f_login_andy.py | KotoLLC/peacenik-tests | 760f7799ab2b9312fe0cce373890195151c48fce | [
"Apache-2.0"
] | null | null | null | test_f_login_andy.py | KotoLLC/peacenik-tests | 760f7799ab2b9312fe0cce373890195151c48fce | [
"Apache-2.0"
] | null | null | null | test_f_login_andy.py | KotoLLC/peacenik-tests | 760f7799ab2b9312fe0cce373890195151c48fce | [
"Apache-2.0"
] | null | null | null | from helpers import *
def test_f_login_andy():
url = "http://central.orbits.local/rpc.AuthService/Login"
raw_payload = {"name": "andy","password": "12345"}
payload = json.dumps(raw_payload)
headers = {'Content-Type': 'application/json'}
# convert dict to json by json.dumps() for body data.
response = requests.request("POST", url, headers=headers, data=payload)
save_cookies(response.cookies,"cookies.txt")
# Validate response headers and body contents, e.g. status code.
assert response.status_code == 200
# print full request and response
pretty_print_request(response.request)
pretty_print_response(response) | 35.736842 | 75 | 0.696613 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.415317 |
6a6b9fd92e89d1958b00048f55376ec87fde6db2 | 7,696 | py | Python | docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py | ian-r-rose/visualization | ed6d9fab95eb125e7340ab3fad3ed114ed3214af | [
"CC-BY-4.0"
] | 11 | 2017-01-04T18:19:48.000Z | 2021-02-21T01:46:33.000Z | docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py | ian-r-rose/visualization | ed6d9fab95eb125e7340ab3fad3ed114ed3214af | [
"CC-BY-4.0"
] | 8 | 2016-09-22T20:49:51.000Z | 2019-09-06T23:28:13.000Z | docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py | ian-r-rose/visualization | ed6d9fab95eb125e7340ab3fad3ed114ed3214af | [
"CC-BY-4.0"
] | 13 | 2016-09-22T20:20:06.000Z | 2020-07-13T14:48:32.000Z | #!/usr/bin/env python
# encoding: utf-8
r"""
Riemann solvers for the shallow water equations.
The available solvers are:
 * Roe - Use Roe averages to calculate the solution to the Riemann problem
* HLL - Use a HLL solver
* Exact - Use a newton iteration to calculate the exact solution to the
Riemann problem
.. math::
q_t + f(q)_x = 0
where
.. math::
q(x,t) = \left [ \begin{array}{c} h \\ h u \end{array} \right ],
the flux function is
.. math::
f(q) = \left [ \begin{array}{c} h u \\ hu^2 + 1/2 g h^2 \end{array}\right ].
and :math:`h` is the water column height, :math:`u` the velocity and :math:`g`
is the gravitational acceleration.
:Authors:
Kyle T. Mandli (2009-02-05): Initial version
"""
# ============================================================================
# Copyright (C) 2009 Kyle T. Mandli <mandli@amath.washington.edu>
#
# Distributed under the terms of the Berkeley Software Distribution (BSD)
# license
# http://www.opensource.org/licenses/
# ============================================================================
import numpy as np
num_eqn = 2
num_waves = 2
def shallow_roe_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
Roe shallow water solver in 1d::
        ubar = (sqrt(h_l) * u_l + sqrt(h_r) * u_r) / (sqrt(h_l) + sqrt(h_r))
cbar = sqrt( 0.5 * g * (h_l + h_r))
W_1 = | 1 | s_1 = ubar - cbar
| ubar - cbar |
        W_2 = |      1      |  s_2 = ubar + cbar
| ubar + cbar |
a1 = 0.5 * ( - delta_hu + (ubar + cbar) * delta_h ) / cbar
a2 = 0.5 * ( delta_hu - (ubar - cbar) * delta_h ) / cbar
*problem_data* should contain:
     - *grav* - (float) Gravitational constant
     - *efix* - (bool) Boolean as to whether an entropy fix should be used; if
       not present, false is assumed
:Version: 1.0 (2009-02-05)
"""
# Array shapes
num_rp = q_l.shape[1]
# Output arrays
wave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.zeros( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Compute roe-averaged quantities
ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /
(np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )
cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))
# Compute Flux structure
delta = q_r - q_l
a1 = 0.5 * (-delta[1,:] + (ubar + cbar) * delta[0,:]) / cbar
a2 = 0.5 * ( delta[1,:] - (ubar - cbar) * delta[0,:]) / cbar
# Compute each family of waves
wave[0,0,:] = a1
wave[1,0,:] = a1 * (ubar - cbar)
s[0,:] = ubar - cbar
wave[0,1,:] = a2
wave[1,1,:] = a2 * (ubar + cbar)
s[1,:] = ubar + cbar
if problem_data['efix']:
raise NotImplementedError("Entropy fix has not been implemented.")
else:
s_index = np.zeros((2,num_rp))
for m in xrange(num_eqn):
for mw in xrange(num_waves):
s_index[0,:] = s[mw,:]
amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]
apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]
return wave, s, amdq, apdq
def shallow_hll_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
HLL shallow water solver ::
W_1 = Q_hat - Q_l s_1 = min(u_l-c_l,u_l+c_l,lambda_roe_1,lambda_roe_2)
W_2 = Q_r - Q_hat s_2 = max(u_r-c_r,u_r+c_r,lambda_roe_1,lambda_roe_2)
Q_hat = ( f(q_r) - f(q_l) - s_2 * q_r + s_1 * q_l ) / (s_1 - s_2)
*problem_data* should contain:
     - *grav* - (float) Gravitational constant
:Version: 1.0 (2009-02-05)
"""
# Array shapes
num_rp = q_l.shape[1]
num_eqn = 2
num_waves = 2
# Output arrays
wave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.empty( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Compute Roe and right and left speeds
ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /
(np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )
cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))
u_r = q_r[1,:] / q_r[0,:]
c_r = np.sqrt(problem_data['grav'] * q_r[0,:])
u_l = q_l[1,:] / q_l[0,:]
c_l = np.sqrt(problem_data['grav'] * q_l[0,:])
# Compute Einfeldt speeds
s_index = np.empty((4,num_rp))
s_index[0,:] = ubar+cbar
s_index[1,:] = ubar-cbar
s_index[2,:] = u_l + c_l
s_index[3,:] = u_l - c_l
s[0,:] = np.min(s_index,axis=0)
s_index[2,:] = u_r + c_r
s_index[3,:] = u_r - c_r
s[1,:] = np.max(s_index,axis=0)
# Compute middle state
q_hat = np.empty((2,num_rp))
q_hat[0,:] = ((q_r[1,:] - q_l[1,:] - s[1,:] * q_r[0,:]
+ s[0,:] * q_l[0,:]) / (s[0,:] - s[1,:]))
q_hat[1,:] = ((q_r[1,:]**2/q_r[0,:] + 0.5 * problem_data['grav'] * q_r[0,:]**2
- (q_l[1,:]**2/q_l[0,:] + 0.5 * problem_data['grav'] * q_l[0,:]**2)
- s[1,:] * q_r[1,:] + s[0,:] * q_l[1,:]) / (s[0,:] - s[1,:]))
# Compute each family of waves
wave[:,0,:] = q_hat - q_l
wave[:,1,:] = q_r - q_hat
# Compute variations
s_index = np.zeros((2,num_rp))
for m in xrange(num_eqn):
for mw in xrange(num_waves):
s_index[0,:] = s[mw,:]
amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]
apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]
return wave, s, amdq, apdq
def shallow_fwave_1d(q_l, q_r, aux_l, aux_r, problem_data):
r"""Shallow water Riemann solver using fwaves
Also includes support for bathymetry but be wary if you think you might have
dry states as this has not been tested.
*problem_data* should contain:
- *grav* - (float) Gravitational constant
- *sea_level* - (float) Datum from which the dry-state is calculated.
:Version: 1.0 (2014-09-05)
"""
g = problem_data['grav']
num_rp = q_l.shape[1]
num_eqn = 2
num_waves = 2
# Output arrays
fwave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.empty( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Extract state
u_l = np.where(q_l[0,:] - problem_data['sea_level'] > 1e-3,
q_l[1,:] / q_l[0,:], 0.0)
u_r = np.where(q_r[0,:] - problem_data['sea_level'] > 1e-3,
q_r[1,:] / q_r[0,:], 0.0)
phi_l = q_l[0,:] * u_l**2 + 0.5 * g * q_l[0,:]**2
phi_r = q_r[0,:] * u_r**2 + 0.5 * g * q_r[0,:]**2
# Speeds
s[0,:] = u_l - np.sqrt(g * q_l[0,:])
s[1,:] = u_r + np.sqrt(g * q_r[0,:])
delta1 = q_r[1,:] - q_l[1,:]
delta2 = phi_r - phi_l + g * 0.5 * (q_r[0,:] + q_l[0,:]) * (aux_r[0,:] - aux_l[0,:])
beta1 = (s[1,:] * delta1 - delta2) / (s[1,:] - s[0,:])
beta2 = (delta2 - s[0,:] * delta1) / (s[1,:] - s[0,:])
fwave[0,0,:] = beta1
fwave[1,0,:] = beta1 * s[0,:]
fwave[0,1,:] = beta2
fwave[1,1,:] = beta2 * s[1,:]
for m in xrange(num_eqn):
for mw in xrange(num_waves):
amdq[m,:] += (s[mw,:] < 0.0) * fwave[m,mw,:]
apdq[m,:] += (s[mw,:] >= 0.0) * fwave[m,mw,:]
return fwave, s, amdq, apdq
def shallow_exact_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
Exact shallow water Riemann solver
.. warning::
This solver has not been implemented.
"""
raise NotImplementedError("The exact swe solver has not been implemented.")
| 31.801653 | 88 | 0.511954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,263 | 0.423986 |
6a6dcc4d9c3e1b2437b6c8b26173ce12b1dfa929 | 7,761 | py | Python | week2/Assignment2Answer.py | RayshineRen/Introduction_to_Data_Science_in_Python | b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71 | [
"MIT"
] | 1 | 2020-09-22T15:06:02.000Z | 2020-09-22T15:06:02.000Z | week2/Assignment2Answer.py | RayshineRen/Introduction_to_Data_Science_in_Python | b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71 | [
"MIT"
] | 1 | 2020-11-03T14:11:02.000Z | 2020-11-03T14:24:50.000Z | week2/Assignment2Answer.py | RayshineRen/Introduction_to_Data_Science_in_Python | b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71 | [
"MIT"
] | 2 | 2020-09-22T05:27:09.000Z | 2020-11-05T10:39:49.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 21:56:15 2020
@author: Ray
@email: 1324789704@qq.com
@wechat: RayTing0305
"""
'''
Question 1
Write a function called proportion_of_education which returns the proportion of children in the dataset who had a mother with the education levels equal to less than high school (<12), high school (12), more than high school but not a college graduate (>12) and college degree.
This function should return a dictionary in the form of (use the correct numbers, do not round numbers):
{"less than high school":0.2,
"high school":0.4,
"more than high school but not college":0.2,
"college":0.2}
'''
import scipy.stats as stats
import numpy as np
import pandas as pd
df = pd.read_csv("./assets/NISPUF17.csv")
def proportion_of_education():
# your code goes here
# YOUR CODE HERE
df_edu = df.EDUC1
edu_list = [1, 2, 3, 4]
zero_df = pd.DataFrame(np.zeros((df_edu.shape[0], len(edu_list))), columns=edu_list)
for edu in edu_list:
zero_df[edu][df_edu==edu]=1
#zero_df
sum_ret = zero_df.sum(axis=0)
name_l = ["less than high school", "high school", "more than high school but not college", "college"]
rat = sum_ret.values/sum(sum_ret.values)
dic = dict()
for i in range(4):
dic[name_l[i]] = rat[i]
return dic
raise NotImplementedError()
assert type(proportion_of_education())==type({}), "You must return a dictionary."
assert len(proportion_of_education()) == 4, "You have not returned a dictionary with four items in it."
assert "less than high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "more than high school but not college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct"
'''
Question 2
Let's explore the relationship between being fed breastmilk as a child and getting a seasonal influenza vaccine from a healthcare provider. Return a tuple of the average number of influenza vaccines for those children we know received breastmilk as a child and those who know did not.
This function should return a tuple in the form (use the correct numbers):
(2.5, 0.1)
'''
def average_influenza_doses():
# YOUR CODE HERE
    # whether the child was breastfed (CBF_01)
fed_breastmilk = list(df.groupby(by='CBF_01'))
be_fed_breastmilk = fed_breastmilk[0][1]
not_fed_breastmilk = fed_breastmilk[1][1]
    # number of influenza doses for breastfed children
be_fed_breastmilk_influenza = be_fed_breastmilk.P_NUMFLU
num_be_fed_breastmilk_influenza = be_fed_breastmilk_influenza.dropna().mean()
    # number of influenza doses for children who were not breastfed
not_be_fed_breastmilk_influenza = not_fed_breastmilk.P_NUMFLU
num_not_be_fed_breastmilk_influenza = not_be_fed_breastmilk_influenza.dropna().mean()
return num_be_fed_breastmilk_influenza, num_not_be_fed_breastmilk_influenza
raise NotImplementedError()
assert len(average_influenza_doses())==2, "Return two values in a tuple, the first for yes and the second for no."
'''
Question 3
It would be interesting to see if there is any evidence of a link between vaccine effectiveness and sex of the child. Calculate the ratio of the number of children who contracted chickenpox but were vaccinated against it (at least one varicella dose) versus those who were vaccinated but did not contract chicken pox. Return results by sex.
This function should return a dictionary in the form of (use the correct numbers):
{"male":0.2,
"female":0.4}
Note: To aid in verification, the chickenpox_by_sex()['female'] value the autograder is looking for starts with the digits 0.0077.
'''
def chickenpox_by_sex():
# YOUR CODE HERE
    # whether the child contracted varicella (chickenpox)
cpox = df.HAD_CPOX
#cpox.value_counts()
cpox_group = list(df.groupby(by='HAD_CPOX'))
have_cpox = cpox_group[0][1]
not_have_cpox = cpox_group[1][1]
    # split each group by sex
have_cpox_group = list(have_cpox.groupby(by='SEX'))
not_have_cpox_group = list(not_have_cpox.groupby(by='SEX'))
have_cpox_boy = have_cpox_group[0][1]
have_cpox_girl = have_cpox_group[1][1]
not_have_cpox_boy = not_have_cpox_group[0][1]
not_have_cpox_girl = not_have_cpox_group[1][1]
    # vaccinated (at least one varicella dose) and contracted chickenpox
#have_cpox_boy_injected = have_cpox_boy[(have_cpox_boy['P_NUMMMR']>0) | (have_cpox_boy['P_NUMVRC']>0)]
have_cpox_boy_injected = have_cpox_boy[(have_cpox_boy['P_NUMVRC']>0)]
num_have_cpox_boy_injected = have_cpox_boy_injected.count()['SEQNUMC']
have_cpox_girl_injected = have_cpox_girl[(have_cpox_girl['P_NUMVRC']>0)]
num_have_cpox_girl_injected = have_cpox_girl_injected.count()['SEQNUMC']
    # vaccinated and did not contract chickenpox
not_have_cpox_boy_injected = not_have_cpox_boy[(not_have_cpox_boy['P_NUMVRC']>0)]
num_not_have_cpox_boy_injected = not_have_cpox_boy_injected.count()['SEQNUMC']
not_have_cpox_girl_injected = not_have_cpox_girl[(not_have_cpox_girl['P_NUMVRC']>0)]
num_not_have_cpox_girl_injected = not_have_cpox_girl_injected.count()['SEQNUMC']
    # compute the ratios
ratio_boy = num_have_cpox_boy_injected / num_not_have_cpox_boy_injected
ratio_girl = num_have_cpox_girl_injected / num_not_have_cpox_girl_injected
dic = {}
dic['male'] = ratio_boy
dic['female'] = ratio_girl
return dic
raise NotImplementedError()
assert len(chickenpox_by_sex())==2, "Return a dictionary with two items, the first for males and the second for females."
'''
Question 4
A correlation is a statistical relationship between two variables. If we wanted to know if vaccines work, we might look at the correlation between the use of the vaccine and whether it results in prevention of the infection or disease [1]. In this question, you are to see if there is a correlation between having had the chicken pox and the number of chickenpox vaccine doses given (varicella).
Some notes on interpreting the answer. The had_chickenpox_column is either 1 (for yes) or 2 (for no), and the num_chickenpox_vaccine_column is the number of doses a child has been given of the varicella vaccine. A positive correlation (e.g., corr > 0) means that an increase in had_chickenpox_column (which means more no’s) would also increase the values of num_chickenpox_vaccine_column (which means more doses of vaccine). If there is a negative correlation (e.g., corr < 0), it indicates that having had chickenpox is related to an increase in the number of vaccine doses.
Also, pval is the probability that we observe a correlation between had_chickenpox_column and num_chickenpox_vaccine_column which is greater than or equal to a particular value occurred by chance. A small pval means that the observed correlation is highly unlikely to occur by chance. In this case, pval should be very small (will end in e-18 indicating a very small number).
[1] This isn’t really the full picture, since we are not looking at when the dose was given. It’s possible that children had chickenpox and then their parents went to get them the vaccine. Does this dataset have the data we would need to investigate the timing of the dose?
'''
def corr_chickenpox():
cpox = df[(df.P_NUMVRC).notnull()]
have_cpox = cpox[(cpox.HAD_CPOX==1) | (cpox.HAD_CPOX==2)]
df1=pd.DataFrame({"had_chickenpox_column":have_cpox.HAD_CPOX,
"num_chickenpox_vaccine_column":have_cpox.P_NUMVRC})
corr, pval=stats.pearsonr(df1["had_chickenpox_column"],df1["num_chickenpox_vaccine_column"])
return corr
raise NotImplementedError()
| 53.895833 | 576 | 0.74024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,615 | 0.587823 |
6a75c6bcf2a235fe76f46e51c4cc31283811626a | 2,534 | py | Python | simulation/dataset_G_1q_X_Z_N1.py | eperrier/QDataSet | 383b38b9b4166848f72fac0153800525e66b477b | [
"MIT"
] | 42 | 2021-08-17T02:27:59.000Z | 2022-03-26T16:00:57.000Z | simulation/dataset_G_1q_X_Z_N1.py | eperrier/QDataSet | 383b38b9b4166848f72fac0153800525e66b477b | [
"MIT"
] | 1 | 2021-09-25T11:15:20.000Z | 2021-09-27T04:18:25.000Z | simulation/dataset_G_1q_X_Z_N1.py | eperrier/QDataSet | 383b38b9b4166848f72fac0153800525e66b477b | [
"MIT"
] | 6 | 2021-08-17T02:28:04.000Z | 2022-03-22T07:11:48.000Z | ##############################################
"""
This module generates a dataset
"""
##############################################
# preamble
import numpy as np
from utilites import Pauli_operators, simulate, CheckNoise
################################################
# meta parameters
name = "G_1q_X_Z_N1"
################################################
# quantum parameters
dim = 2 # dimension of the system
Omega = 12 # qubit energy gap
static_operators = [0.5*Pauli_operators[3]*Omega] # drift Hamiltonian
dynamic_operators = [0.5*Pauli_operators[1]] # control Hamiltonian
noise_operators = [0.5*Pauli_operators[3]] # noise Hamiltonian
initial_states = [
np.array([[0.5,0.5],[0.5,0.5]]), np.array([[0.5,-0.5],[-0.5,0.5]]),
np.array([[0.5,-0.5j],[0.5j,0.5]]),np.array([[0.5,0.5j],[-0.5j,0.5]]),
np.array([[1,0],[0,0]]), np.array([[0,0],[0,1]])
    ] # initial state of qubit
measurement_operators = Pauli_operators[1:] # measurement operators
##################################################
# simulation parameters
T = 1 # Evolution time
M = 1024 # Number of time steps
num_ex = 10000 # Number of examples
batch_size = 50 # batch size for TF
##################################################
# noise parameters
K = 2000 # Number of realizations
noise_profile = [1] # Noise type
###################################################
# control parameters
pulse_shape = "Gaussian" # Control pulse shape
num_pulses = 5 # Number of pulses per sequence
####################################################
# Generate the dataset
sim_parameters = dict( [(k,eval(k)) for k in ["name", "dim", "Omega", "static_operators", "dynamic_operators", "noise_operators", "measurement_operators", "initial_states", "T", "M", "num_ex", "batch_size", "K", "noise_profile", "pulse_shape", "num_pulses"] ])
CheckNoise(sim_parameters)
simulate(sim_parameters)
#################################################### | 56.311111 | 261 | 0.404893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,192 | 0.470403 |
6a77df2fb34c60a66cb0710a264af376f888be93 | 2,112 | py | Python | advanced/itertools_funcs.py | ariannasg/python3-essential-training | 9b52645f5ccb57d2bda5d5f4a3053681a026450a | [
"MIT"
] | 1 | 2020-06-02T08:37:41.000Z | 2020-06-02T08:37:41.000Z | advanced/itertools_funcs.py | ariannasg/python3-training | 9b52645f5ccb57d2bda5d5f4a3053681a026450a | [
"MIT"
] | null | null | null | advanced/itertools_funcs.py | ariannasg/python3-training | 9b52645f5ccb57d2bda5d5f4a3053681a026450a | [
"MIT"
] | null | null | null | #!usr/bin/env python3
import itertools
# itertools is a module that's not technically a set of built-in functions but
# it is part of the standard library that comes with python.
# it's useful for for creating and using iterators.
def main():
print('some infinite iterators')
# cycle iterator can be used to cycle over a collection over and over
seq1 = ["Joe", "John", "Mike"]
cycle1 = itertools.cycle(seq1)
print(next(cycle1))
print(next(cycle1))
print(next(cycle1))
print(next(cycle1))
print(next(cycle1))
# use count to create a simple counter
count1 = itertools.count(100, 3)
print(next(count1))
print(next(count1))
print(next(count1))
print('some non-infinite iterators')
values = [10, 5, 20, 30, 40, 50, 40, 30]
# accumulate creates an iterator that accumulates/aggregates values
print(list(itertools.accumulate(values))) # this defaults to addition
print(list(itertools.accumulate(values, max)))
print(list(itertools.accumulate(values, min)))
# use chain to connect sequences together
x = itertools.chain('ABCD', '1234')
print(list(x))
# dropwhile and takewhile will return values until
# a certain condition is met that stops them. they are similar to the
# filter built-in function.
# dropwhile will drop the values from the sequence as long as the
# condition of the function is true and then returns the rest of values
print(list(itertools.dropwhile(is_less_than_forty, values)))
# takewhile will keep the values from the sequence as long as the
# condition of the function is true and then stops giving data
print(list(itertools.takewhile(is_less_than_forty, values)))
def is_less_than_forty(x):
return x < 40
if __name__ == "__main__":
main()
# CONSOLE OUTPUT:
# some infinite iterators
# Joe
# John
# Mike
# Joe
# John
# 100
# 103
# 106
# some non-infinite iterators
# [10, 15, 35, 65, 105, 155, 195, 225]
# [10, 10, 20, 30, 40, 50, 50, 50]
# [10, 5, 5, 5, 5, 5, 5, 5]
# ['A', 'B', 'C', 'D', '1', '2', '3', '4']
# [40, 50, 40, 30]
# [10, 5, 20, 30]
| 29.333333 | 78 | 0.673295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,244 | 0.589015 |
6a7d299369e55fc318f13ff176616da2592dab8c | 526 | py | Python | Python/17 - 081 - extraindo dados de uma lista.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | Python/17 - 081 - extraindo dados de uma lista.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | Python/17 - 081 - extraindo dados de uma lista.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | # Aula 17 (Listas (Parte 1))
valores = []
while True:
valor = int(input('Digite um Valor ou -1 para Finalizar: '))
if valor < 0:
print('\nFinalizando...')
break
else:
valores.append(valor)
print(f'Foram digitados {len(valores)} números')
valores.sort(reverse=True)
print(f'Lista ordenada de forma decrescente: {valores}')
if 5 in valores:
valores.reverse()
print(f'O valor 5 foi digitado e está na {valores.index(5)} posição.')
else:
print('Valor 5 não encontrado na lista.')
| 26.3 | 74 | 0.652091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 278 | 0.52354 |
6a8e7fcaf4ca3d67de4aab013987d7db788188b5 | 252 | py | Python | pyqtgraph/examples/template.py | secantsquared/pyqtgraph | 3ef7f5b91639543e43bcd66a84290fb9bc18fc5c | [
"MIT"
] | null | null | null | pyqtgraph/examples/template.py | secantsquared/pyqtgraph | 3ef7f5b91639543e43bcd66a84290fb9bc18fc5c | [
"MIT"
] | null | null | null | pyqtgraph/examples/template.py | secantsquared/pyqtgraph | 3ef7f5b91639543e43bcd66a84290fb9bc18fc5c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Description of example
"""
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui, mkQApp
import numpy as np
app = mkQApp()
# win.setWindowTitle('pyqtgraph example: ____')
if __name__ == '__main__':
pg.exec()
| 15.75 | 47 | 0.68254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.436508 |
6a9907c6e19624e9a00da0b3cff99ba87e746680 | 3,206 | py | Python | models2.py | Lydia-Tan/MindLife | 644f1a3834f337d51c99650c3924df99c5200d06 | [
"MIT"
] | 1 | 2020-01-20T19:49:07.000Z | 2020-01-20T19:49:07.000Z | models2.py | lindaweng/Mindlife | 30be070b39728fb3fe149d4c95e5bce280a3b6a7 | [
"MIT"
] | null | null | null | models2.py | lindaweng/Mindlife | 30be070b39728fb3fe149d4c95e5bce280a3b6a7 | [
"MIT"
] | null | null | null | import nltk
import re
import sys
from sys import argv
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def ajay(ans):
    analyzer = SentimentIntensityAnalyzer()
    completeScore = 0
    questionWeights = [0.05, 0.20, 0.05, 0.05, 0.05, 0.20, 0.05, 0.05, 0.20, 0.10]
    print(ans)
    ansList = ans.split("$")
    for j in range(10):
        print(ansList[j])
    for i in range(10):
        results = []
        score = 0
        count = 0
        # score every sentence of the i-th answer; sentences split on '.', '?' or '!'
        for l in re.split(r"\.|\?|\!", ansList[i]):
            ss = analyzer.polarity_scores(l)
            results.append(ss)
            score += ss['compound']
            count += 1
        # weight this answer's mean sentence score by its question weight
        completeScore += (score / count) * questionWeights[i]
#print(completeScore)
if (completeScore >= 0.1):
return "False Alarm! You don't have Depression."
elif (completeScore >= -0.1):
return ("Seasonal affective disorder (SAD). This type of depression " +
"emerges as days get shorter in the fall and winter. The mood "
+ "change may result from alterations in the body's natural daily "
+ "rhythms, in the eyes' sensitivity to light, or in how chemical "
+ "messengers like serotonin and melatonin function. The leading "
+ "treatment is light therapy, which involves daily sessions sitting "
+ "close to an especially intense light source. The usual treatments "
+ "for depression, such as psychotherapy and medication, may also be "
+ "effective.");
elif (completeScore >= -0.4):
return ("Persistent depressive disorder. Formerly called dysthymia, this "
+ "type of depression refers to low mood that has lasted for at least "
+ "two years but may not reach the intensity of major depression. Many "
+ "people with this type of depression type are able to function day to "
+ "but feel low or joyless much of the time. Some depressive symptoms, "
+ "such as appetite and sleep changes, low energy, low self-esteem, or "
+ "hopelessness, are usually part of the picture.")
else:
return ("The classic depression type, major depression is a state where a dark "
+ "mood is all-consuming and one loses interest in activities, even ones "
+ "that are usually pleasurable. Symptoms of this type of depression "
+ "include trouble sleeping, changes in appetite or weight, loss of energy, "
+ "and feeling worthless. Thoughts of death or suicide may occur. It is "
+ "usually treated with psychotherapy and medication. For some people with "
+ "severe depression that isn't alleviated with psychotherapy or antidepressant "
+ "medications, electroconvulsive therapy may be effective.") | 51.709677 | 98 | 0.585153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,752 | 0.546475 |
6a9c552700ad0a75cac33278ee8dc5a5139c2432 | 844 | py | Python | textpand/download.py | caufieldjh/textpand-for-kgs | 42853c53c5a4cc06fbd745c147d02fe7916690fa | [
"BSD-3-Clause"
] | 3 | 2021-12-10T21:13:47.000Z | 2021-12-10T23:36:18.000Z | textpand/download.py | caufieldjh/textpand-for-kgs | 42853c53c5a4cc06fbd745c147d02fe7916690fa | [
"BSD-3-Clause"
] | 1 | 2022-01-06T20:59:07.000Z | 2022-01-06T20:59:07.000Z | textpand/download.py | caufieldjh/textpand-for-kgs | 42853c53c5a4cc06fbd745c147d02fe7916690fa | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .utils import download_from_yaml
def download(output_dir: str, snippet_only: bool, ignore_cache: bool = False) -> None:
"""Downloads data files from list of URLs (default: download.yaml) into data directory (default: data/).
Args:
output_dir: A string pointing to the location to download data to.
snippet_only: Downloads only the first 5 kB of the source, for testing and file checks.
ignore_cache: Ignore cache and download files even if they exist [false]
Returns:
None.
"""
download_from_yaml(yaml_file="download.yaml",
output_dir=output_dir,
snippet_only=snippet_only,
ignore_cache=ignore_cache,
verbose=True)
return None
| 31.259259 | 108 | 0.625592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 466 | 0.552133 |
6a9d42bd307c1507375c76e403f46b3901bbf76d | 3,560 | py | Python | qt-creator-opensource-src-4.6.1/scripts/checkInstalledFiles.py | kevinlq/Qt-Creator-Opensource-Study | b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f | [
"MIT"
] | 5 | 2018-12-22T14:49:13.000Z | 2022-01-13T07:21:46.000Z | qt-creator-opensource-src-4.6.1/scripts/checkInstalledFiles.py | kevinlq/Qt-Creator-Opensource-Study | b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f | [
"MIT"
] | null | null | null | qt-creator-opensource-src-4.6.1/scripts/checkInstalledFiles.py | kevinlq/Qt-Creator-Opensource-Study | b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f | [
"MIT"
] | 8 | 2018-07-17T03:55:48.000Z | 2021-12-22T06:37:53.000Z | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import os
import sys
import stat
import difflib
import inspect
import getopt
def referenceFile():
if sys.platform.startswith('linux'):
filename = 'makeinstall.linux'
elif sys.platform.startswith('win'):
filename = 'makeinstall.windows'
elif sys.platform == 'darwin':
filename = 'makeinstall.darwin'
else:
print "Unsupported platform: ", sys.platform
sys.exit(-1)
scriptDir = os.path.dirname(inspect.getfile(inspect.currentframe()))
return os.path.join(scriptDir,'..','tests', 'reference', filename)
def readReferenceFile():
# read file with old diff
f = open(referenceFile(), 'r');
filelist = []
for line in f:
filelist.append(line)
f.close()
return filelist
def generateReference(rootdir):
fileDict = {}
for root, subFolders, files in os.walk(rootdir):
for file in (subFolders + files):
f = os.path.join(root,file)
perm = os.stat(f).st_mode & 0777
if os.path.getsize(f) == 0:
print "'%s' is empty!" % f
fileDict[f[len(rootdir)+1:]] = perm
# generate new list
formattedlist = []
for name, perm in sorted(fileDict.iteritems()):
formattedlist.append("%o %s\n"% (perm, name))
return formattedlist;
def usage():
print "Usage: %s [-g | --generate] <dir>" % os.path.basename(sys.argv[0])
def main():
generateMode = False
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hg', ['help', 'generate'])
    except getopt.GetoptError as err:
        print str(err)
usage()
sys.exit(2)
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-g', '--generate'):
generateMode = True
if len(args) != 1:
usage()
sys.exit(2)
rootdir = args[0]
if generateMode:
f = open(referenceFile(), 'w')
for item in generateReference(rootdir):
f.write(item)
f.close()
print "Do not forget to commit", referenceFile()
else:
hasDiff = False
for line in difflib.unified_diff(readReferenceFile(), generateReference(rootdir), fromfile=referenceFile(), tofile="generated"):
sys.stdout.write(line)
hasDiff = True
if hasDiff:
sys.exit(1)
if __name__ == "__main__":
main()
| 31.504425 | 136 | 0.608989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,497 | 0.420506 |
6aa02482ee4345f8d62c98b8785e029ed85945dd | 1,639 | py | Python | tqsdk/demo/example/momentum.py | boyscout2008/tqsdk-python | 79496a938a44f79ea9164569637509d0cc7db70a | [
"Apache-2.0"
] | null | null | null | tqsdk/demo/example/momentum.py | boyscout2008/tqsdk-python | 79496a938a44f79ea9164569637509d0cc7db70a | [
"Apache-2.0"
] | null | null | null | tqsdk/demo/example/momentum.py | boyscout2008/tqsdk-python | 79496a938a44f79ea9164569637509d0cc7db70a | [
"Apache-2.0"
] | 1 | 2020-11-20T01:19:11.000Z | 2020-11-20T01:19:11.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Ringo"
'''
Price momentum strategy (difficulty: beginner)
Reference: https://www.shinnytech.com/blog/momentum-strategy/
Note: this example strategy is for demonstration only; adapt it to your own strategy and experience before live trading
'''
from tqsdk import TqAccount, TqApi, TargetPosTask
# Set the target contract and fetch N K-lines to compute the price momentum
SYMBOL = "SHFE.au1912"
N = 15
api = TqApi()
klines = api.get_kline_serial(SYMBOL, 60*60*24, N)
quote = api.get_quote(SYMBOL)
target_pos = TargetPosTask(api, SYMBOL)
position = api.get_position(SYMBOL)
# Price momentum function AR: compute the momentum ar from the previous N-1 daily K-lines
def AR(kline1):
spread_ho = sum(kline1.high[:-1] - kline1.open[:-1])
spread_oc = sum(kline1.open[:-1] - kline1.low[:-1])
    # when spread_oc is 0, fall back to the minimum price tick
if spread_oc == 0:
spread_oc = quote.price_tick
ar = (spread_ho/spread_oc)*100
return ar
ar = AR(klines)
print("策略开始启动")
while True:
api.wait_update()
    # recompute the momentum ar whenever a new K-line is generated
if api.is_changing(klines.iloc[-1], "datetime"):
ar = AR(klines)
print("价格动量是:", ar)
    # re-evaluate whenever the last price changes
if api.is_changing(quote, "last_price"):
        # position-opening logic
if position.pos_long == 0 and position.pos_short == 0:
            # if ar is above 110 and below 150, open a long position
if 110 < ar < 150:
print("价值动量超过110,小于150,做多")
target_pos.set_target_volume(100)
            # if ar is above 50 and below 90, open a short position
elif 50 < ar < 90:
print("价值动量大于50,小于90,做空")
target_pos.set_target_volume(-100)
        # stop-loss: close a long position when ar falls below 90, close a short position when ar rises above 110
        elif (position.pos_long > 0 and ar < 90) or (position.pos_short > 0 and ar > 110):
            print("止损平仓")
            target_pos.set_target_volume(0)
| 26.015873 | 90 | 0.621721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 910 | 0.43687 |
6aa1d7c9f54267d6e42717a153600f7e111a7f9f | 10,323 | py | Python | color_transfer/__init__.py | AdamSpannbauer/color_transfer | 155e0134615f35bf19bf32f4cacf056603604914 | [
"MIT"
] | null | null | null | color_transfer/__init__.py | AdamSpannbauer/color_transfer | 155e0134615f35bf19bf32f4cacf056603604914 | [
"MIT"
] | null | null | null | color_transfer/__init__.py | AdamSpannbauer/color_transfer | 155e0134615f35bf19bf32f4cacf056603604914 | [
"MIT"
] | 1 | 2020-11-05T17:35:14.000Z | 2020-11-05T17:35:14.000Z | # import the necessary packages
import numpy as np
import cv2
import imutils
def color_transfer(source, target, clip=True, preserve_paper=True):
"""
Transfers the color distribution from the source to the target
image using the mean and standard deviations of the L*a*b*
color space.
    This implementation is (loosely) based on the "Color Transfer
between Images" paper by Reinhard et al., 2001.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
clip: Should components of L*a*b* image be scaled by np.clip before
converting back to BGR color space?
If False then components will be min-max scaled appropriately.
Clipping will keep target image brightness truer to the input.
Scaling will adjust image brightness to avoid washed out portions
in the resulting color transfer that can be caused by clipping.
preserve_paper: Should color transfer strictly follow methodology
laid out in original paper? The method does not always produce
aesthetically pleasing results.
        If False then L*a*b* components will be scaled using the reciprocal of
the scaling factor proposed in the paper. This method seems to produce
more consistently aesthetically pleasing results
Returns:
-------
transfer: NumPy array
OpenCV image (w, h, 3) NumPy array (uint8)
"""
    # convert the images from the BGR to the L*a*b* color space, being
    # sure to utilize the floating point data type (note: OpenCV
    # expects floats to be 32-bit, so use that instead of 64-bit)
source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")
# compute color statistics for the source and target images
(lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
(lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)
# subtract the means from the target image
(l, a, b) = cv2.split(target)
l -= lMeanTar
a -= aMeanTar
b -= bMeanTar
if preserve_paper:
# scale by the standard deviations using paper proposed factor
l = (lStdTar / lStdSrc) * l
a = (aStdTar / aStdSrc) * a
b = (bStdTar / bStdSrc) * b
else:
# scale by the standard deviations using reciprocal of paper proposed factor
l = (lStdSrc / lStdTar) * l
a = (aStdSrc / aStdTar) * a
b = (bStdSrc / bStdTar) * b
# add in the source mean
l += lMeanSrc
a += aMeanSrc
b += bMeanSrc
# clip/scale the pixel intensities to [0, 255] if they fall
# outside this range
l = _scale_array(l, clip=clip)
a = _scale_array(a, clip=clip)
b = _scale_array(b, clip=clip)
# merge the channels together and convert back to the RGB color
# space, being sure to utilize the 8-bit unsigned integer data
# type
transfer = cv2.merge([l, a, b])
transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)
# return the color transferred image
return transfer
def auto_color_transfer(source, target):
"""Pick color_transfer result truest to source image color
Applies color_transfer with all possible combinations of the clip & preserve_paper arguments.
    A chi-squared distance between HSV color histograms is computed for each result and the source image.
    The best_result that minimizes this distance is returned as well as a montage of all candidate results.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
Returns:
-------
tuple: (best_result, comparison)
best_result: NumPy array
            result that minimizes the chi-squared histogram distance to the source image in HSV color space
comparison: NumPy array
image showing the results of all combinations of color_transfer options
"""
# get mean HSV stats from source image for comparison
hsv_source = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
hsv_hist_src = cv2.calcHist([hsv_source], [0, 1, 2], None,
[8, 8, 8], [0, 256, 0, 256, 0, 256])
# iterate through all 4 options for toggling color transfer
bools = [True, False]
candidates = []
best_result = None
best_dist = float('inf')
for clip in bools:
for preserve_paper in bools:
# create candidate image from options of this iteration
candidate = color_transfer(source, target, clip, preserve_paper)
# get mean HSV stats from candidate image for comparison
hsv_candidate = cv2.cvtColor(candidate, cv2.COLOR_BGR2HSV)
hsv_hist_cand = cv2.calcHist([hsv_candidate], [0, 1, 2], None,
[8, 8, 8], [0, 256, 0, 256, 0, 256])
# calc chi square dist
chi2_dist = chi2_distance(hsv_hist_src, hsv_hist_cand)
            # propose new truest result if we found a new smallest distance
            if chi2_dist < best_dist:
                best_dist = chi2_dist
                best_result = candidate[:]
candidates.append(candidate)
# build 2 by 2 image matrix of all candidates for comparison
comparison = np.hstack((np.vstack(candidates[:2]),
np.vstack(candidates[2:])))
# add border annotations showing values of params for each output
comparison = _bool_matrix_border(comparison)
return best_result, comparison
def chi2_distance(hist_a, hist_b, eps=1e-10):
return 0.5 * np.sum(((hist_a - hist_b) ** 2) / (hist_a + hist_b + eps))
def _bool_matrix_border(comparison_image):
"""Apply table formatting for comparison of color_transfer options
Parameters:
-------
target: NumPy array
OpenCV image in BGR color space (the comparison image produced in auto_color_transfer)
Returns:
-------
comparison: NumPy array
OpenCV image in BGR color space with borders applied to easily compare the different
results of the auto_color_transfer
"""
# 200 seems to work well as border size
border_size = 200
# put black border on top and left of input image
h, w = comparison_image.shape[:2]
top = np.zeros(w * border_size, dtype='uint8').reshape(border_size, w)
left = np.zeros((h + border_size) * border_size, dtype='uint8').reshape(h + border_size, border_size)
top = cv2.cvtColor(top, cv2.COLOR_GRAY2BGR)
left = cv2.cvtColor(left, cv2.COLOR_GRAY2BGR)
bordered_comparison_image = np.vstack((top, comparison_image))
bordered_comparison_image = np.hstack((left, bordered_comparison_image))
# add text for clip arg options to top border
top_title_loc = (border_size, 75)
top_true_loc = (border_size, 190)
top_false_loc = (int(border_size + w / 2), 190)
cv2.putText(bordered_comparison_image, 'Clip', top_title_loc,
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'True', top_true_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'False', top_false_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
# rotate 90 degrees for writing text to left border
bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, 90)
# add text for preserve paper arg options to left border
top_title_loc = (5, 75)
top_true_loc = (5 + int(h / 2), 190)
top_false_loc = (5, 190)
cv2.putText(bordered_comparison_image, 'Preserve Paper', top_title_loc,
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'True', top_true_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'False', top_false_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
# rotate -90 degrees to return image in correct orientation
bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, -90)
return bordered_comparison_image
def image_stats(image):
"""
Parameters:
-------
image: NumPy array
OpenCV image in L*a*b* color space
Returns:
-------
Tuple of mean and standard deviations for the L*, a*, and b*
channels, respectively
"""
# compute the mean and standard deviation of each channel
(l, a, b) = cv2.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
# return the color statistics
return lMean, lStd, aMean, aStd, bMean, bStd
def _min_max_scale(arr, new_range=(0, 255)):
"""
Perform min-max scaling to a NumPy array
Parameters:
-------
arr: NumPy array to be scaled to [new_min, new_max] range
new_range: tuple of form (min, max) specifying range of
transformed array
Returns:
-------
NumPy array that has been scaled to be in
[new_range[0], new_range[1]] range
"""
# get array's current min and max
mn = arr.min()
mx = arr.max()
# check if scaling needs to be done to be in new_range
if mn < new_range[0] or mx > new_range[1]:
# perform min-max scaling
scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]
else:
# return array if already in range
scaled = arr
return scaled
def _scale_array(arr, clip=True):
"""
Trim NumPy array values to be in [0, 255] range with option of
clipping or scaling.
Parameters:
-------
arr: array to be trimmed to [0, 255] range
clip: should array be scaled by np.clip? if False then input
array will be min-max scaled to range
[max([arr.min(), 0]), min([arr.max(), 255])]
Returns:
-------
NumPy array that has been scaled to be in [0, 255] range
"""
if clip:
scaled = np.clip(arr, 0, 255)
else:
scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))
scaled = _min_max_scale(arr, new_range=scale_range)
return scaled
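# --- Added usage sketch (illustration only) ---
# The file names below are placeholders; any pair of BGR images readable by OpenCV will do.
if __name__ == "__main__":
    source_img = cv2.imread("source.jpg")
    target_img = cv2.imread("target.jpg")
    transferred = color_transfer(source_img, target_img)
    best, comparison_grid = auto_color_transfer(source_img, target_img)
    cv2.imwrite("transfer_result.jpg", transferred)
    cv2.imwrite("auto_transfer_comparison.jpg", comparison_grid)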
| 36.477032 | 105 | 0.657173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,400 | 0.523104 |
6aac551e77cffa8d22df81867eace49a7797fd1d | 1,199 | py | Python | misc.py | hldai/wikiprocesspy | 788ccb6f0e0e54a7322863d5a13332635afc240d | [
"MIT"
] | null | null | null | misc.py | hldai/wikiprocesspy | 788ccb6f0e0e54a7322863d5a13332635afc240d | [
"MIT"
] | null | null | null | misc.py | hldai/wikiprocesspy | 788ccb6f0e0e54a7322863d5a13332635afc240d | [
"MIT"
] | null | null | null | import json
def __text_from_anchor_sents_file(anchor_sents_file, output_file):
f = open(anchor_sents_file, encoding='utf-8')
fout = open(output_file, 'w', encoding='utf-8', newline='\n')
for i, line in enumerate(f):
sent = json.loads(line)
fout.write('{}\n'.format(sent['tokens']))
# if i > 5:
# break
f.close()
fout.close()
def merge_files(filenames, output_file):
fout = open(output_file, 'w', encoding='utf-8', newline='\n')
for filename in filenames:
print(filename)
f = open(filename, encoding='utf-8')
for line in f:
fout.write(line)
f.close()
fout.close()
wiki19_anchor_sents_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents.txt'
anchor_sent_texts_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts.txt'
# __text_from_anchor_sents_file(wiki19_anchor_sents_file, anchor_sent_texts_file)
part_pos_tag_files = [f'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos-{i}.txt' for i in range(4)]
pos_tag_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos.txt'
# merge_files(part_pos_tag_files, pos_tag_file)
| 35.264706 | 118 | 0.686405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 481 | 0.401168 |
6aad4ce5dfa92a930b5b7dfb6e85c80cb8498743 | 2,833 | py | Python | neural_toolbox/inception.py | ibrahimSouleiman/GuessWhat | 60d140de1aae5ccda27e7d3eef2b9fb9548f0854 | ["Apache-2.0"] | null | null | null | neural_toolbox/inception.py | ibrahimSouleiman/GuessWhat | 60d140de1aae5ccda27e7d3eef2b9fb9548f0854 | ["Apache-2.0"] | null | null | null | neural_toolbox/inception.py | ibrahimSouleiman/GuessWhat | 60d140de1aae5ccda27e7d3eef2b9fb9548f0854 | ["Apache-2.0"] | null | null | null |
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.python.slim.nets.resnet_v1 as resnet_v1
import tensorflow.contrib.slim.python.slim.nets.inception_v1 as inception_v1
import tensorflow.contrib.slim.python.slim.nets.resnet_utils as slim_utils
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import arg_scope
import os
def get_resnet_arg_scope(bn_fn):
"""
    Trick to apply CBN to a pretrained tf network. It overrides the batchnorm constructor with the cbn factory
:param bn_fn: cbn factory
:return: tensorflow scope
"""
with arg_scope(
[layers_lib.conv2d],
activation_fn=tf.nn.relu,
normalizer_fn=bn_fn,
normalizer_params=None) as arg_sc:
return arg_sc
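# Illustrative sketch (not part of the original module): shows how the arg scope above
# might wrap a convolution so it picks up the injected normalizer. slim.batch_norm is
# used here as a stand-in for a real CBN factory; the layer sizes are example values.
def _demo_resnet_arg_scope(image_input):
    with slim.arg_scope(get_resnet_arg_scope(slim.batch_norm)):
        # every conv2d created inside this scope uses relu + the supplied normalizer
        return layers_lib.conv2d(image_input, 64, [3, 3], scope='demo_conv')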
def create_inception(image_input, is_training, scope="", inception_out="Mixed_5c", resnet_version=50, cbn=None):
"""
    Create an Inception v1 network and return one of its end points
    (adapted from the ResNet/CBN builder; conditional batchnorm is not applied here)
    :param image_input: placeholder with the input image batch
    :param is_training: are you using the network at training time or test time
    :param scope: tensorflow scope prefix used when looking up the output end point
    :param inception_out: name of the Inception end point to return (default "Mixed_5c")
    :param resnet_version: 50/101/152 (kept for interface compatibility, unused here)
    :param cbn: the cbn factory (currently unused)
    :return: the requested end point tensor and the full end_points dict
"""
# assert False, "\n" \
# "There is a bug with classic batchnorm with slim networks (https://github.com/tensorflow/tensorflow/issues/4887). \n" \
# "Please use the following config -> 'cbn': {'use_cbn':true, 'excluded_scope_names': ['*']}"
# arg_sc = slim_utils.resnet_arg_scope(is_training=is_training)
# print("--- 1")
arg_sc = inception_v1.inception_v1_arg_scope()
# Pick the correct version of the resnet
# if resnet_version == 50:
# current_resnet = resnet_v1.resnet_v1_50
# elif resnet_version == 101:
# current_resnet = resnet_v1.resnet_v1_101
# elif resnet_version == 152:
# current_resnet = resnet_v1.resnet_v1_152
# else:
# raise ValueError("Unsupported resnet version")
# inception_scope = os.path.join('InceptionV1/InceptionV1', inception_out)
# print("--- 2")
inception_scope = inception_out
# print(" resnet_out = {} , resnet_scope = {}".format(resnet_out,resnet_scope))
# print("--- 3")
with slim.arg_scope(arg_sc):
        net, end_points = inception_v1.inception_v1(image_input, 1001)  # 1001 = 1000 ImageNet classes + 1 background class
print("Net = ",net)
# print("--- 4")
if len(scope) > 0 and not scope.endswith("/"):
scope += "/"
# print("--- 5")
# print(end_points)
print(" Batch ",inception_scope)
out = end_points[scope + inception_scope]
print("-- out Use: {},output = {}".format(inception_scope,out))
return out,end_points
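# Illustrative sketch (not part of the original module): one way create_inception might be
# wired into a TF1-style graph. The placeholder shape is an assumption (224x224 RGB); the
# default scope and "Mixed_5c" end point are kept so the end_points lookup above succeeds.
def _demo_create_inception():
    images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
    mixed_5c, end_points = create_inception(images, is_training=False)
    return mixed_5c, end_points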
| 36.320513 | 143 | 0.676668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,527 | 0.539005 |
6ab1bd9218aece261b575574072df1d919112085 | 1,108 | py | Python | lib/galaxy/web/__init__.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | ["CC-BY-3.0"] | 4 | 2015-05-12T20:36:41.000Z | 2017-06-26T15:34:02.000Z | lib/galaxy/web/__init__.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | ["CC-BY-3.0"] | 52 | 2015-03-16T14:02:14.000Z | 2021-12-24T09:50:23.000Z | lib/galaxy/web/__init__.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | ["CC-BY-3.0"] | 1 | 2016-03-21T12:54:06.000Z | 2016-03-21T12:54:06.000Z |
"""
The Galaxy web application framework
"""
from .framework import url_for
from .framework.base import httpexceptions
from .framework.decorators import (
do_not_cache,
error,
expose,
expose_api,
expose_api_anonymous,
expose_api_anonymous_and_sessionless,
expose_api_raw,
expose_api_raw_anonymous,
expose_api_raw_anonymous_and_sessionless,
format_return_as_json,
json,
json_pretty,
legacy_expose_api,
legacy_expose_api_anonymous,
legacy_expose_api_raw,
legacy_expose_api_raw_anonymous,
require_admin,
require_login,
)
__all__ = ('FormBuilder', 'do_not_cache', 'error', 'expose', 'expose_api',
'expose_api_anonymous', 'expose_api_anonymous_and_sessionless',
'expose_api_raw', 'expose_api_raw_anonymous',
'expose_api_raw_anonymous_and_sessionless', 'form',
'format_return_as_json', 'httpexceptions', 'json', 'json_pretty',
'legacy_expose_api', 'legacy_expose_api_anonymous',
'legacy_expose_api_raw', 'legacy_expose_api_raw_anonymous',
'require_admin', 'require_login', 'url_for')
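# Illustrative sketch (not part of this module): a hypothetical API controller method showing
# how the decorators re-exported above are typically stacked. The controller base class and
# its import location are assumptions for the example only.
#
#     from galaxy.web import expose_api, require_admin
#
#     class WidgetsAPIController(BaseAPIController):
#
#         @require_admin
#         @expose_api
#         def index(self, trans, **kwd):
#             # `trans` is the per-request transaction object Galaxy passes to controllers
#             return {"widgets": []}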
| 30.777778 | 74 | 0.737365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 449 | 0.405235 |