content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang
---|---|---|---|---|---|---|---|---
stringlengths 7-928k | float64 3.5-33.8k | int64 6-139k | float64 0.08-0.96 | sequence | stringlengths 7-104 | stringlengths 4-230 | int64 7-928k | stringclasses 1 value
# encoding: utf-8
"""
This module defines the things that are used in setup.py for building JupyterLab
This includes:
* Functions for finding things like packages, package data, etc.
* A function for checking dependencies.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import json
import os
import pipes
import sys
import shutil
import tempfile
import os.path as osp
from os.path import join as pjoin
from distutils import log
from distutils.cmd import Command
from distutils.version import LooseVersion
from setuptools.command.egg_info import egg_info
from setuptools.command.bdist_egg import bdist_egg
from subprocess import check_call
if sys.platform == 'win32':
from subprocess import list2cmdline
else:
def list2cmdline(cmd_list):
return ' '.join(map(pipes.quote, cmd_list))
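# Illustrative note (not in the original): the POSIX fallback above shell-quotes each
# argument, e.g. ['echo', 'hello world'] -> "echo 'hello world'", mirroring what
# subprocess.list2cmdline provides on Windows.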
# the name of the project
name = 'jupyterlab'
here = osp.dirname(osp.abspath(__file__))
is_repo = osp.exists(pjoin(here, '.git'))
version_ns = {}
with io.open(pjoin(here, name, '_version.py'), encoding="utf8") as f:
exec(f.read(), {}, version_ns)
def run(cmd, *args, **kwargs):
"""Echo a command before running it"""
log.info('> ' + list2cmdline(cmd))
kwargs['shell'] = (sys.platform == 'win32')
return check_call(cmd, *args, **kwargs)
#---------------------------------------------------------------------------
# Find packages
#---------------------------------------------------------------------------
def find_packages():
"""
Find all of the packages.
"""
packages = []
for dir, subdirs, files in os.walk('jupyterlab'):
if 'node_modules' in subdirs:
subdirs.remove('node_modules')
package = dir.replace(osp.sep, '.')
if '__init__.py' not in files:
# not a package
continue
packages.append(package)
return packages
#---------------------------------------------------------------------------
# Find package data
#---------------------------------------------------------------------------
def find_package_data():
"""
Find package_data.
"""
theme_dirs = []
for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'themes')):
slice_len = len('jupyterlab' + os.sep)
theme_dirs.append(pjoin(dir[slice_len:], '*'))
schema_dirs = []
for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'schemas')):
slice_len = len('jupyterlab' + os.sep)
schema_dirs.append(pjoin(dir[slice_len:], '*'))
return {
'jupyterlab': ['build/*', '*.js', 'package.app.json',
'yarn.lock', 'yarn.app.lock', '.yarnrc'
] + theme_dirs + schema_dirs
}
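# Illustrative note (not in the original): the globs appended above take forms such as
# 'themes/@jupyterlab/theme-light-extension/*' and 'schemas/@jupyterlab/shortcuts-extension/*'
# (directory names borrowed from the CheckAssets targets below), all attached to the
# 'jupyterlab' package data.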
def find_data_files():
"""
Find data_files.
"""
if not os.path.exists(pjoin('jupyterlab', 'build')):
return []
files = []
static_files = os.listdir(pjoin('jupyterlab', 'build'))
files.append(('share/jupyter/lab/static',
['jupyterlab/build/%s' % f for f in static_files]))
for dir, subdirs, fnames in os.walk(pjoin('jupyterlab', 'schemas')):
dir = dir.replace(os.sep, '/')
schema_files = []
for fname in fnames:
schema_files.append('%s/%s' % (dir, fname))
slice_len = len('jupyterlab/')
files.append(('share/jupyter/lab/%s' % dir[slice_len:], schema_files))
for dir, subdirs, fnames in os.walk(pjoin('jupyterlab', 'themes')):
dir = dir.replace(os.sep, '/')
themes_files = []
for fname in fnames:
themes_files.append('%s/%s' % (dir, fname))
slice_len = len('jupyterlab/')
files.append(('share/jupyter/lab/%s' % dir[slice_len:], themes_files))
return files
def js_prerelease(command, strict=False):
"""decorator for building minified js/css prior to another command"""
class DecoratedCommand(command):
def run(self):
jsdeps = self.distribution.get_command_obj('jsdeps')
if not is_repo and all(osp.exists(t) for t in jsdeps.targets):
# sdist, nothing to do
command.run(self)
return
try:
self.distribution.run_command('jsdeps')
except Exception as e:
missing = [t for t in jsdeps.targets if not osp.exists(t)]
if strict or missing:
log.warn('js check failed')
if missing:
log.error('missing files: %s' % missing)
raise e
else:
log.warn('js check failed (not a problem)')
log.warn(str(e))
command.run(self)
return DecoratedCommand
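# Illustrative sketch (not part of the original module): js_prerelease is meant to wrap
# ordinary setuptools commands in setup.py's cmdclass so the JS assets get checked first.
# Hypothetical wiring, assuming the stock build_py/sdist commands:
#
#   from setuptools import setup
#   from setuptools.command.build_py import build_py
#   from setuptools.command.sdist import sdist
#   setup(
#       ...,
#       cmdclass={
#           'build_py': js_prerelease(build_py),
#           'sdist': js_prerelease(sdist, strict=True),
#           'jsdeps': CheckAssets,  # the 'jsdeps' command looked up by DecoratedCommand.run
#       },
#   )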
def update_package_data(distribution):
"""update build_py options to get package_data changes"""
build_py = distribution.get_command_obj('build_py')
build_py.finalize_options()
class CheckAssets(Command):
description = 'check for required assets'
user_options = []
# Representative files that should exist after a successful build
targets = [
pjoin(here, 'jupyterlab', 'build', 'release_data.json'),
pjoin(here, 'jupyterlab', 'build', 'main.bundle.js'),
pjoin(here, 'jupyterlab', 'schemas', '@jupyterlab',
'shortcuts-extension', 'plugin.json'),
pjoin(here, 'jupyterlab', 'themes', '@jupyterlab',
'theme-light-extension',
'images', 'jupyterlab.svg')
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
for t in self.targets:
if not osp.exists(t):
msg = 'Missing file: %s' % t
raise ValueError(msg)
target = pjoin(here, 'jupyterlab', 'build', 'release_data.json')
with open(target) as fid:
data = json.load(fid)
if (LooseVersion(data['version']) !=
LooseVersion(version_ns['__version__'])):
msg = 'Release assets version mismatch, please run npm publish'
raise ValueError(msg)
# update package data in case this created new files
update_package_data(self.distribution)
class bdist_egg_disabled(bdist_egg):
"""Disabled version of bdist_egg
    Prevents setup.py install from performing setuptools' default easy_install,
which it should never ever do.
"""
def run(self):
sys.exit("Aborting implicit building of eggs. Use `pip install .` to install from source.")
class custom_egg_info(egg_info):
"""Prune JavaScript folders from egg_info to avoid locking up pip.
"""
def run(self):
folders = ['examples', 'packages', 'test', 'node_modules']
folders = [f for f in folders if os.path.exists(pjoin(here, f))]
tempdir = tempfile.mkdtemp()
for folder in folders:
shutil.move(pjoin(here, folder), tempdir)
value = egg_info.run(self)
for folder in folders:
shutil.move(pjoin(tempdir, folder), here)
shutil.rmtree(tempdir)
return value
| 30.969565 | 99 | 0.584164 | ["BSD-3-Clause"] | bualpha/jupyterlab | setupbase.py | 7,123 | Python |
# # SPDX-License-Identifier: MIT
# from augur.augurplugin import AugurPlugin
# from augur.application import Application
# class HousekeeperPlugin(AugurPlugin):
# """
# This plugin serves as an example as to how to load plugins into Augur
# """
# def __init__(self, augur_app):
# super().__init__(augur_app)
# self.__housekeeper = self.__call__()
# def __call__(self):
# from .housekeeper import Housekeeper
# return Housekeeper(
# user=self._augur.read_config('Database', 'user', 'AUGUR_DB_USER', 'root'),
# password=self._augur.read_config('Database', 'password', 'AUGUR_DB_PASS', 'password'),
# host=self._augur.read_config('Database', 'host', 'AUGUR_DB_HOST', '127.0.0.1'),
# port=self._augur.read_config('Database', 'port', 'AUGUR_DB_PORT', '3306'),
# dbname=self._augur.read_config('Database', 'database', 'AUGUR_DB_NAME', 'msr14')
# )
# HousekeeperPlugin.augur_plugin_meta = {
# 'name': 'housekeeper',
# 'datasource': True
# }
# Application.register_plugin(HousekeeperPlugin)
# __all__ = ['HousekeeperPlugin']
| 39.1 | 104 | 0.636829 | ["MIT"] | 0WeiyuFeng0/augur | augur/housekeeper/__init__.py | 1,173 | Python |
""" Video Link: https://youtu.be/1s-Tj65AKZA """
from seleniumbase import __version__
from seleniumbase import BaseCase
class HackTests(BaseCase):
def test_all_your_base_are_belong_to_us(self):
# First make sure that seleniumbase 1.65.0 or newer is installed
version = __version__.split(".")
if version[0] == "1" and int(version[1]) < 65:
raise Exception(
"This test requires minimum seleniumbase version: 1.65.0"
)
self.set_window_size(1220, 740)
ayb = "ALL YOUR BASE"
abtu = "ARE BELONG TO US"
aybabtu = "%s %s" % (ayb, abtu)
sb_banner_logo = "//seleniumbase.io/cdn/img/sb_logo_10.png"
sb_dashboard_logo = "//seleniumbase.io/img/dash_pie_3.png"
yt_chip = "#chips yt-chip-cloud-chip-renderer:nth-of-type"
wiki = "https://en.wikipedia.org/wiki/All_your_base_are_belong_to_us"
self.open(wiki)
self.click_if_visible('button[aria-label="Close"]')
self.set_text_content("h1#firstHeading", aybabtu)
self.set_text_content("#ca-history a", aybabtu)
self.set_text_content('#n-mainpage-description a', "ALL")
self.set_text_content('#n-contents a', "YOUR")
self.set_text_content('#n-currentevents a', "BASE")
self.set_text_content('#n-randompage a', "ARE")
self.set_text_content('#n-aboutsite a', "BELONG")
self.set_text_content('#n-contactpage a', "TO")
self.set_text_content('#n-sitesupport a', "US")
self.set_text_content('.tocsection-1 span.toctext', "ALL")
self.set_text_content('.tocsection-2 span.toctext', "YOUR")
self.set_text_content('.tocsection-3 span.toctext', "BASE")
self.set_text_content('.tocsection-4 span.toctext', "ARE")
self.set_text_content('.tocsection-5 span.toctext', "BELONG")
self.set_text_content('.tocsection-6 span.toctext', "TO")
self.set_text_content('.tocsection-7 span.toctext', "US")
self.highlight("h1#firstHeading", loops=2, scroll=False)
self.highlight("#ca-history a", loops=2, scroll=False)
self.highlight("nav#p-navigation", loops=2, scroll=False)
self.highlight("div#toc", loops=2, scroll=False)
self.highlight('.tocsection-1 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-2 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-3 span.toctext', loops=2, scroll=False)
self.highlight('.tocsection-4 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-5 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-6 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-7 span.toctext', loops=2, scroll=False)
zoom_in = 'div.thumbinner{zoom: 1.4;-moz-transform: scale(1.4);}'
self.add_css_style(zoom_in)
self.highlight("div.thumbinner", loops=8, scroll=False)
self.open("https://www.apple.com/store")
self.set_text_content("div.rs-shop-subheader", aybabtu)
self.set_text_content('#shelf-1 a[href*="mac"]', "ALL")
self.set_text_content('#shelf-1 a[href*="iphone"]', "YOUR")
self.set_text_content('#shelf-1 a[href*="ipad"]', "BASE")
self.set_text_content('#shelf-1 a[href*="watch"]', "ARE")
self.set_text_content('#shelf-1 a[href*="airpods"]', "BELONG")
self.set_text_content('#shelf-1 a[href*="airtag"]', "TO")
self.set_text_content('#shelf-1 a[href*="tv"]', "US")
self.set_text_content('#shelf-1 a[href*="homepod"]', ".")
self.set_text_content("h2", aybabtu + ". ")
self.highlight("div.rs-shop-subheader", loops=6, scroll=False)
self.highlight("#shelf-1", loops=2, scroll=False)
self.highlight('#shelf-1 a[href*="mac"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="iphone"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="ipad"]', loops=3, scroll=False)
self.highlight('#shelf-1 a[href*="watch"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="airpods"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="airtag"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="tv"]', loops=3, scroll=False)
self.highlight("h2", loops=9, scroll=False)
self.open("https://google.com/ncr")
self.set_text_content('a[href*="about.google"]', ayb)
self.set_text_content('a[href*="store.google"]', abtu)
self.set_text_content('a[href*="mail.google.com"]', ayb)
self.set_text_content('a[href*="google.com/img"]', abtu)
self.set_attributes('[value="Google Search"]', "value", ayb)
self.set_attributes('[value="I\'m Feeling Lucky"]', "value", abtu)
zoom_in = 'a{zoom: 1.2;-moz-transform: scale(1.2);}'
self.add_css_style(zoom_in)
zoom_in = (
'[value="ALL YOUR BASE"]{zoom: 1.3;-moz-transform: scale(1.3);}'
'[value="ARE BELONG TO US"]{zoom: 1.3;-moz-transform: scale(1.3);}'
)
self.add_css_style(zoom_in)
self.highlight('a[href*="about.google"]', loops=3)
self.highlight('a[href*="store.google"]', loops=3)
self.highlight('a[href*="mail.google.com"]', loops=3)
self.highlight('a[href*="google.com/img"]', loops=3)
self.highlight('form[role="search"]', loops=8)
self.open("https://twitter.com/")
if not self.is_element_visible('a[href*="w/signup"] span'):
self.refresh()
if self.is_element_visible('a[href*="w/signup"] span'):
self.set_text_content('a[href*="w/signup"] span', aybabtu)
self.highlight('a[href*="w/signup"] span', loops=6, scroll=False)
self.highlight('a[href*="w/signup"]', loops=6, scroll=False)
self.open("https://www.youtube.com/")
self.set_text_content('%s(1)' % yt_chip, "ALL")
self.set_text_content('%s(2)' % yt_chip, "YOUR")
self.set_text_content('%s(3)' % yt_chip, "BASE")
self.set_text_content('%s(4)' % yt_chip, "ARE")
self.set_text_content('%s(5)' % yt_chip, "BELONG")
self.set_text_content('%s(6)' % yt_chip, "TO")
self.set_text_content('%s(7)' % yt_chip, "US")
self.set_text_content('%s(8)' % yt_chip, "!")
self.set_text_content('%s(9)' % yt_chip, "!")
self.set_text_content('%s(10)' % yt_chip, "!")
self.click_if_visible("#dismiss-button")
self.click_if_visible('button[aria-label="Close"]')
self.highlight("#scroll-container", loops=5, scroll=False)
self.highlight('%s(1)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(2)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(3)' % yt_chip, loops=3, scroll=False)
self.highlight('%s(4)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(5)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(6)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(7)' % yt_chip, loops=3, scroll=False)
self.highlight("#scroll-container", loops=7, scroll=False)
self.open("https://github.com/features/actions")
self.set_text_content('a[href="/team"]', ayb)
self.set_text_content('a[href="/enterprise"]', abtu)
self.set_text_content('h1 span:nth-child(1)', ayb)
self.set_text_content('h1 span:nth-of-type(2)', "ARE")
self.set_text_content('h1 span:nth-of-type(3)', "BELONG")
self.set_text_content('h1 span:nth-of-type(4)', "TO")
self.set_text_content('h1 span:nth-of-type(5)', "US")
self.type('input[name="q"]', aybabtu.lower())
self.click("h1", scroll=False)
self.highlight("nav", loops=5, scroll=False)
self.highlight('input[name="q"]', loops=5, scroll=False)
self.highlight("h1", loops=8, scroll=False)
self.open("https://dev.to/top/infinity")
self.click_if_visible('button[aria-label="Close campaign banner"]')
self.set_text_content('nav a[data-text="Relevant"]', "ALL")
self.set_text_content('nav a[data-text="Latest"]', "YOUR")
self.set_text_content('nav a[data-text="Top"]', "BASE")
self.set_text_content('nav a[data-text="Week"]', "ARE")
self.set_text_content('nav a[data-text="Month"]', "BELONG")
self.set_text_content('nav a[data-text="Year"]', "TO")
self.set_text_content('nav a[data-text="Infinity"]', "US")
self.set_text_content('aside a[class*="tful"]', aybabtu)
self.set_text_content('aside a[aria-label="Create new account"]', ayb)
self.set_text_content('aside a[aria-label="Log in"]', abtu)
self.set_text_content('aside a[class*="tful"]:nth-child(2)', aybabtu)
self.set_text_content('aside a[class*="tful"]:nth-child(3)', aybabtu)
self.set_text_content('aside a[class*="tful"]:nth-child(4)', aybabtu)
self.set_text_content('aside a[class*="tful"]:nth-child(5)', aybabtu)
self.set_attribute("a.crayons-avatar img", "src", sb_dashboard_logo)
self.set_text_content('.profile-preview-card button', "SeleniumBase")
self.set_text_content('h2.crayons-story__title a', aybabtu)
self.type('input[name="q"]', aybabtu)
self.highlight('input[name="q"]', loops=4, scroll=False)
self.highlight('[aria-label="Primary sidebar"] div div', scroll=False)
self.highlight('nav a[data-text="Relevant"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Latest"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Top"]', loops=2, scroll=False)
self.highlight('nav a[data-text="Week"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Month"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Year"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Infinity"]', loops=2, scroll=False)
self.highlight('aside[id*="sidebar"] section', loops=5, scroll=False)
self.highlight("div.crayons-story__body", loops=7, scroll=False)
self.open("https://azure.microsoft.com/en-us/services/playfab/")
self.set_text_content("h1", aybabtu)
self.set_text_content('a[aria-label*="Try PlayF"]', ayb)
self.set_text_content('a[aria-label*="Sign in to"]', abtu)
self.set_text_content('span:contains("Chat with Sales")', aybabtu)
self.highlight("h1", loops=6, scroll=False)
self.highlight('a[aria-label*="Try PlayF"]', loops=4, scroll=False)
self.highlight('a[aria-label*="Sign in to"]', loops=4, scroll=False)
self.highlight('button#live-engage-btn', loops=6, scroll=False)
self.open("https://www.snapchat.com/")
self.set_text_content("h1", ayb)
self.set_text_content("form .button-large span span", abtu)
zoom_in = 'a.button-large span{zoom: 1.2;-moz-transform: scale(1.2);}'
self.add_css_style(zoom_in)
self.highlight("h1", loops=6, scroll=False)
self.highlight("form .button-large span span", loops=8, scroll=False)
self.open("https://store.steampowered.com/")
self.set_text_content('div.content a[href*="/about/"]', " ")
self.set_text_content('div.content a[href*="help.steam"]', aybabtu)
self.set_text_content("#foryou_tab a", "ALL")
self.set_text_content("#noteworthy_tab a", "YOUR BASE")
self.set_text_content("#genre_tab a", "ARE")
self.set_text_content('span:contains("Points Shop")', "BELONG")
self.set_text_content('span:contains("News")', "TO")
self.set_text_content('span:contains("Labs")', "US")
self.set_value("input#store_nav_search_term", ayb + " . . . .")
self.highlight('div.content a[href*="help.steam"]', loops=6)
self.highlight('#store_nav_area', loops=2, scroll=False)
self.highlight("#foryou_tab a", loops=1, scroll=False)
self.highlight("#noteworthy_tab a", loops=3, scroll=False)
self.highlight("#genre_tab a", loops=1, scroll=False)
self.highlight('span:contains("BELONG")', loops=1, scroll=False)
self.highlight('span:contains("TO")', loops=1, scroll=False)
self.highlight('span:contains("US")', loops=2, scroll=False)
self.js_click('input[id*="nav_search"]')
self.highlight('input[id*="nav_search"]', loops=6, scroll=False)
self.open("https://xkcd.com/286/")
self.set_text_content('a[href="/archive"]', "ALL")
self.set_text_content('a[href*="what-if"]', "YOUR")
self.set_text_content('a[href*="//blag."]', "BASE")
self.set_text_content('a[href*="/about"]', abtu)
self.remove_element('li:contains("Feed")')
self.remove_element('li:contains("TW")')
self.remove_element('li:contains("Books")')
self.remove_element('li:contains("What")')
self.remove_element('li:contains("WI")')
self.set_attributes("#news img", "src", sb_banner_logo)
self.set_text_content('#ctitle', aybabtu)
self.set_text_content('a[rel="prev"]', "All")
self.set_text_content('a[href*="random"]', "Your")
self.set_text_content('a[rel="next"]', "Base")
self.highlight("#topLeft ul", loops=5, scroll=False)
self.highlight('a[href="/archive"]', loops=1, scroll=False)
self.highlight('a[href*="what-if"]', loops=1, scroll=False)
self.highlight('a[href*="//blag."]', loops=2, scroll=False)
self.highlight('a[href*="/about"]', loops=5, scroll=False)
self.highlight('a[rel="prev"]', loops=1, scroll=False)
self.highlight('a[href*="random"]', loops=1, scroll=False)
self.highlight('a[rel="next"]', loops=3, scroll=False)
self.highlight("#ctitle", loops=7, scroll=False)
self.open("https://www.nintendo.com/whatsnew/")
self.set_text_content('button[aria-label="Search"]', aybabtu)
self.set_text_content('button[data-section="newsevents"]', aybabtu)
self.set_text_content("h2", aybabtu)
self.highlight('div.search-flex', loops=4, scroll=False)
self.highlight('button[data-section*="news"]', loops=4, scroll=False)
self.highlight("h2", loops=6, scroll=False)
self.open("https://support.gog.com/hc/en-us?product=gog")
self.set_text_content("div.intro-title", aybabtu)
self.set_text_content("h4", aybabtu)
self.highlight("div.intro-title", loops=8, scroll=False)
self.highlight("h4", loops=8, scroll=False)
self.open("https://slack.com/help/articles/204714258-Giphy-for-Slack")
self.set_text_content("h1", aybabtu)
self.set_text_content('a[prettyslug="getting-started"]', "ALL")
self.set_text_content('a[prettyslug="using-slack"]', "YOUR")
self.set_text_content('a[prettyslug="your-profile"]', "BASE")
self.set_text_content('a[prettyslug="connect-tools"]', "ARE")
self.set_text_content('a[prettyslug="administration"]', "BELONG")
self.set_text_content('a[prettyslug="tutorials"]', "TO US")
self.highlight("h1", loops=4, scroll=False)
self.highlight("div#global_menu", loops=2, scroll=False)
self.highlight('a[prettyslug*="g-started"]', loops=1, scroll=False)
self.highlight('a[prettyslug="using-slack"]', loops=1, scroll=False)
self.highlight('a[prettyslug="your-profile"]', loops=2, scroll=False)
self.highlight('a[prettyslug="connect-tools"]', loops=1, scroll=False)
self.highlight('a[prettyslug="administration"]', loops=1, scroll=False)
self.highlight('a[prettyslug="tutorials"]', loops=2, scroll=False)
self.open("https://kubernetes.io/")
self.set_text_content('nav a[href="/docs/"]', "ALL")
self.set_text_content('nav a[href="/blog/"]', "YOUR")
self.set_text_content('nav a[href="/training/"]', "BASE")
self.set_text_content('nav a[href="/partners/"]', "ARE")
self.set_text_content('nav a[href="/community/"]', "BELONG")
self.set_text_content('nav a[href="/case-studies/"]', "TO")
self.set_text_content('nav #navbarDropdown', "US")
self.set_text_content('nav #navbarDropdownMenuLink', ".")
if self.is_element_visible("h1"):
self.set_text_content("h1", aybabtu)
self.highlight("nav ul.navbar-nav", loops=3, scroll=False)
self.highlight('nav a[href="/docs/"]', loops=1, scroll=False)
self.highlight('nav a[href="/blog/"]', loops=1, scroll=False)
self.highlight('nav a[href="/training/"]', loops=2, scroll=False)
self.highlight('nav a[href="/partners/"]', loops=1, scroll=False)
self.highlight('nav a[href="/community/"]', loops=1, scroll=False)
self.highlight('nav a[href="/case-studies/"]', loops=1, scroll=False)
self.highlight('nav #navbarDropdown', loops=2, scroll=False)
if self.is_element_visible("h1"):
self.highlight('h1', loops=6, scroll=False)
self.open("https://www.selenium.dev/")
self.set_attributes("a.dropdown-toggle", "class", "nav-link")
self.set_text_content('li a:contains("About")', "ALL")
self.set_text_content('li a:contains("Downloads")', "YOUR")
self.set_text_content('li a:contains("Documentation")', "BASE")
self.set_text_content('li a:contains("Projects")', "ARE")
self.set_text_content('li a:contains("Support")', "BELONG")
self.set_text_content('li a:contains("Blog")', "TO")
self.set_text_content('li a:contains("English")', "US")
self.set_text_content("div.lead", aybabtu)
self.set_text_content("h2", aybabtu)
zoom_in = 'div.lead{zoom: 1.25;-moz-transform: scale(1.25);}'
self.add_css_style(zoom_in)
self.highlight("div#main_navbar", loops=1, scroll=False)
self.highlight('li a:contains("ALL")', loops=1, scroll=False)
self.highlight('li a:contains("YOUR")', loops=1, scroll=False)
self.highlight('li a:contains("BASE")', loops=2, scroll=False)
self.highlight('li a:contains("ARE")', loops=1, scroll=False)
self.highlight('li a:contains("BELONG")', loops=1, scroll=False)
self.highlight('li a:contains("TO")', loops=1, scroll=False)
self.highlight('li a:contains("US")', loops=2, scroll=False)
self.highlight("div.lead", loops=6, scroll=False)
self.highlight("h2", loops=8, scroll=False)
self.open("https://www.python.org/")
self.set_text_content('a[class="donate-button"]', ayb)
self.set_text_content("#about a", "ALL")
self.set_text_content("#downloads a", "YOUR")
self.set_text_content("#documentation a", "BASE")
self.set_text_content("#community a", "ARE")
self.set_text_content("#success-stories a", "BELONG")
self.set_text_content("#news a", "TO")
self.set_text_content("#events a", "US")
self.highlight('a[class="donate-button"]', loops=4, scroll=False)
self.highlight("nav#mainnav", loops=5, scroll=False)
self.highlight("#about a", loops=1, scroll=False)
self.highlight("#downloads a", loops=1, scroll=False)
self.highlight("#documentation a", loops=2, scroll=False)
self.highlight("#community a", loops=1, scroll=False)
self.highlight("#success-stories a", loops=1, scroll=False)
self.highlight("#news a", loops=1, scroll=False)
self.highlight("#events a", loops=2, scroll=False)
self.open("https://docs.pytest.org/")
self.set_text_content("h1", "pytest: " + aybabtu)
self.highlight("h1", loops=10, scroll=False)
self.open("https://wordpress.com/")
self.set_text_content('a[title="Plans & Pricing"]', aybabtu)
self.set_text_content('a[title="Get Started"]', ayb)
self.set_text_content("p.no-widows", aybabtu)
self.set_text_content("a#lpc-button", "Automate with SeleniumBase")
self.highlight('a[title="Plans & Pricing"]', loops=6, scroll=False)
self.highlight('a[title="Get Started"]', loops=4, scroll=False)
self.highlight("p.no-widows", loops=8, scroll=False)
self.highlight("a#lpc-button", loops=4, scroll=False)
self.open("https://seleniumbase.com/")
self.set_text_content("h1", aybabtu)
self.highlight("h1", loops=10, scroll=False)
self.open("https://pypi.org/")
self.set_text_content('a[href="/sponsors/"]', aybabtu)
self.set_text_content("h1", aybabtu)
self.set_value("input#search", aybabtu, scroll=False)
self.highlight('a[href="/sponsors/"]', loops=6, scroll=False)
self.highlight("h1", loops=6, scroll=False)
self.highlight("input#search", loops=8, scroll=False)
self.open("https://www.atlassian.com/software/jira")
self.set_text_content('a[href*="jira/pricing"]', ayb)
self.set_text_content('a[href*="jira/enterprise"]', abtu)
self.set_text_content('a[href="/software/jira/features"]', "")
self.set_text_content('a[href="/software/jira/guides"]', "")
self.set_text_content("h1", ayb)
self.set_text_content('div.xs-none-bottom a[href*="free"]', abtu)
self.highlight("ul.imkt-navbar__link-list", loops=2, scroll=False)
self.highlight('a[href*="jira/pricing"]', loops=3, scroll=False)
self.highlight('a[href*="jira/enterprise"]', loops=3, scroll=False)
self.highlight("h1", loops=3, scroll=False)
self.highlight('div.xs-none-bottom a[href*="free"]', scroll=False)
self.open("https://status.iboss.com/ibcloud/app/cloudStatus.html")
self.set_text_content('div[translate*="cloudStatus"]', ayb)
self.set_text_content('div[translate*="maintenance"]', "ARE")
self.set_text_content('div[translate*="advisory"]', "BELONG")
self.set_text_content('div[translate*="incident"]', "TO US")
self.set_text_content("h1", "Cloud Status - " + aybabtu)
self.highlight("nav div.ibcloud-header-contents", loops=3)
self.highlight('div[translate*="cloudStatus"]', loops=4)
self.highlight('div[translate*="maintenance"]', loops=1)
self.highlight('div[translate*="advisory"]', loops=1)
self.highlight('div[translate*="incident"]', loops=3)
self.highlight("h1", loops=9, scroll=False)
self.open("https://git-scm.com/")
self.set_text_content("span#tagline", aybabtu)
self.set_text_content("#nav-about h3", ayb)
self.set_text_content("#nav-documentation h3", abtu)
self.highlight("span#tagline", loops=8, scroll=False)
self.highlight("#nav-about h3", loops=5, scroll=False)
self.highlight("#nav-documentation h3", loops=6, scroll=False)
self.open("https://teamtreehouse.com/")
self.set_text_content("li.nav-item-free-trial", aybabtu)
self.set_text_content("h1", aybabtu)
self.set_text_content("h2", aybabtu)
self.set_text_content("p.homepage-signup-form-banner", aybabtu)
self.highlight("li.nav-item-free-trial", loops=6, scroll=False)
self.highlight("h1", loops=6, scroll=False)
self.highlight('p[class*="signup-form"]', loops=8, scroll=False)
self.open("https://pragprog.com/")
self.set_text_content("header p", aybabtu)
zoom_in = 'header p{zoom: 1.35;-moz-transform: scale(1.35);}'
self.add_css_style(zoom_in)
self.highlight("header p", loops=10, scroll=False)
self.open("https://seleniumbase.io/")
self.set_text_content("h1", aybabtu)
self.highlight("h1", loops=10, scroll=False)
| 57.1477 | 79 | 0.631726 | ["MIT"] | GoVanguard/SeleniumBase | examples/hack_the_planet.py | 23,602 | Python |
from os.path import join as pjoin
from scrapy.spiders import (
Rule,
CrawlSpider,
)
from scrapy import exceptions
from scrapy.linkextractors import LinkExtractor
from django.conf import settings
from django.core.cache import caches
import tldextract
from core.extractors import ck0tp
from crawler import items
lockin = caches['lock_in_task']
EOAIENT = settings.ENDPOINTS['ck0tp']
ENDPOINT = EOAIENT['ENDPOINT']
ENDPATH = EOAIENT['ENDPATH']
DIRECTIVE = EOAIENT['DIRECTIVE']
DIRECTIVES = EOAIENT['DIRECTIVES']
class Ck0tp(CrawlSpider):
name = 'ck0tp'
allowed_domains = [
tldextract.extract(ENDPOINT).registered_domain
]
start_urls = [
pjoin(ENDPOINT, DIRECTIVE),
ENDPOINT,
] + [
pjoin(ENDPOINT, d)
for d in DIRECTIVES
]
rules = (
Rule(
LinkExtractor(allow=(r'{}/\d+/?$'.format(ENDPATH), )),
callback='parse_video', follow=True
),
)
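    # Illustrative note (not in the original): with a hypothetical ENDPATH of 'video', the
    # allow pattern above matches links ending in e.g. '/video/12345', and each matching
    # page is handed to parse_video() below.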
def __init__(self, *args, **kwargs):
super(Ck0tp, self).__init__(*args, **kwargs)
        # deduplication lock: abort if another instance of this spider was already launched
if not lockin.add(self.__class__.__name__, 'true', 60 * 60 * 24 * 5):
raise exceptions.CloseSpider('already launched spider')
def closed(self, *args, **kwargs):
lockin.delete(self.__class__.__name__)
def parse_video(self, response):
vid = ck0tp.Video(response.url)
return items.Entry(vid.info())
| 22.777778 | 77 | 0.64878 | ["MIT"] | ikeikeikeike/scrape-django-app | scrape/crawler/crawler/spiders/ck0tp.py | 1,435 | Python |
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import KFold
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
# import data and preprocess it
def preprocessing(file_name: str):
# data import
fish_df = pd.read_csv(file_name)
fish_df = pd.get_dummies(fish_df, columns=['Species'], prefix='Species')
return fish_df
# train-test split by a percentage.
# input: dataframe, label column name, split ratio, and random state
# returns: x_train, x_test, y_train, y_test
def split_df(user_df: pd.DataFrame, label_name: str, split_ratio=0.8, random_value=42):
x_train = user_df.sample(frac=split_ratio, random_state=random_value)
x_test = user_df.drop(x_train.index)
return x_train.drop(label_name, axis=1), x_test.drop(label_name, axis=1), pd.DataFrame(
x_train[label_name]), pd.DataFrame(x_test[label_name])
# Create an array of trees with a given forest size and maximum depth
def create_random_forest(forest_size: int, max_depth: int, random_state_local: int):
random_forest = []
for i in range(0, forest_size, 1):
random_forest.append(DecisionTreeRegressor(criterion='friedman_mse', max_depth=max_depth,
random_state=random_state_local))
return random_forest
# train trees in a forest by fitting each tree to the previous tree's error
# input: forest of trees, initial training guess, x and y databases, alpha coefficient.
# returns: trained forest, final predictions, r_matrix of residuals and mse_list of the results (mean square error)
def train_forest(random_forest: list, initial_average_weight: float, x_df: pd.DataFrame, y_df: pd.DataFrame,
alpha: float = 0.1):
# initial average weight and residuals to be used in the 1st tree
predictions = np.ones(len(y_df))*initial_average_weight
residuals = np.array(y_df['Weight'])-predictions
residuals_matrix = [residuals]
# calculates the first mse value
mse_list = [(np.square(residuals)).sum()/len(predictions)]
for tree in random_forest:
# train the current stump
tree.fit(x_df, residuals)
# predict results based on its training error
residuals = tree.predict(x_df)
# record residuals and calculate mse
residuals_matrix.append(residuals)
mse_list.append((np.square(residuals)).sum()/len(predictions))
# update predictions and calculate new residuals
predictions = predictions + alpha * residuals
residuals = np.array(y_df['Weight']) - predictions
return random_forest, predictions, residuals_matrix, mse_list
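# Worked example (illustrative numbers, not taken from the dataset): with an initial
# average prediction of 400 g and a true weight of 500 g, the first residual is 100 g.
# If the first tree predicts roughly 100 for that sample and alpha = 0.5, the updated
# prediction becomes 400 + 0.5 * 100 = 450 g, leaving a residual of 50 g for the next tree.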
# predict test database by the trained random forest
# input: forest of trees, initial training guess, x and y databases.
# returns: final predictions and mse_list of the forest (mean square error)
def test_forest(random_forest: list, initial_average_weight: float, x_df: pd.DataFrame, y_df: pd.DataFrame,
alpha: float = 0.1):
predictions = np.ones(len(y_df))*initial_average_weight
mse_list = [(np.square(np.array(y_df['Weight']) - predictions)).sum()/len(predictions)]
for tree in random_forest:
predictions = predictions + alpha * tree.predict(x_df)
        mse_list.append((np.square(np.array(y_df['Weight']) - predictions)).sum() / len(predictions))
return predictions, mse_list
def main():
# data import and preprocessing
fish_df = preprocessing("Fish.csv")
# splitting of the data
x_train, x_test, y_train, y_test = split_df(fish_df, 'Weight', 0.8, 42)
# setting up a random forest:
#forest_size_list = [4, 5, 6, 7, 8] # variable calibrated by KFold train-validate
forest_size = 20
# max_depth_list = [1, 2, 3, 4, 5] # variable calibrated by KFold train-validate
max_depth = 3
random_state_local = 42
random_forest = create_random_forest(forest_size, max_depth, random_state_local)
#%% Train
#alpha_list = [0.1, 0.3, 0.5, 0.7, 0.9] # variable calibrated by KFold train-validate
alpha = 0.5 # gradiant coefficient
kf = KFold(n_splits=2, shuffle=True, random_state=42)
for train_index, test_index in kf.split(x_train, y_train):
X_train, X_validate = x_train.iloc[train_index], x_train.iloc[test_index]
Y_train, Y_validate = y_train.iloc[train_index], y_train.iloc[test_index]
# first guess
initial_average_weight = np.average(Y_train['Weight'].tolist())
# train forest
random_forest, predictions_train, r_matrix, mse_list_train = train_forest(random_forest, initial_average_weight,
X_train, Y_train, alpha)
# validate
predictions_validate, mse_list_validate = test_forest(random_forest, initial_average_weight, X_validate,
Y_validate, alpha)
results = pd.DataFrame(data=np.arange(0, forest_size+1, 1), columns=['tree_intervals'])
results['Train'] = mse_list_train
results['Validation'] = mse_list_validate
fig = px.scatter(results, x='tree_intervals', y=['Train', 'Validation'], size='tree_intervals')
fig.update_layout(xaxis_title="Amount of Intervals (num.)", yaxis_title="mean square error")
fig.show()
#%% Test
predictions_test, mse_list_test = test_forest(random_forest, initial_average_weight, x_test, y_test, alpha)
# %% plot success rate vs tree intervals
fig = make_subplots(rows=1, cols=3, subplot_titles=('Train', 'Validation', 'Test'),
x_title='Amount of Intervals (num.)', y_title='mean square error')
results = pd.DataFrame(data=np.arange(0, forest_size+1, 1), columns=['tree_intervals'])
results['Train'] = mse_list_train
fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Train'], name='Train'), row=1, col=1)
results = pd.DataFrame(data=np.arange(0, forest_size + 1, 1), columns=['tree_intervals'])
results['Validation'] = mse_list_validate
fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Validation'], name='Validation'), row=1, col=2)
results = pd.DataFrame(data=np.arange(0, forest_size + 1, 1), columns=['tree_intervals'])
results['Test'] = mse_list_test
fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Test'], name='Test'), row=1, col=3)
fig.update_layout(title_text="Random Forest Gradient Boosting")
fig.show()
if __name__ == '__main__':
main()
| 41.509202 | 121 | 0.671593 | ["Apache-2.0"] | ofir-frd/Machine-Learning-Bootcamp | gradient-boosting/main.py | 6,766 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http:# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
DJANGO_APPS = [ "kafka" ]
REQUIRES_HADOOP = False
MENU_INDEX = 100
NICE_NAME = "Kafka"
ICON = "kafka/art/icon_kafka_24.png"
IS_URL_NAMESPACED = True
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
METRICS_INI = os.path.join(PROJECT_ROOT, 'metrics.ini')
| 37.75 | 74 | 0.772942 | ["Apache-2.0"] | Code-distancing/kafka-hue | kafka/src/kafka/settings.py | 1,057 | Python |
# -*- coding: utf-8 -*-
SUCCESSFUL_TERMINAL_STATUSES = ('complete', )
UNSUCCESSFUL_TERMINAL_STATUSES = ('cancelled', 'unsuccessful')
CONTRACT_REQUIRED_FIELDS = [
'awardID', 'contractID', 'items', 'suppliers',
'value', 'dateSigned',
#'documents'
]
CONTRACT_NOT_REQUIRED_FIELDS = [
'contractNumber', 'title', 'title_en', 'title_ru',
'description', 'description_en', 'description_ru'
]
| 31 | 62 | 0.689826 | ["Apache-2.0"] | Scandie/openregistry.convoy | openregistry/convoy/loki/constants.py | 403 | Python |
"""
Plugin for Czech TV (Ceska televize).
Following channels are working:
* CT1 - https://www.ceskatelevize.cz/porady/ct1/
* CT2 - https://www.ceskatelevize.cz/porady/ct2/
* CT24 - https://ct24.ceskatelevize.cz/#live
* CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/
* CT Decko - https://decko.ceskatelevize.cz/zive
* CT Art - https://www.ceskatelevize.cz/porady/art/
Additionally, videos from iVysilani archive should work as well.
"""
import json
import logging
import re
from html import unescape as html_unescape
from urllib.parse import quote
from streamlink.plugin import Plugin, PluginError, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.stream import DASHStream, HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r'https?://([\w-]+\.)*ceskatelevize\.cz'
))
class Ceskatelevize(Plugin):
ajax_url = 'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist'
_player_re = re.compile(
r'ivysilani/embed/iFramePlayer[^"]+'
)
_hash_re = re.compile(
r'hash:"([0-9a-z]+)"'
)
_playlist_info_re = re.compile(
r'{"type":"([a-z]+)","id":"([0-9]+)"'
)
_playlist_url_schema = validate.Schema({
validate.optional("streamingProtocol"): validate.text,
"url": validate.any(
validate.url(),
"Error",
"error_region"
)
})
_playlist_schema = validate.Schema({
"playlist": [{
validate.optional("type"): validate.text,
"streamUrls": {
"main": validate.url(),
}
}]
})
def _get_streams(self):
self.session.http.headers.update({'User-Agent': useragents.IPAD})
self.session.http.verify = False
log.warning('SSL certificate verification is disabled.')
# fetch requested url and find playlist info
response = self.session.http.get(self.url)
info = self._find_playlist_info(response)
if not info:
# do next try with new API
def _fallback_api(*args, **kwargs):
self.api2 = CeskatelevizeAPI2(self.session, self.url, *args, **kwargs)
return self.api2._get_streams()
# playlist info not found, let's try to find player url
player_url = self._find_player_url(response)
if not player_url:
log.debug('Cannot find playlist info or player url, do next try with new API')
return _fallback_api(res=response)
# get player url and try to find playlist info in it
response = self.session.http.get(player_url)
info = self._find_playlist_info(response)
if not info:
log.debug('Cannot find playlist info in the player url, do next try with new API')
return _fallback_api()
log.trace('{0!r}'.format(info))
data = {
'playlist[0][type]': info['type'],
'playlist[0][id]': info['id'],
'requestUrl': '/ivysilani/embed/iFramePlayer.php',
'requestSource': 'iVysilani',
'type': 'html'
}
headers = {
'x-addr': '127.0.0.1',
}
# fetch playlist url
response = self.session.http.post(
self.ajax_url,
data=data,
headers=headers
)
json_data = self.session.http.json(response, schema=self._playlist_url_schema)
log.trace('{0!r}'.format(json_data))
if json_data['url'] in ['Error', 'error_region']:
log.error('This stream is not available')
return
# fetch playlist
response = self.session.http.post(json_data['url'])
json_data = self.session.http.json(response, schema=self._playlist_schema)
log.trace('{0!r}'.format(json_data))
playlist = json_data['playlist'][0]['streamUrls']['main']
return HLSStream.parse_variant_playlist(self.session, playlist)
@classmethod
def _find_playlist_info(cls, response):
"""
Finds playlist info (type, id) in HTTP response.
:param response: Response object.
:returns: Dictionary with type and id.
"""
values = {}
matches = cls._playlist_info_re.search(response.text)
if matches:
values['type'] = matches.group(1)
values['id'] = matches.group(2)
return values
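    # Illustrative note (not in the original): for markup containing
    # {"type":"episode","id":"123456"} (hypothetical values), the regex above
    # yields {'type': 'episode', 'id': '123456'}.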
@classmethod
def _find_player_url(cls, response):
"""
Finds embedded player url in HTTP response.
:param response: Response object.
:returns: Player url (str).
"""
url = ''
matches = cls._player_re.search(response.text)
if matches:
tmp_url = matches.group(0).replace('&', '&')
if 'hash' not in tmp_url:
# there's no hash in the URL, try to find it
matches = cls._hash_re.search(response.text)
if matches:
url = tmp_url + '&hash=' + matches.group(1)
else:
url = tmp_url
return 'http://ceskatelevize.cz/' + url
class CeskatelevizeAPI2:
_player_api = 'https://playlist.ceskatelevize.cz/'
_url_re = re.compile(r'http(s)?://([^.]*.)?ceskatelevize.cz')
_playlist_info_re = re.compile(r'{\s*"type":\s*"([a-z]+)",\s*"id":\s*"(\w+)"')
_playlist_schema = validate.Schema({
"CODE": validate.contains("OK"),
"RESULT": {
"playlist": [{
"streamUrls": {
"main": validate.url(),
}
}]
}
})
_ctcomp_re = re.compile(r'data-ctcomp="Video"\sdata-video-id="(?P<val1>[^"]*)"\sdata-ctcomp-data="(?P<val2>[^"]+)">')
_ctcomp_schema = validate.Schema(
validate.text,
validate.transform(_ctcomp_re.findall),
validate.transform(lambda vl: [{"video-id": v[0], "ctcomp-data": json.loads(html_unescape(v[1]))} for v in vl])
)
_playlist_info_schema = validate.Schema({
"type": validate.text,
"id": validate.any(validate.text, int),
"key": validate.text,
"date": validate.text,
"requestSource": validate.text,
"drm": int,
validate.optional("canBePlay"): int,
validate.optional("assetId"): validate.text,
"quality": validate.text,
validate.optional("region"): int
})
def __init__(self, session, url, res=None):
self.session = session
self.url = url
self.response = res
def _get_streams(self):
if self.response is None:
infos = self.session.http.get(self.url, schema=self._ctcomp_schema)
else:
infos = self.session.http.json(self.response, schema=self._ctcomp_schema)
if not infos:
# playlist infos not found
raise PluginError('Cannot find playlist infos!')
vod_prio = len(infos) == 2
for info in infos:
try:
pl = info['ctcomp-data']['source']['playlist'][0]
except KeyError:
raise PluginError('Cannot find playlist info!')
pl = self._playlist_info_schema.validate(pl)
if vod_prio and pl['type'] != 'VOD':
continue
log.trace('{0!r}'.format(info))
if pl['type'] == 'LIVE':
data = {
"contentType": "live",
"items": [{
"id": pl["id"],
"assetId": pl["assetId"],
"key": pl["key"],
"playerType": "dash",
"date": pl["date"],
"requestSource": pl["requestSource"],
"drm": pl["drm"],
"quality": pl["quality"],
}]
}
elif pl['type'] == 'VOD':
data = {
"contentType": "vod",
"items": [{
"id": pl["id"],
"key": pl["key"],
"playerType": "dash",
"date": pl["date"],
"requestSource": pl["requestSource"],
"drm": pl["drm"],
"canBePlay": pl["canBePlay"],
"quality": pl["quality"],
"region": pl["region"]
}]
}
headers = {
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
}
data = json.dumps(data)
response = self.session.http.post(
self._player_api,
data="data={}".format(quote(data)),
headers=headers
)
json_data = self.session.http.json(response, schema=self._playlist_schema)
log.trace('{0!r}'.format(json_data))
playlist = json_data['RESULT']['playlist'][0]['streamUrls']['main']
yield from DASHStream.parse_manifest(self.session, playlist).items()
__plugin__ = Ceskatelevize
| 34.8327 | 121 | 0.539024 | ["BSD-2-Clause"] | Erk-/streamlink | src/streamlink/plugins/ceskatelevize.py | 9,161 | Python |
from __future__ import absolute_import
from sentry.testutils.cases import RuleTestCase
from sentry.rules.conditions.event_attribute import (EventAttributeCondition, MatchType)
class EventAttributeConditionTest(RuleTestCase):
rule_cls = EventAttributeCondition
def get_event(self):
event = self.create_event(
message='hello world',
platform='php',
data={
'type': 'error',
'sentry.interfaces.Http': {
'method': 'GET',
'url': 'http://example.com',
},
'sentry.interfaces.User': {
'id': '1',
'ip_address': '127.0.0.1',
'email': 'foo@example.com',
'username': 'foo',
},
'sentry.interfaces.Exception': {
'values': [
{
'type': 'SyntaxError',
'value': 'hello world',
'stacktrace': {
'frames': [
{
'filename': 'example.php',
'module': 'example',
'context_line': 'echo "hello";',
}
]
}
},
],
},
'tags': [('environment', 'production')],
'extra': {
'foo': {
'bar': 'baz',
},
'biz': ['baz'],
'bar': 'foo',
}
},
)
return event
def test_render_label(self):
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': u'\xc3',
'value': u'\xc4',
})
assert rule.render_label() == u'An event\'s \xc3 value equals \xc4'
def test_equals(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'platform',
'value': 'php',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'platform',
'value': 'python',
})
self.assertDoesNotPass(rule, event)
def test_does_not_equal(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.NOT_EQUAL,
'attribute': 'platform',
'value': 'php',
})
self.assertDoesNotPass(rule, event)
rule = self.get_rule(data={
'match': MatchType.NOT_EQUAL,
'attribute': 'platform',
'value': 'python',
})
self.assertPasses(rule, event)
def test_starts_with(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.STARTS_WITH,
'attribute': 'platform',
'value': 'ph',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.STARTS_WITH,
'attribute': 'platform',
'value': 'py',
})
self.assertDoesNotPass(rule, event)
def test_ends_with(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.ENDS_WITH,
'attribute': 'platform',
'value': 'hp',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.ENDS_WITH,
'attribute': 'platform',
'value': 'thon',
})
self.assertDoesNotPass(rule, event)
def test_contains(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.CONTAINS,
'attribute': 'platform',
'value': 'p',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.CONTAINS,
'attribute': 'platform',
'value': 'z',
})
self.assertDoesNotPass(rule, event)
def test_does_not_contain(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.NOT_CONTAINS,
'attribute': 'platform',
'value': 'p',
})
self.assertDoesNotPass(rule, event)
rule = self.get_rule(data={
'match': MatchType.NOT_CONTAINS,
'attribute': 'platform',
'value': 'z',
})
self.assertPasses(rule, event)
def test_message(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'message',
'value': 'hello world',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'message',
'value': 'php',
})
self.assertDoesNotPass(rule, event)
def test_environment(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'environment',
'value': 'production',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'environment',
'value': 'staging',
})
self.assertDoesNotPass(rule, event)
def test_http_method(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'http.method',
'value': 'get',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'http.method',
'value': 'post',
})
self.assertDoesNotPass(rule, event)
def test_http_url(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'http.url',
'value': 'http://example.com',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'http.url',
'value': 'http://foo.com',
})
self.assertDoesNotPass(rule, event)
def test_user_id(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'user.id',
'value': '1',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'user.id',
'value': '2',
})
self.assertDoesNotPass(rule, event)
def test_user_ip_address(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'user.ip_address',
'value': '127.0.0.1',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'user.ip_address',
'value': '2',
})
self.assertDoesNotPass(rule, event)
def test_user_email(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'user.email',
'value': 'foo@example.com',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'user.email',
'value': '2',
})
self.assertDoesNotPass(rule, event)
def test_user_username(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'user.username',
'value': 'foo',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'user.username',
'value': '2',
})
self.assertDoesNotPass(rule, event)
def test_exception_type(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'exception.type',
'value': 'SyntaxError',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'exception.type',
'value': 'TypeError',
})
self.assertDoesNotPass(rule, event)
def test_exception_value(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'exception.value',
'value': 'hello world',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'exception.value',
'value': 'foo bar',
})
self.assertDoesNotPass(rule, event)
def test_stacktrace_filename(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'stacktrace.filename',
'value': 'example.php',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'stacktrace.filename',
'value': 'foo.php',
})
self.assertDoesNotPass(rule, event)
def test_stacktrace_module(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'stacktrace.module',
'value': 'example',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'stacktrace.module',
'value': 'foo',
})
self.assertDoesNotPass(rule, event)
def test_stacktrace_code(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'stacktrace.code',
'value': 'echo "hello";',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'stacktrace.code',
'value': 'foo',
})
self.assertDoesNotPass(rule, event)
def test_extra_simple_value(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'extra.bar',
'value': 'foo',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'extra.bar',
'value': 'bar',
})
self.assertDoesNotPass(rule, event)
def test_extra_nested_value(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'extra.foo.bar',
'value': 'baz',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'extra.foo.bar',
'value': 'bar',
})
self.assertDoesNotPass(rule, event)
def test_extra_nested_list(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'extra.biz',
'value': 'baz',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'extra.biz',
'value': 'bar',
})
self.assertDoesNotPass(rule, event)
def test_event_type(self):
event = self.get_event()
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'type',
'value': 'error',
})
self.assertPasses(rule, event)
rule = self.get_rule(data={
'match': MatchType.EQUAL,
'attribute': 'type',
'value': 'csp',
})
self.assertDoesNotPass(rule, event)
| 29.662791 | 88 | 0.482634 | ["BSD-3-Clause"] | AlexWayfer/sentry | tests/sentry/rules/conditions/test_event_attribute.py | 12,755 | Python |
import re
class HeadersFormat(object):
@staticmethod
def call(header):
return HeadersFormat.format(re.sub(r'^HTTP(?:_|-)', '', header, flags=re.I))
@staticmethod
def format(header):
return '-'.join([v.capitalize() for v in re.split(r'_|-', header)])
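# Usage sketch (added for illustration, not part of the original file):
#   HeadersFormat.call('HTTP_CONTENT_TYPE')  # -> 'Content-Type'
#   HeadersFormat.call('X-Forwarded-For')    # -> 'X-Forwarded-For'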
| 23.75 | 84 | 0.617544 | ["MIT"] | castle/castle-python | castle/headers/format.py | 285 | Python |
a = input()
b = input()
something = a > b
if something:
print(a)
c = input()
<caret>
| 12.285714 | 17 | 0.593023 | ["Apache-2.0"] | 06needhamt/intellij-community | python/testData/codeInsight/mlcompletion/isAfterIfWithoutElseAfterSameLevelLine.py | 86 | Python |
#basic example of dict syntax
my_dict = {'key1':'value1','key2':'value2','key3':'value3'}
print(my_dict)
print(my_dict['key3'])
#example 2
prices = {'apple':100,'banana':60,'guava':90,'rice':50}
print(prices['rice'])
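#a few more examples (added for illustration, using the dicts defined above)
print(prices.get('mango', 0)) #get() returns a default value (0 here) when the key is missing
prices['mango'] = 120 #add a new key/value pair
for fruit, cost in prices.items(): #loop over key/value pairs
    print(fruit, cost)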
| 23.888889 | 59 | 0.669767 | ["MIT"] | alok-techqware/basic_python_practicse | python_basics/Dictionary/dict.py | 215 | Python |
import numpy as np
from time import sleep
import struct
import matplotlib.pyplot as plt
# input raw samples from MCU
# in_data = 'out/data_raw.txt'
in_data = 'out/8bit.txt'
fs = 5000
in_bits = 8
# load file
raw = np.loadtxt(in_data)
# Stats
print("Max=%d Min=%d Mean=%d swing=%d %.1fbits" % \
(np.max(raw), np.min(raw), np.mean(raw),
np.max(raw) - np.min(raw), np.log2(np.max(raw) - np.min(raw))))
# generate different bit audio
data_depth = {}
print(raw)
data_depth['16bit'] = 2**(in_bits-16)*(raw / (2**(in_bits-16))).astype('int')
print(data_depth['16bit'])
data_depth['10bit'] = 2**(in_bits-10)*(raw / (2**(in_bits-10))).astype('int')
data_depth['8bit'] = 2**(in_bits-8)*(raw / (2**(in_bits-8))).astype('int')
data_depth['7bit'] = 2**(in_bits-7)*(raw / (2**(in_bits-7))).astype('int')
data_depth['6bit'] = 2**(in_bits-6)*(raw / (2**(in_bits-6))).astype('int')
data_depth['2bit'] = 2**(in_bits-2)*(raw / (2**(in_bits-2))).astype('int')
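# Illustrative note (added): each line above keeps only the top N bits of the 8-bit samples
# by integer-dividing and re-multiplying by 2**(in_bits-N). For the 6-bit case the step is
# 2**(8-6) = 4, so a raw sample of 201 becomes 201 // 4 * 4 = 200, i.e. its two
# least-significant bits are zeroed.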
# normalize and zero mean all
for key in data_depth:
data_depth[key] = data_depth[key] - np.mean(data_depth[key])
data_depth[key] = data_depth[key] / np.max(np.abs(data_depth[key]))
# write audio files
from scipy.io.wavfile import write
for key in data_depth:
write('out/test'+key+'.wav', fs, data_depth[key])
# plot some
t = np.arange(0, len(raw)/fs, 1/fs)
fig, axs = plt.subplots(1, 1)
axs.step(t, data_depth['16bit'], label='16bit')
axs.step(t, data_depth['8bit'], label='8bit')
axs.step(t, data_depth['7bit'], label='7bit')
axs.step(t, data_depth['6bit'], label='6bit')
axs.step(t, data_depth['2bit'], label='2bit')
# axs.set_xlim(0, 6e-3)
# axs.set_ylim(-1, 1)
axs.set_xlabel('time [s]')
axs.set_ylabel('mic data')
axs.grid(True)
axs.legend()
fig.tight_layout()
plt.show()
| 28.080645 | 77 | 0.659391 | ["Apache-2.0"] | noah95/edison | audio/edison/audio/bit_depth_analyze.py | 1,741 | Python |
#!/home/pi/Documents/Codigos/API_Estacao/bin/python3
"""Simple FTDI EEPROM configurator.
"""
# Copyright (c) 2019-2020, Emmanuel Blot <emmanuel.blot@free.fr>
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
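# Illustrative invocations (hypothetical FTDI device URL; option names taken from the
# argparse definitions in main() below):
#   python <this script> ftdi:///1 -x              # dump the EEPROM content as ASCII
#   python <this script> ftdi:///1 -s SN123456 -u  # set the serial number and commit it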
from argparse import ArgumentParser, FileType
from io import StringIO
from logging import Formatter, StreamHandler, DEBUG, ERROR
from sys import modules, stderr
from textwrap import fill
from traceback import format_exc
from pyftdi import FtdiLogger
from pyftdi.eeprom import FtdiEeprom
from pyftdi.ftdi import Ftdi
from pyftdi.misc import add_custom_devices, hexdump
#pylint: disable-msg=too-many-locals
#pylint: disable-msg=too-many-branches
#pylint: disable-msg=too-many-statements
def main():
"""Main routine"""
debug = False
try:
argparser = ArgumentParser(description=modules[__name__].__doc__)
argparser.add_argument('device', nargs='?', default='ftdi:///?',
help='serial port device name')
argparser.add_argument('-x', '--hexdump', action='store_true',
help='dump EEPROM content as ASCII')
argparser.add_argument('-X', '--hexblock', type=int,
help='dump EEPROM as indented hexa blocks')
argparser.add_argument('-i', '--input', type=FileType('rt'),
help='input ini file to load EEPROM content')
argparser.add_argument('-l', '--load', default='all',
choices=('all', 'raw', 'values'),
help='section(s) to load from input file')
argparser.add_argument('-o', '--output', type=FileType('wt'),
help='output ini file to save EEPROM content')
argparser.add_argument('-s', '--serial-number',
help='set serial number')
argparser.add_argument('-m', '--manufacturer',
help='set manufacturer name')
argparser.add_argument('-p', '--product',
help='set product name')
argparser.add_argument('-c', '--config', action='append',
help='change/configure a property '
'as key=value pair')
argparser.add_argument('-e', '--erase', action='store_true',
help='erase the whole EEPROM content')
argparser.add_argument('-u', '--update', action='store_true',
help='perform actual update, use w/ care')
argparser.add_argument('-P', '--vidpid', action='append',
help='specify a custom VID:PID device ID, '
'may be repeated')
argparser.add_argument('-V', '--virtual', type=FileType('r'),
help='use a virtual device, specified as YaML')
argparser.add_argument('-v', '--verbose', action='count', default=0,
help='increase verbosity')
argparser.add_argument('-d', '--debug', action='store_true',
help='enable debug mode')
args = argparser.parse_args()
debug = args.debug
if not args.device:
argparser.error('Serial device not specified')
loglevel = max(DEBUG, ERROR - (10 * args.verbose))
loglevel = min(ERROR, loglevel)
if debug:
formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '
'%(message)s', '%H:%M:%S')
else:
formatter = Formatter('%(message)s')
FtdiLogger.set_formatter(formatter)
FtdiLogger.set_level(loglevel)
FtdiLogger.log.addHandler(StreamHandler(stderr))
if args.virtual:
#pylint: disable-msg=import-outside-toplevel
from pyftdi.usbtools import UsbTools
# Force PyUSB to use PyFtdi test framework for USB backends
UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
# Ensure the virtual backend can be found and is loaded
backend = UsbTools.find_backend()
loader = backend.create_loader()()
loader.load(args.virtual)
try:
add_custom_devices(Ftdi, args.vidpid, force_hex=True)
except ValueError as exc:
argparser.error(str(exc))
eeprom = FtdiEeprom()
eeprom.open(args.device)
if args.erase:
eeprom.erase()
if args.input:
eeprom.load_config(args.input, args.load)
if args.serial_number:
eeprom.set_serial_number(args.serial_number)
if args.manufacturer:
eeprom.set_manufacturer_name(args.manufacturer)
if args.product:
eeprom.set_product_name(args.product)
for conf in args.config or []:
if conf == '?':
helpstr = ', '.join(sorted(eeprom.properties))
print(fill(helpstr, initial_indent=' ',
subsequent_indent=' '))
exit(1)
for sep in ':=':
if sep in conf:
name, value = conf.split(sep, 1)
if not value:
argparser.error('Configuration %s without value' %
conf)
helpio = StringIO()
eeprom.set_property(name, value, helpio)
helpstr = helpio.getvalue()
if helpstr:
print(fill(helpstr, initial_indent=' ',
subsequent_indent=' '))
exit(1)
break
else:
argparser.error('Missing name:value separator in %s' % conf)
if args.hexdump:
print(hexdump(eeprom.data))
if args.hexblock is not None:
indent = ' ' * args.hexblock
for pos in range(0, len(eeprom.data), 16):
hexa = ' '.join(['%02x' % x for x in eeprom.data[pos:pos+16]])
print(indent, hexa, sep='')
if args.update:
if eeprom.commit(False):
eeprom.reset_device()
if args.verbose > 0:
eeprom.dump_config()
if args.output:
eeprom.save_config(args.output)
except (ImportError, IOError, NotImplementedError, ValueError) as exc:
print('\nError: %s' % exc, file=stderr)
if debug:
print(format_exc(chain=False), file=stderr)
exit(1)
except KeyboardInterrupt:
exit(2)
if __name__ == '__main__':
main()
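# (added usage note, not part of the original tool) Typical invocations, assuming
# a single FTDI device reachable at the URL 'ftdi:///1'; the serial number and
# file name below are placeholders:
#   ftconf.py ftdi:///1 -x                # dump the EEPROM content as hex
#   ftconf.py ftdi:///1 -o backup.ini     # save the EEPROM content to an ini file
#   ftconf.py ftdi:///1 -s FT123456 -u    # set a serial number and commit it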
| 42.050314 | 78 | 0.542327 | [
"Apache-2.0"
] | andrario/API_Estacao | bin/ftconf.py | 6,686 | Python |
#!/usr/bin/env python3
######################################################
## Calibrating the extrinsics between T265 and D4xx ##
## Based on this example: https://github.com/IntelRealSense/librealsense/pull/4355
## with changes and modifications.
######################################################
######################################################
#
# General steps:
# 1. Mount the two cameras rigidly
# 2. Print any one of the checkerboards from: https://markhedleyjones.com/projects/calibration-checkerboard-collection
# - The default settings in this script are for: https://markhedleyjones.com/storage/checkerboards/Checkerboard-A4-25mm-8x6.pdf
# - Measure the actual printed grid size of the squares and modify size.
# 3. Modify the script:
# - Change grid_H, grid_W and size according to the actual printed checkerboard.
# - Change the path and file_name if necessary (ex: use this script as standalone).
# 4. Run the script online:
# - python calibrate_extrinsics.py
# 5. The results include intrinsics (save file) and extrinsics (terminal output)
#
######################################################
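######################################################
# (added usage note, not part of the original script) Example invocations with
# placeholder serial numbers and a 25 mm printed square size:
# python calibrate_extrinsics.py --grid_H 8 --grid_W 6 --size 0.025
# python calibrate_extrinsics.py --SN_T265 905312110000 --SN_D4xx 923322070000
# python calibrate_extrinsics.py --calibrate --path calibration_results
######################################################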
from __future__ import print_function
import pyrealsense2 as rs
import numpy as np
np.set_printoptions(suppress=True,precision=5)
import cv2
assert cv2.__version__[0] >= '3', 'The fisheye module requires opencv version >= 3.0.0'
import os
import shutil
import json
import argparse
import glob
from collections import OrderedDict
parser = argparse.ArgumentParser()
parser.add_argument('--SN_T265', help='serial number of T265')
parser.add_argument('--SN_D4xx', help='serial number of D4xx')
parser.add_argument('--path', default="calibration_results", help='image path')
parser.add_argument('--file_name', default="/intrinsics.json", help='intrinsics calibration file name')
parser.add_argument('--save_tmp', default=False, help='save the temporary files of this program, useful for debugging purposes')
parser.add_argument('--grid_H', default=8, help='grid height (inner corners)')
parser.add_argument('--grid_W', default=6, help='grid width (inner corners)')
parser.add_argument('--size', default=0.0282, help='grid side length')
parser.add_argument('--calibrate', default=False, help='run calibration (only)', action='store_true')
parser.add_argument('--visualize', default=True, help='with GUI', action='store_true')
args = parser.parse_args()
CHECKERBOARD = (args.grid_H, args.grid_W)
SIDE_LENGTH = args.size
tmp_folder = args.path + "/tmp"
def add_camera_calibration(intrinsics, streams = None):
cam = {}
cam['center_px'] = [intrinsics.ppx, intrinsics.ppy]
cam['focal_length_px'] = [intrinsics.fx, intrinsics.fy]
cam['distortion'] = {}
cam['distortion']['type'] = 'kannalabrandt4'
cam['distortion']['k'] = intrinsics.coeffs[:4]
if streams:
ext = streams["cam1"].get_extrinsics_to(streams["pose"]) # w.r.t.
#print(ext)
cam["extrinsics"] = {}
cam["extrinsics"]["T"] = ext.translation
#print(ext.rotation)
cam["extrinsics"]["R"] = ext.rotation
return cam
def save_intrinsics(directory, file_name, intrinsics, streams):
D = OrderedDict() # in order (cam1,cam2)
D['cameras'] = []
D['cameras'].append(add_camera_calibration(intrinsics["cam1"], streams))
D['cameras'].append(add_camera_calibration(intrinsics["cam2"]))
if not os.path.exists(directory):
os.mkdir(directory)
with open(directory + file_name, 'w') as f:
json.dump(D, f, indent=4)
print("Intrinsics output written to " + directory + file_name)
def read_calibration(cam, extrinsics = False):
#print("read_calibration")
# intrinsics
K = np.array([[cam['focal_length_px'][0], 0, cam['center_px'][0]],
[ 0, cam['focal_length_px'][1], cam['center_px'][1]],
[ 0, 0, 1]])
D = np.array(cam['distortion']['k'])
if extrinsics:
H = np.eye(4)
H[:3,:3] = np.reshape(cam["extrinsics"]["R"],(3,3))
H[:3,3] = cam["extrinsics"]["T"]
#print(H)
return (K, D, H)
return (K, D)
def load_calibration(directory, file_name):
with open(directory + file_name, 'r') as f:
D = json.load(f)
(K1, D1, H1) = read_calibration(D['cameras'][0], True)
(K2, D2) = read_calibration(D['cameras'][1])
return (K1, D1, K2, D2, H1)
def find_realsense_serial_no(type):
camera_name = ['Intel RealSense T265', 'Intel RealSense D435']
# Get realsense pipeline handle
pipe = rs.pipeline()
# Find the T265
devices = rs.context().devices
for i in range(len(devices)):
if (devices[i].get_info(rs.camera_info.name) == camera_name[type]):
print('Found one connected ' + camera_name[type] + ' with serial no:', devices[i].get_info(rs.camera_info.serial_number))
return devices[i].get_info(rs.camera_info.serial_number)
print('No ' + camera_name[type] + ' found, please check connection or input serial manually')
return None
if not args.calibrate:
# Obtain the serial number of the cameras, either automatically or from user's input
print("Trying to connect devices...")
serial_t265 = None
serial_d4xx = None
if (not args.SN_T265):
serial_t265 = find_realsense_serial_no(0)
else:
serial_t265 = args.SN_T265
if (not args.SN_D4xx):
serial_d4xx = find_realsense_serial_no(1)
else:
serial_d4xx = args.SN_D4xx
if (not serial_t265) or (not serial_d4xx):
print("Specify serial numbers --SN_T265 and --SN_D4xx (for online calibration, or --calibrate for prerecorded images with --path path to folder)")
exit()
# cam 1
pipe1 = rs.pipeline()
cfg1 = rs.config()
cfg1.enable_device(serial_t265)
pipe1.start(cfg1)
# cam 2
pipe2 = rs.pipeline()
cfg2 = rs.config()
cfg2.enable_device(serial_d4xx)
cfg2.enable_all_streams()
pipe2_profile = pipe2.start(cfg2)
sensor_depth = pipe2_profile.get_device().first_depth_sensor()
sensor_depth.set_option(rs.option.emitter_enabled, 0) # turn OFF projector
try:
        # Retrieve the stream and intrinsic properties for both cameras
profile1 = pipe1.get_active_profile()
profile2 = pipe2.get_active_profile()
        # future improvements: make both streams configurable
streams = {"cam1" : profile1.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(),
"pose" : profile1.get_stream(rs.stream.pose),
"cam2" : profile2.get_stream(rs.stream.infrared, 1).as_video_stream_profile()} # IR1
#"cam2" : profile1.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()} # test
intrinsics = {"cam1" : streams["cam1"].get_intrinsics(),
"cam2" : streams["cam2"].get_intrinsics()}
#print("cam1:", intrinsics["cam1"])
#print("cam2:", intrinsics["right"])
save_intrinsics(args.path, args.file_name, intrinsics, streams)
# capture images
i = 0
print("Press 's' to save image.\nPress 'q' or 'c' to quit recording and start the calibration.")
while True:
# cam 1
frames1 = pipe1.wait_for_frames()
f_fe1 = frames1.get_fisheye_frame(1) # left fisheye
f_fe2 = frames1.get_fisheye_frame(2) # right fisheye
if not f_fe1 or not f_fe2:
continue
img_fe1 = np.asanyarray(f_fe1.get_data())
img_fe2 = np.asanyarray(f_fe2.get_data())
# cam 2
frames2 = pipe2.wait_for_frames()
f_ir1 = frames2.get_infrared_frame(1) # left infrared
f_ir2 = frames2.get_infrared_frame(2) # right infrared
f_color = frames2.get_color_frame()
if not f_ir1 or not f_ir2 or not f_color:
continue
img_ir1 = np.asanyarray(f_ir1.get_data())
img_ir2 = np.asanyarray(f_ir2.get_data())
img_color = np.asanyarray(f_color.get_data())
# TODO: configure streams
img1 = img_fe1
img2 = img_ir1
# display
cv2.imshow('cam1', img1)
cv2.imshow('cam2', img2)
# save or quit
k = cv2.waitKey(1)
if k == ord('s'):
print("'s' key pressed. Saving temp images..")
if not os.path.exists(tmp_folder):
os.mkdir(tmp_folder)
cv2.imwrite(tmp_folder + '/fe1_' + str(i) + '.png', img_fe1)
cv2.imwrite(tmp_folder + '/fe2_' + str(i) + '.png', img_fe2)
cv2.imwrite(tmp_folder + '/ir1_' + str(i) + '.png', img_ir1)
# cv2.imwrite(tmp_folder+ '/ir2_' + str(i) + '.png', img_ir2)
cv2.imwrite(tmp_folder + '/color_' + str(i) + '.png', img_color)
print("Saved temp images in temp folder " + tmp_folder)
i = i+1
if k == ord('q') or k == ord('c'):
break
finally:
pipe1.stop()
pipe2.stop()
# calibrate
print("Calibrate extrinsics now...")
# arrays to store detections
P3 = [] # w.r.t. target frame
P2_1 = [] # in image #1
P2_2 = [] # in image #2
# TODO: configure streams
images1 = glob.glob(tmp_folder + '/fe1_*')
#images2 = glob.glob(tmp_folder + '/fe2_*') # test
images2 = glob.glob(tmp_folder + '/ir1_*')
images1.sort()
images2.sort()
#print(images1)
#print(images2)
if len(images1) == len(images2) == 0:
print("No images found. Exit.")
exit(0)
try:
for i, fname in enumerate(images1):
img1 = cv2.imread(images1[i])
img2 = cv2.imread(images2[i])
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
# detect
ret1, corners1 = cv2.findChessboardCorners(gray1, CHECKERBOARD, None)
ret2, corners2 = cv2.findChessboardCorners(gray2, CHECKERBOARD, None)
if ret1 and ret2:
# subpixel refinement
criteria_sub = (cv2.TermCriteria_COUNT + cv2.TERM_CRITERIA_EPS, 10, 1e-1)
rt = cv2.cornerSubPix(gray1, corners1, (7, 7), (-1, -1), criteria_sub)
P2_1.append(corners1)
if args.visualize:
ret1 = cv2.drawChessboardCorners(img1, CHECKERBOARD, corners1, ret1)
cv2.imshow("img1", img1)
cv2.waitKey(200)
rt = cv2.cornerSubPix(gray2, corners2, (7, 7), (-1, -1), criteria_sub)
P2_2.append(corners2)
if args.visualize:
ret2 = cv2.drawChessboardCorners(img2, CHECKERBOARD, corners2, ret2)
cv2.imshow("img2", img2)
cv2.waitKey(200)
except cv2.error as e:
print("Error: ", e)
# calibration (stereo extrinsics)
R = np.zeros((1, 1, 3), dtype=np.float64)
T = np.zeros((1, 1, 3), dtype=np.float64)
N = len(P2_1) # number of successful detections
p3d = np.zeros( (CHECKERBOARD[0]*CHECKERBOARD[1], 1, 3) , np.float64)
p3d[:,0, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
# fisheye.stereoCalibrate needs different data structures/dimensions than cv2.stereoCalibrate, i.e. (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2/3)!
P3 = np.array([p3d]*N, dtype=np.float64)
P2_1 = np.asarray(P2_1, dtype=np.float64)
P2_2 = np.asarray(P2_2, dtype=np.float64)
P3 = np.reshape(P3, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 3))*SIDE_LENGTH
P2_1 = np.reshape(P2_1, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2))
P2_2 = np.reshape(P2_2, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2))
(K1, D1, K2, D2, H1) = load_calibration(args.path, args.file_name)
try:
(rms, _, _, _, _, R, T) = \
cv2.fisheye.stereoCalibrate(
P3,
P2_1,
P2_2,
K1,
D1,
K2,
D2,
(0,0), # only used to initialize intrinsics when no intrinsics provided
R,
T,
cv2.fisheye.CALIB_FIX_INTRINSIC # extrinsics only
)
except cv2.error as e:
print("Error: ", e)
print("Please make sure that the checkerboard exists in the images. See tmp images in " + tmp_folder + " to debug.")
exit()
print("RMS:", rms)
H_cam2_cam1 = np.eye(4)
H_cam2_cam1[:3,:3] = R
H_cam2_cam1[:3,3] = T.flatten()
# w.r.t. pose
H_ir1_fe1 = H_cam2_cam1 # TODO: configure
H_pose_fe1 = H1
H_pose_ir1 = H_pose_fe1.dot( np.linalg.inv(H_ir1_fe1) )
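# (added comment) composition of homogeneous transforms: ir1->pose is obtained by
# first mapping points from the IR1 frame into the fisheye frame (the inverse of
# H_ir1_fe1) and then from the fisheye frame into the pose frame (H_pose_fe1)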
print("H (ir1 wrt pose) =", H_pose_ir1)
fn = args.path + "/H.txt"
np.savetxt(fn, H_pose_ir1, fmt='%.9f')
print("Extrinsic output written to", fn)
if not args.save_tmp:
if os.path.isdir(tmp_folder):
shutil.rmtree(tmp_folder, ignore_errors=True)
print("Temporary files deleted. If you wish to keep the tmp files, use --save_tmp True.") | 37.34593 | 154 | 0.617887 | [
"Apache-2.0"
] | mikobski/Critbot | robot/src/vision_to_mavros/scripts/calibrate_extrinsics.py | 12,847 | Python |
"""Implementation of Rule L044."""
from typing import Optional
from sqlfluff.core.rules.analysis.select_crawler import Query, SelectCrawler
from sqlfluff.core.parser import BaseSegment
from sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext
from sqlfluff.core.rules.doc_decorators import document_groups
from sqlfluff.core.rules.functional import sp
class RuleFailure(Exception):
"""Exception class for reporting lint failure inside deeply nested code."""
def __init__(self, anchor: BaseSegment):
self.anchor: BaseSegment = anchor
@document_groups
class Rule_L044(BaseRule):
"""Query produces an unknown number of result columns.
**Anti-pattern**
Querying all columns using ``*`` produces a query result where the number
or ordering of columns changes if the upstream table's schema changes.
This should generally be avoided because it can cause slow performance,
cause important schema changes to go undetected, or break production code.
For example:
* If a query does ``SELECT t.*`` and is expected to return columns ``a``, ``b``,
and ``c``, the actual columns returned will be wrong/different if columns
are added to or deleted from the input table.
* ``UNION`` and ``DIFFERENCE`` clauses require the inputs have the same number
of columns (and compatible types).
* ``JOIN`` queries may break due to new column name conflicts, e.g. the
query references a column ``c`` which initially existed in only one input
table but a column of the same name is added to another table.
* ``CREATE TABLE (<<column schema>>) AS SELECT *``
.. code-block:: sql
WITH cte AS (
SELECT * FROM foo
)
SELECT * FROM cte
UNION
SELECT a, b FROM t
**Best practice**
Somewhere along the "path" to the source data, specify columns explicitly.
.. code-block:: sql
WITH cte AS (
SELECT * FROM foo
)
SELECT a, b FROM cte
UNION
SELECT a, b FROM t
"""
groups = ("all",)
_works_on_unparsable = False
def _handle_alias(self, selectable, alias_info, query):
select_info_target = SelectCrawler.get(
query, alias_info.from_expression_element
)[0]
if isinstance(select_info_target, str):
# It's an alias to an external table whose
# number of columns could vary without our
# knowledge. Thus, warn.
self.logger.debug(
f"Query target {select_info_target} is external. Generating warning."
)
raise RuleFailure(selectable.selectable)
else:
# Handle nested SELECT.
self._analyze_result_columns(select_info_target)
def _analyze_result_columns(self, query: Query):
"""Given info on a list of SELECTs, determine whether to warn."""
# Recursively walk from the given query (select_info_list) to any
        # wildcard columns in the select targets. If every wildcard eventually
# resolves to a query without wildcards, all is well. Otherwise, warn.
if not query.selectables:
return # pragma: no cover
for selectable in query.selectables:
self.logger.debug(f"Analyzing query: {selectable.selectable.raw}")
for wildcard in selectable.get_wildcard_info():
if wildcard.tables:
for wildcard_table in wildcard.tables:
self.logger.debug(
f"Wildcard: {wildcard.segment.raw} has target "
"{wildcard_table}"
)
# Is it an alias?
alias_info = selectable.find_alias(wildcard_table)
if alias_info:
# Found the alias matching the wildcard. Recurse,
# analyzing the query associated with that alias.
self._handle_alias(selectable, alias_info, query)
else:
# Not an alias. Is it a CTE?
cte = query.lookup_cte(wildcard_table)
if cte:
# Wildcard refers to a CTE. Analyze it.
self._analyze_result_columns(cte)
else:
# Not CTE, not table alias. Presumably an
# external table. Warn.
self.logger.debug(
f"Query target {wildcard_table} is external. "
"Generating warning."
)
raise RuleFailure(selectable.selectable)
else:
# No table was specified with the wildcard. Assume we're
# querying from a nested select in FROM.
query_list = SelectCrawler.get(
query, query.selectables[0].selectable
)
for o in query_list:
if isinstance(o, Query):
self._analyze_result_columns(o)
return
self.logger.debug(
f'Query target "{query.selectables[0].selectable.raw}" has no '
"targets. Generating warning."
)
raise RuleFailure(query.selectables[0].selectable)
def _eval(self, context: RuleContext) -> Optional[LintResult]:
"""Outermost query should produce known number of columns."""
start_types = ["select_statement", "set_expression", "with_compound_statement"]
if context.segment.is_type(
*start_types
) and not context.functional.parent_stack.any(sp.is_type(*start_types)):
crawler = SelectCrawler(context.segment, context.dialect)
# Begin analysis at the outer query.
if crawler.query_tree:
try:
return self._analyze_result_columns(crawler.query_tree)
except RuleFailure as e:
return LintResult(anchor=e.anchor)
return None
| 41.633987 | 87 | 0.571115 | [
"MIT"
] | R7L208/sqlfluff | src/sqlfluff/rules/L044.py | 6,370 | Python |
import os, paramiko, time, schedule, smtplib, ssl
from datetime import datetime
from email.message import EmailMessage
host='localhost'
port='5432'
user='postgres'
password='admin'
database='testdb'
# local backup path
local_dir = 'C:\\Users\\Kamla\\projets\\auto-backup-sqldb\\backup\\'
#local_dir = 'Path to the folder of the database to back up\\'
# remote backup path
remote_dir = '/C:/Users/vmwin10/Documents/ftpfile/'
def job():
print("Backup working...")
filestamp = time.strftime('%Y-%m-%dT%H-%M-%S.%z')
    # name of the SQL dump file that pg_dump will generate
database_remote = database+"_"+filestamp+".bak.sql"
PASS="set PGPASSWORD=%s" % (password)
    # run pg_dump to create a local backup
    # the dump files are saved in the 'backup' directory
os.system("(cd backup) && ("+PASS+") && (pg_dump -h %s -p %s -U %s -f %s -C -d %s)" % (host, port, user, database_remote, database))
print("Database dumped to "+database_remote)
    # start the SFTP transfer
ssh_client=paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # connect to the machine where the backup file will be stored
ssh_client.connect(hostname='192.168.126.2',username='vmwin10',password='vmwin10')
ftp_client=ssh_client.open_sftp()
    # upload the local file to the remote directory
ftp_client.put(local_dir+database_remote,remote_dir+database_remote)
ftp_client.close()
print("Successfull Backup")
# A chaque backup un email est envoye
msg = EmailMessage()
msg.set_content("Un backup vient d'etre effectue")
msg["Subject"] = "Email de Backup"
msg["From"] = "ksb.cmr@gmail.com"
msg["To"] = "test@mail.com"
context=ssl.create_default_context()
with smtplib.SMTP("smtp.gmail.com", port=587) as smtp:
smtp.starttls(context=context)
smtp.login(msg["From"], "password")
smtp.send_message(msg)
# scheduling: the active line below runs the backup every 3 seconds; the hourly
# and other schedules are shown commented out
schedule.every(3).seconds.do(job)
#schedule.every(15).minutes.do(job)
#schedule.every().hour.do(job)
#schedule.every().day.at("10:30").do(job)
#schedule.every(10).to(10).minutes.do(job)
#schedule.every().monday.do(job)
#schedule.every().wednesday.at("15:00").do(job)
#schedule.every().minute.at(":15").do(job)
while True:
schedule.run_pending()
time.sleep(1)
| 33.315068 | 136 | 0.690789 | [
"MIT"
] | mykamla/auto-backup-sqldb | pgsqlbackup.py | 2,432 | Python |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
from datetime import date
from pathlib import Path
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
PACKAGE_DIR = ROOT_DIR / "email_service"
DOCS_DIR = ROOT_DIR / "email_service"
version_file_path = PACKAGE_DIR / "version.py"
code_obj = compile(version_file_path.read_text(), version_file_path, "exec")
__version__ = dict()
exec(code_obj, __version__)
version = __version__["__version__"]
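# (added note) compiling and exec'ing version.py reads __version__ without
# importing the email_service package itself, so building the docs does not
# require the package's runtime dependencies to be installed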
# -- Project information -----------------------------------------------------
project = "Email Service"
copyright = """2021, Aditya Raman"""
author = "Aditya Raman"
# The full version, including alpha/beta/rc tags
version = release = f"v{version}"
today = str(date.today())
language = "en"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_rtd_theme",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme" # alternate: "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
# html_css_files = []
#
# html_style = ""
master_doc = "index"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
"papersize": "a4paper",
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
"preamble": "\\addto\\captionsenglish{\\renewcommand{\\contentsname}{Table of contents}}",
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
latex_show_urls = "footnote"
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
add_function_parentheses = False
show_authors = True
| 33.525773 | 94 | 0.679582 | [
"MIT"
] | ramanaditya/email-service | docs/conf.py | 3,252 | Python |
# NOTE - Still seems to be a leak here somewhere
# gateway count doesn't hit zero. Hence the print statements!
import sys
sys.coinit_flags = 0 # Must be free-threaded!
import win32api, pythoncom, time
import pywintypes
import os
import winerror
import win32com
import win32com.client.connect
from win32com.test.util import CheckClean
from win32com.client import constants, DispatchBaseClass, CastTo, VARIANT
from win32com.test.util import RegisterPythonServer
from pywin32_testutil import str2memory
import datetime
import decimal
import win32timezone
importMsg = "**** PyCOMTest is not installed ***\n PyCOMTest is a Python test specific COM client and server.\n It is likely this server is not installed on this machine\n To install the server, you must get the win32com sources\n and build it using MS Visual C++"
error = Exception
# This test uses a Python implemented COM server - ensure correctly registered.
RegisterPythonServer(
os.path.join(os.path.dirname(__file__), "..", "servers", "test_pycomtest.py"),
"Python.Test.PyCOMTest",
)
from win32com.client import gencache
try:
gencache.EnsureModule("{6BCDCB60-5605-11D0-AE5F-CADD4C000000}", 0, 1, 1)
except pythoncom.com_error:
print("The PyCOMTest module can not be located or generated.")
print(importMsg)
raise RuntimeError(importMsg)
# We had a bug where RegisterInterfaces would fail if gencache had
# already been run - exercise that here
from win32com import universal
universal.RegisterInterfaces("{6BCDCB60-5605-11D0-AE5F-CADD4C000000}", 0, 1, 1)
verbose = 0
# convert a normal int to a long int - used to avoid, eg, '1L' for py3k
# friendliness
def ensure_long(int_val):
if sys.version_info > (3,):
# py3k - no such thing as a 'long'
return int_val
# on py2x, we just use an expression that results in a long
return 0x100000000 - 0x100000000 + int_val
def check_get_set(func, arg):
got = func(arg)
if got != arg:
raise error("%s failed - expected %r, got %r" % (func, arg, got))
def check_get_set_raises(exc, func, arg):
try:
got = func(arg)
except exc as e:
pass # what we expect!
else:
raise error(
"%s with arg %r didn't raise %s - returned %r" % (func, arg, exc, got)
)
def progress(*args):
if verbose:
for arg in args:
print(arg, end=" ")
print()
def TestApplyResult(fn, args, result):
try:
fnName = str(fn).split()[1]
except:
fnName = str(fn)
progress("Testing ", fnName)
pref = "function " + fnName
rc = fn(*args)
if rc != result:
raise error("%s failed - result not %r but %r" % (pref, result, rc))
def TestConstant(constName, pyConst):
try:
comConst = getattr(constants, constName)
except:
raise error("Constant %s missing" % (constName,))
if comConst != pyConst:
raise error(
"Constant value wrong for %s - got %s, wanted %s"
% (constName, comConst, pyConst)
)
# Simple handler class. This demo only fires one event.
class RandomEventHandler:
def _Init(self):
self.fireds = {}
def OnFire(self, no):
try:
self.fireds[no] = self.fireds[no] + 1
except KeyError:
self.fireds[no] = 0
def OnFireWithNamedParams(self, no, a_bool, out1, out2):
# This test exists mainly to help with an old bug, where named
# params would come in reverse.
Missing = pythoncom.Missing
if no is not Missing:
# We know our impl called 'OnFire' with the same ID
assert no in self.fireds
assert no + 1 == out1, "expecting 'out1' param to be ID+1"
assert no + 2 == out2, "expecting 'out2' param to be ID+2"
# The middle must be a boolean.
assert a_bool is Missing or type(a_bool) == bool, "middle param not a bool"
return out1 + 2, out2 + 2
def _DumpFireds(self):
if not self.fireds:
print("ERROR: Nothing was received!")
for firedId, no in self.fireds.items():
progress("ID %d fired %d times" % (firedId, no))
# A simple handler class that derives from object (ie, a "new style class") -
# only relevant for Python 2.x (ie, the 2 classes should be identical in 3.x)
class NewStyleRandomEventHandler(object):
def _Init(self):
self.fireds = {}
def OnFire(self, no):
try:
self.fireds[no] = self.fireds[no] + 1
except KeyError:
self.fireds[no] = 0
def OnFireWithNamedParams(self, no, a_bool, out1, out2):
# This test exists mainly to help with an old bug, where named
# params would come in reverse.
Missing = pythoncom.Missing
if no is not Missing:
# We know our impl called 'OnFire' with the same ID
assert no in self.fireds
assert no + 1 == out1, "expecting 'out1' param to be ID+1"
assert no + 2 == out2, "expecting 'out2' param to be ID+2"
# The middle must be a boolean.
assert a_bool is Missing or type(a_bool) == bool, "middle param not a bool"
return out1 + 2, out2 + 2
def _DumpFireds(self):
if not self.fireds:
print("ERROR: Nothing was received!")
for firedId, no in self.fireds.items():
progress("ID %d fired %d times" % (firedId, no))
# Test everything which can be tested using both the "dynamic" and "generated"
# COM objects (or when there are very subtle differences)
def TestCommon(o, is_generated):
progress("Getting counter")
counter = o.GetSimpleCounter()
TestCounter(counter, is_generated)
progress("Checking default args")
rc = o.TestOptionals()
if rc[:-1] != ("def", 0, 1) or abs(rc[-1] - 3.14) > 0.01:
print(rc)
raise error("Did not get the optional values correctly")
rc = o.TestOptionals("Hi", 2, 3, 1.1)
if rc[:-1] != ("Hi", 2, 3) or abs(rc[-1] - 1.1) > 0.01:
print(rc)
raise error("Did not get the specified optional values correctly")
rc = o.TestOptionals2(0)
if rc != (0, "", 1):
print(rc)
raise error("Did not get the optional2 values correctly")
rc = o.TestOptionals2(1.1, "Hi", 2)
if rc[1:] != ("Hi", 2) or abs(rc[0] - 1.1) > 0.01:
print(rc)
raise error("Did not get the specified optional2 values correctly")
progress("Checking getting/passing IUnknown")
check_get_set(o.GetSetUnknown, o)
progress("Checking getting/passing IDispatch")
# This might be called with either the interface or the CoClass - but these
# functions always return from the interface.
expected_class = o.__class__
# CoClass instances have `default_interface`
expected_class = getattr(expected_class, "default_interface", expected_class)
if not isinstance(o.GetSetDispatch(o), expected_class):
raise error("GetSetDispatch failed: %r" % (o.GetSetDispatch(o),))
progress("Checking getting/passing IDispatch of known type")
expected_class = o.__class__
expected_class = getattr(expected_class, "default_interface", expected_class)
if o.GetSetInterface(o).__class__ != expected_class:
raise error("GetSetDispatch failed")
progress("Checking misc args")
check_get_set(o.GetSetVariant, 4)
check_get_set(o.GetSetVariant, "foo")
check_get_set(o.GetSetVariant, o)
# signed/unsigned.
check_get_set(o.GetSetInt, 0)
check_get_set(o.GetSetInt, -1)
check_get_set(o.GetSetInt, 1)
check_get_set(o.GetSetUnsignedInt, 0)
check_get_set(o.GetSetUnsignedInt, 1)
check_get_set(o.GetSetUnsignedInt, 0x80000000)
if o.GetSetUnsignedInt(-1) != 0xFFFFFFFF:
# -1 is a special case - we accept a negative int (silently converting to
# unsigned) but when getting it back we convert it to a long.
raise error("unsigned -1 failed")
check_get_set(o.GetSetLong, 0)
check_get_set(o.GetSetLong, -1)
check_get_set(o.GetSetLong, 1)
check_get_set(o.GetSetUnsignedLong, 0)
check_get_set(o.GetSetUnsignedLong, 1)
check_get_set(o.GetSetUnsignedLong, 0x80000000)
# -1 is a special case - see above.
if o.GetSetUnsignedLong(-1) != 0xFFFFFFFF:
raise error("unsigned -1 failed")
# We want to explicitly test > 32 bits. py3k has no 'maxint' and
    # 'maxsize+1' is no good on 64bit platforms as it's 65 bits!
big = 2147483647 # sys.maxint on py2k
for l in big, big + 1, 1 << 65:
check_get_set(o.GetSetVariant, l)
progress("Checking structs")
r = o.GetStruct()
assert r.int_value == 99 and str(r.str_value) == "Hello from C++"
assert o.DoubleString("foo") == "foofoo"
progress("Checking var args")
o.SetVarArgs("Hi", "There", "From", "Python", 1)
if o.GetLastVarArgs() != ("Hi", "There", "From", "Python", 1):
raise error("VarArgs failed -" + str(o.GetLastVarArgs()))
progress("Checking arrays")
l = []
TestApplyResult(o.SetVariantSafeArray, (l,), len(l))
l = [1, 2, 3, 4]
TestApplyResult(o.SetVariantSafeArray, (l,), len(l))
TestApplyResult(
o.CheckVariantSafeArray,
(
(
1,
2,
3,
4,
),
),
1,
)
# and binary
TestApplyResult(o.SetBinSafeArray, (str2memory("foo\0bar"),), 7)
progress("Checking properties")
o.LongProp = 3
if o.LongProp != 3 or o.IntProp != 3:
raise error("Property value wrong - got %d/%d" % (o.LongProp, o.IntProp))
o.LongProp = o.IntProp = -3
if o.LongProp != -3 or o.IntProp != -3:
raise error("Property value wrong - got %d/%d" % (o.LongProp, o.IntProp))
# This number fits in an unsigned long. Attempting to set it to a normal
# long will involve overflow, which is to be expected. But we do
# expect it to work in a property explicitly a VT_UI4.
check = 3 * 10 ** 9
o.ULongProp = check
if o.ULongProp != check:
raise error(
"Property value wrong - got %d (expected %d)" % (o.ULongProp, check)
)
TestApplyResult(o.Test, ("Unused", 99), 1) # A bool function
TestApplyResult(o.Test, ("Unused", -1), 1) # A bool function
TestApplyResult(o.Test, ("Unused", 1 == 1), 1) # A bool function
TestApplyResult(o.Test, ("Unused", 0), 0)
TestApplyResult(o.Test, ("Unused", 1 == 0), 0)
assert o.DoubleString("foo") == "foofoo"
TestConstant("ULongTest1", ensure_long(0xFFFFFFFF))
TestConstant("ULongTest2", ensure_long(0x7FFFFFFF))
TestConstant("LongTest1", ensure_long(-0x7FFFFFFF))
TestConstant("LongTest2", ensure_long(0x7FFFFFFF))
TestConstant("UCharTest", 255)
TestConstant("CharTest", -1)
# 'Hello World', but the 'r' is the "Registered" sign (\xae)
TestConstant("StringTest", "Hello Wo\xaeld")
progress("Checking dates and times")
# For now *all* times passed must be tz-aware.
now = win32timezone.now()
# but conversion to and from a VARIANT loses sub-second...
now = now.replace(microsecond=0)
later = now + datetime.timedelta(seconds=1)
TestApplyResult(o.EarliestDate, (now, later), now)
# The below used to fail with `ValueError: microsecond must be in 0..999999` - see #1655
# https://planetcalc.com/7027/ says that float is: Sun, 25 Mar 1951 7:23:49 am
assert o.MakeDate(18712.308206013888) == datetime.datetime.fromisoformat(
"1951-03-25 07:23:49+00:00"
)
progress("Checking currency")
# currency.
pythoncom.__future_currency__ = 1
if o.CurrencyProp != 0:
raise error("Expecting 0, got %r" % (o.CurrencyProp,))
for val in ("1234.5678", "1234.56", "1234"):
o.CurrencyProp = decimal.Decimal(val)
if o.CurrencyProp != decimal.Decimal(val):
raise error("%s got %r" % (val, o.CurrencyProp))
v1 = decimal.Decimal("1234.5678")
TestApplyResult(o.DoubleCurrency, (v1,), v1 * 2)
v2 = decimal.Decimal("9012.3456")
TestApplyResult(o.AddCurrencies, (v1, v2), v1 + v2)
TestTrickyTypesWithVariants(o, is_generated)
progress("Checking win32com.client.VARIANT")
TestPyVariant(o, is_generated)
def TestTrickyTypesWithVariants(o, is_generated):
# Test tricky stuff with type handling and generally only works with
# "generated" support but can be worked around using VARIANT.
if is_generated:
got = o.TestByRefVariant(2)
else:
v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_VARIANT, 2)
o.TestByRefVariant(v)
got = v.value
if got != 4:
raise error("TestByRefVariant failed")
if is_generated:
got = o.TestByRefString("Foo")
else:
v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_BSTR, "Foo")
o.TestByRefString(v)
got = v.value
if got != "FooFoo":
raise error("TestByRefString failed")
# check we can pass ints as a VT_UI1
vals = [1, 2, 3, 4]
if is_generated:
arg = vals
else:
arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_UI1, vals)
TestApplyResult(o.SetBinSafeArray, (arg,), len(vals))
# safearrays of doubles and floats
vals = [0, 1.1, 2.2, 3.3]
if is_generated:
arg = vals
else:
arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R8, vals)
TestApplyResult(o.SetDoubleSafeArray, (arg,), len(vals))
if is_generated:
arg = vals
else:
arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R4, vals)
TestApplyResult(o.SetFloatSafeArray, (arg,), len(vals))
vals = [1.1, 2.2, 3.3, 4.4]
expected = (1.1 * 2, 2.2 * 2, 3.3 * 2, 4.4 * 2)
if is_generated:
TestApplyResult(o.ChangeDoubleSafeArray, (vals,), expected)
else:
arg = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_ARRAY | pythoncom.VT_R8, vals)
o.ChangeDoubleSafeArray(arg)
if arg.value != expected:
raise error("ChangeDoubleSafeArray got the wrong value")
if is_generated:
got = o.DoubleInOutString("foo")
else:
v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_BSTR, "foo")
o.DoubleInOutString(v)
got = v.value
assert got == "foofoo", got
val = decimal.Decimal("1234.5678")
if is_generated:
got = o.DoubleCurrencyByVal(val)
else:
v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_CY, val)
o.DoubleCurrencyByVal(v)
got = v.value
assert got == val * 2
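# (added note, not part of the original test) The pattern above is the general
# recipe for in/out parameters with late-bound dispatch: wrap the initial value
# in a VARIANT with VT_BYREF OR'd into its type, pass it, then read the updated
# value back afterwards, e.g. (the method name below is hypothetical):
#   v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_I4, 0)
#   some_com_object.SomeInOutMethod(v)
#   result = v.value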
def TestDynamic():
progress("Testing Dynamic")
import win32com.client.dynamic
o = win32com.client.dynamic.DumbDispatch("PyCOMTest.PyCOMTest")
TestCommon(o, False)
counter = win32com.client.dynamic.DumbDispatch("PyCOMTest.SimpleCounter")
TestCounter(counter, False)
# Dynamic doesn't know this should be an int, so we get a COM
# TypeMismatch error.
try:
check_get_set_raises(ValueError, o.GetSetInt, "foo")
raise error("no exception raised")
except pythoncom.com_error as exc:
if exc.hresult != winerror.DISP_E_TYPEMISMATCH:
raise
arg1 = VARIANT(pythoncom.VT_R4 | pythoncom.VT_BYREF, 2.0)
arg2 = VARIANT(pythoncom.VT_BOOL | pythoncom.VT_BYREF, True)
arg3 = VARIANT(pythoncom.VT_I4 | pythoncom.VT_BYREF, 4)
o.TestInOut(arg1, arg2, arg3)
assert arg1.value == 4.0, arg1
assert arg2.value == False
assert arg3.value == 8
# damn - props with params don't work for dynamic objects :(
# o.SetParamProp(0, 1)
# if o.ParamProp(0) != 1:
# raise RuntimeError, o.paramProp(0)
def TestGenerated():
# Create an instance of the server.
from win32com.client.gencache import EnsureDispatch
o = EnsureDispatch("PyCOMTest.PyCOMTest")
TestCommon(o, True)
counter = EnsureDispatch("PyCOMTest.SimpleCounter")
TestCounter(counter, True)
# This dance lets us get a CoClass even though it's not explicitly registered.
# This is `CoPyComTest`
from win32com.client.CLSIDToClass import GetClass
coclass_o = GetClass("{8EE0C520-5605-11D0-AE5F-CADD4C000000}")()
TestCommon(coclass_o, True)
# Test the regression reported in #1753
assert bool(coclass_o)
# This is `CoSimpleCounter` and the counter tests should work.
coclass = GetClass("{B88DD310-BAE8-11D0-AE86-76F2C1000000}")()
TestCounter(coclass, True)
# XXX - this is failing in dynamic tests, but should work fine.
i1, i2 = o.GetMultipleInterfaces()
if not isinstance(i1, DispatchBaseClass) or not isinstance(i2, DispatchBaseClass):
# Yay - is now an instance returned!
raise error(
"GetMultipleInterfaces did not return instances - got '%s', '%s'" % (i1, i2)
)
del i1
del i2
# Generated knows to only pass a 32bit int, so should fail.
check_get_set_raises(OverflowError, o.GetSetInt, 0x80000000)
check_get_set_raises(OverflowError, o.GetSetLong, 0x80000000)
# Generated knows this should be an int, so raises ValueError
check_get_set_raises(ValueError, o.GetSetInt, "foo")
check_get_set_raises(ValueError, o.GetSetLong, "foo")
# Pass some non-sequence objects to our array decoder, and watch it fail.
try:
o.SetVariantSafeArray("foo")
raise error("Expected a type error")
except TypeError:
pass
try:
o.SetVariantSafeArray(666)
raise error("Expected a type error")
except TypeError:
pass
o.GetSimpleSafeArray(None)
TestApplyResult(o.GetSimpleSafeArray, (None,), tuple(range(10)))
resultCheck = tuple(range(5)), tuple(range(10)), tuple(range(20))
TestApplyResult(o.GetSafeArrays, (None, None, None), resultCheck)
l = []
TestApplyResult(o.SetIntSafeArray, (l,), len(l))
l = [1, 2, 3, 4]
TestApplyResult(o.SetIntSafeArray, (l,), len(l))
ll = [1, 2, 3, 0x100000000]
TestApplyResult(o.SetLongLongSafeArray, (ll,), len(ll))
TestApplyResult(o.SetULongLongSafeArray, (ll,), len(ll))
# Tell the server to do what it does!
TestApplyResult(o.Test2, (constants.Attr2,), constants.Attr2)
TestApplyResult(o.Test3, (constants.Attr2,), constants.Attr2)
TestApplyResult(o.Test4, (constants.Attr2,), constants.Attr2)
TestApplyResult(o.Test5, (constants.Attr2,), constants.Attr2)
TestApplyResult(o.Test6, (constants.WideAttr1,), constants.WideAttr1)
TestApplyResult(o.Test6, (constants.WideAttr2,), constants.WideAttr2)
TestApplyResult(o.Test6, (constants.WideAttr3,), constants.WideAttr3)
TestApplyResult(o.Test6, (constants.WideAttr4,), constants.WideAttr4)
TestApplyResult(o.Test6, (constants.WideAttr5,), constants.WideAttr5)
TestApplyResult(o.TestInOut, (2.0, True, 4), (4.0, False, 8))
o.SetParamProp(0, 1)
if o.ParamProp(0) != 1:
raise RuntimeError(o.paramProp(0))
# Make sure CastTo works - even though it is only casting it to itself!
o2 = CastTo(o, "IPyCOMTest")
if o != o2:
raise error("CastTo should have returned the same object")
# Do the connection point thing...
# Create a connection object.
progress("Testing connection points")
o2 = win32com.client.DispatchWithEvents(o, RandomEventHandler)
TestEvents(o2, o2)
o2 = win32com.client.DispatchWithEvents(o, NewStyleRandomEventHandler)
TestEvents(o2, o2)
# and a plain "WithEvents".
handler = win32com.client.WithEvents(o, RandomEventHandler)
TestEvents(o, handler)
handler = win32com.client.WithEvents(o, NewStyleRandomEventHandler)
TestEvents(o, handler)
progress("Finished generated .py test.")
def TestEvents(o, handler):
sessions = []
handler._Init()
try:
for i in range(3):
session = o.Start()
sessions.append(session)
time.sleep(0.5)
finally:
# Stop the servers
for session in sessions:
o.Stop(session)
handler._DumpFireds()
handler.close()
def _TestPyVariant(o, is_generated, val, checker=None):
if is_generated:
vt, got = o.GetVariantAndType(val)
else:
# Gotta supply all 3 args with the last 2 being explicit variants to
# get the byref behaviour.
var_vt = VARIANT(pythoncom.VT_UI2 | pythoncom.VT_BYREF, 0)
var_result = VARIANT(pythoncom.VT_VARIANT | pythoncom.VT_BYREF, 0)
o.GetVariantAndType(val, var_vt, var_result)
vt = var_vt.value
got = var_result.value
if checker is not None:
checker(got)
return
# default checking.
assert vt == val.varianttype, (vt, val.varianttype)
# Handle our safe-array test - if the passed value is a list of variants,
# compare against the actual values.
if type(val.value) in (tuple, list):
check = [v.value if isinstance(v, VARIANT) else v for v in val.value]
# pythoncom always returns arrays as tuples.
got = list(got)
else:
check = val.value
assert type(check) == type(got), (type(check), type(got))
assert check == got, (check, got)
def _TestPyVariantFails(o, is_generated, val, exc):
try:
_TestPyVariant(o, is_generated, val)
raise error("Setting %r didn't raise %s" % (val, exc))
except exc:
pass
def TestPyVariant(o, is_generated):
_TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_UI1, 1))
_TestPyVariant(
o, is_generated, VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_UI4, [1, 2, 3])
)
_TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_BSTR, "hello"))
_TestPyVariant(
o,
is_generated,
VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_BSTR, ["hello", "there"]),
)
def check_dispatch(got):
assert isinstance(got._oleobj_, pythoncom.TypeIIDs[pythoncom.IID_IDispatch])
_TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_DISPATCH, o), check_dispatch)
_TestPyVariant(
o, is_generated, VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_DISPATCH, [o])
)
# an array of variants each with a specific type.
v = VARIANT(
pythoncom.VT_ARRAY | pythoncom.VT_VARIANT,
[
VARIANT(pythoncom.VT_UI4, 1),
VARIANT(pythoncom.VT_UI4, 2),
VARIANT(pythoncom.VT_UI4, 3),
],
)
_TestPyVariant(o, is_generated, v)
# and failures
_TestPyVariantFails(o, is_generated, VARIANT(pythoncom.VT_UI1, "foo"), ValueError)
def TestCounter(counter, bIsGenerated):
# Test random access into container
progress("Testing counter", repr(counter))
import random
for i in range(50):
num = int(random.random() * len(counter))
try:
# XXX - this appears broken by commit 08a14d4deb374eaa06378509cf44078ad467b9dc -
# We shouldn't need to do generated differently than dynamic.
if bIsGenerated:
ret = counter.Item(num + 1)
else:
ret = counter[num]
if ret != num + 1:
raise error(
"Random access into element %d failed - return was %s"
% (num, repr(ret))
)
except IndexError:
raise error("** IndexError accessing collection element %d" % num)
num = 0
if bIsGenerated:
counter.SetTestProperty(1)
counter.TestProperty = 1 # Note this has a second, default arg.
counter.SetTestProperty(1, 2)
if counter.TestPropertyWithDef != 0:
raise error("Unexpected property set value!")
if counter.TestPropertyNoDef(1) != 1:
raise error("Unexpected property set value!")
else:
pass
# counter.TestProperty = 1
counter.LBound = 1
counter.UBound = 10
if counter.LBound != 1 or counter.UBound != 10:
print("** Error - counter did not keep its properties")
if bIsGenerated:
bounds = counter.GetBounds()
if bounds[0] != 1 or bounds[1] != 10:
raise error("** Error - counter did not give the same properties back")
counter.SetBounds(bounds[0], bounds[1])
for item in counter:
num = num + 1
if num != len(counter):
raise error("*** Length of counter and loop iterations dont match ***")
if num != 10:
raise error("*** Unexpected number of loop iterations ***")
try:
counter = iter(counter)._iter_.Clone() # Test Clone() and enum directly
except AttributeError:
# *sob* - sometimes this is a real iterator and sometimes not :/
progress("Finished testing counter (but skipped the iterator stuff")
return
counter.Reset()
num = 0
for item in counter:
num = num + 1
if num != 10:
raise error("*** Unexpected number of loop iterations - got %d ***" % num)
progress("Finished testing counter")
def TestLocalVTable(ob):
# Python doesn't fully implement this interface.
if ob.DoubleString("foo") != "foofoo":
raise error("couldn't foofoo")
###############################
##
## Some vtable tests of the interface
##
def TestVTable(clsctx=pythoncom.CLSCTX_ALL):
# Any vtable interfaces marked as dual *should* be able to be
# correctly implemented as IDispatch.
ob = win32com.client.Dispatch("Python.Test.PyCOMTest")
TestLocalVTable(ob)
# Now test it via vtable - use some C++ code to help here as Python can't do it directly yet.
tester = win32com.client.Dispatch("PyCOMTest.PyCOMTest")
testee = pythoncom.CoCreateInstance(
"Python.Test.PyCOMTest", None, clsctx, pythoncom.IID_IUnknown
)
# check we fail gracefully with None passed.
try:
tester.TestMyInterface(None)
except pythoncom.com_error as details:
pass
# and a real object.
tester.TestMyInterface(testee)
def TestVTable2():
# We once crashed creating our object with the native interface as
# the first IID specified. We must do it _after_ the tests, so that
# Python has already had the gateway registered from last run.
ob = win32com.client.Dispatch("Python.Test.PyCOMTest")
iid = pythoncom.InterfaceNames["IPyCOMTest"]
clsid = "Python.Test.PyCOMTest"
clsctx = pythoncom.CLSCTX_SERVER
try:
testee = pythoncom.CoCreateInstance(clsid, None, clsctx, iid)
except TypeError:
# Python can't actually _use_ this interface yet, so this is
# "expected". Any COM error is not.
pass
def TestVTableMI():
clsctx = pythoncom.CLSCTX_SERVER
ob = pythoncom.CoCreateInstance(
"Python.Test.PyCOMTestMI", None, clsctx, pythoncom.IID_IUnknown
)
# This inherits from IStream.
ob.QueryInterface(pythoncom.IID_IStream)
# This implements IStorage, specifying the IID as a string
ob.QueryInterface(pythoncom.IID_IStorage)
# IDispatch should always work
ob.QueryInterface(pythoncom.IID_IDispatch)
iid = pythoncom.InterfaceNames["IPyCOMTest"]
try:
ob.QueryInterface(iid)
except TypeError:
# Python can't actually _use_ this interface yet, so this is
# "expected". Any COM error is not.
pass
def TestQueryInterface(long_lived_server=0, iterations=5):
tester = win32com.client.Dispatch("PyCOMTest.PyCOMTest")
if long_lived_server:
# Create a local server
t0 = win32com.client.Dispatch(
"Python.Test.PyCOMTest", clsctx=pythoncom.CLSCTX_LOCAL_SERVER
)
# Request custom interfaces a number of times
prompt = [
"Testing QueryInterface without long-lived local-server #%d of %d...",
"Testing QueryInterface with long-lived local-server #%d of %d...",
]
for i in range(iterations):
progress(prompt[long_lived_server != 0] % (i + 1, iterations))
tester.TestQueryInterface()
class Tester(win32com.test.util.TestCase):
def testVTableInProc(self):
# We used to crash running this the second time - do it a few times
for i in range(3):
progress("Testing VTables in-process #%d..." % (i + 1))
TestVTable(pythoncom.CLSCTX_INPROC_SERVER)
def testVTableLocalServer(self):
for i in range(3):
progress("Testing VTables out-of-process #%d..." % (i + 1))
TestVTable(pythoncom.CLSCTX_LOCAL_SERVER)
def testVTable2(self):
for i in range(3):
TestVTable2()
def testVTableMI(self):
for i in range(3):
TestVTableMI()
def testMultiQueryInterface(self):
TestQueryInterface(0, 6)
# When we use the custom interface in the presence of a long-lived
# local server, i.e. a local server that is already running when
# we request an instance of our COM object, and remains afterwards,
# then after repeated requests to create an instance of our object
# the custom interface disappears -- i.e. QueryInterface fails with
# E_NOINTERFACE. Set the upper range of the following test to 2 to
# pass this test, i.e. TestQueryInterface(1,2)
TestQueryInterface(1, 6)
def testDynamic(self):
TestDynamic()
def testGenerated(self):
TestGenerated()
if __name__ == "__main__":
# XXX - todo - Complete hack to crank threading support.
# Should NOT be necessary
def NullThreadFunc():
pass
import _thread
_thread.start_new(NullThreadFunc, ())
if "-v" in sys.argv:
verbose = 1
win32com.test.util.testmain()
| 34.930012 | 268 | 0.646166 | [
"MIT"
] | AndresFPerezG/jarvisProject | env/Lib/site-packages/win32com/test/testPyComTest.py | 29,446 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
"""Read and decode an image to an NDArray.
Note: `imread` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
filename : str
Name of the image file to be loaded.
flag : {0, 1}, default 1
1 for three channel color output. 0 for grayscale output.
to_rgb : bool, default True
True for RGB formatted output (MXNet default).
False for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> mx.img.imread("flower.jpg")
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> mx.img.imread("flower.jpg", flag=0)
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> mx.img.imread("flower.jpg", to_rgb=0)
<NDArray 224x224x3 @cpu(0)>
"""
return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
"""Decode an image to an NDArray.
Note: `imdecode` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
buf : str/bytes or numpy.ndarray
Binary image data as string or numpy ndarray.
flag : int, optional, default=1
1 for three channel color output. 0 for grayscale output.
to_rgb : int, optional, default=1
1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, flag=0)
>>> image
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, to_rgb=0)
>>> image
<NDArray 224x224x3 @cpu(0)>
"""
if not isinstance(buf, nd.NDArray):
buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
return _internal._cvimdecode(buf, *args, **kwargs)
def scale_down(src_size, size):
"""Scales down crop size if it's larger than image size.
If width/height of the crop is larger than the width/height of the image,
sets the width/height to the width/height of the image.
Parameters
----------
src_size : tuple of int
Size of the image in (width, height) format.
size : tuple of int
Size of the crop in (width, height) format.
Returns
-------
tuple of int
A tuple containing the scaled crop size in (width, height) format.
Example
--------
>>> src_size = (640,480)
>>> size = (720,120)
>>> new_size = mx.img.scale_down(src_size, size)
>>> new_size
(640,106)
"""
w, h = size
sw, sh = src_size
if sh < h:
w, h = float(w * sh) / h, sh
if sw < w:
w, h = sw, float(h * sw) / w
return int(w), int(h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The major purpose of this function is to wrap a random interp method selection
    and an auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
        10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width), if None provided, auto(9)
will return Area(2) anyway.
Returns
-------
int
interp method from 0 to 4
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 2
elif nh < oh and nw < ow:
return 3
else:
return 1
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
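# A minimal behaviour sketch of the helper above (the return values follow directly
# from the branches in the code; no image data is involved):
#   _get_interp_method(1)     # -> 1, explicit methods are returned unchanged
#   _get_interp_method(9)     # -> 2, auto mode without `sizes` falls back to area
#   _get_interp_method(10)    # -> a random choice from 0 to 4
#   _get_interp_method(7)     # -> raises ValueError (unknown method)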
def resize_short(src, size, interp=2):
"""Resizes shorter edge to size.
Note: `resize_short` uses OpenCV (not the CV2 Python library).
MXNet must have been built with OpenCV for `resize_short` to work.
Resizes the original image by setting the shorter edge to size
and setting the longer edge accordingly.
Resizing function is called from OpenCV.
Parameters
----------
src : NDArray
The original image.
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method used for resizing the image.
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
        10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
Returns
-------
NDArray
        An `NDArray` containing the resized image.
Example
-------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> size = 640
>>> new_image = mx.img.resize_short(image, size)
>>> new_image
    <NDArray 640x960x3 @cpu(0)>
"""
h, w, _ = src.shape
if h > w:
new_h, new_w = size * h // w, size
else:
new_h, new_w = size, size * w // h
return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))
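# Worked example of the arithmetic above: for a 2321x3482 input and size=640 the
# height is the shorter edge, so new_h = 640 and new_w = 640 * 3482 // 2321 = 960;
# the shorter edge becomes exactly `size` while the aspect ratio is preserved up to
# integer truncation.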
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
"""Crop src at fixed location, and (optionally) resize it to size.
Parameters
----------
src : NDArray
Input image
x0 : int
Left boundary of the cropping area
y0 : int
Top boundary of the cropping area
w : int
Width of the cropping area
h : int
Height of the cropping area
size : tuple of (w, h)
Optional, resize to new size after cropping
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
"""
out = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
if size is not None and (w, h) != size:
sizes = (h, w, size[1], size[0])
out = imresize(out, *size, interp=_get_interp_method(interp, sizes))
return out
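# A minimal usage sketch, assuming `img` is a decoded HxWx3 `NDArray` that is large
# enough for the requested region (the names and numbers are illustrative only):
#   patch = fixed_crop(img, x0=10, y0=20, w=100, h=100)                  # 100x100x3 crop
#   small = fixed_crop(img, x0=10, y0=20, w=100, h=100, size=(50, 50))   # crop resized to 50x50x3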
def random_crop(src, size, interp=2):
"""Randomly crop `src` with `size` (width, height).
Upsample result if `src` is smaller than `size`.
Parameters
----------
src: Source image `NDArray`
size: Size of the crop formatted as (width, height). If the `size` is larger
than the image, then the source image is upsampled to `size` and returned.
interp: int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image.
Example
-------
>>> im = mx.nd.array(cv2.imread("flower.jpg"))
>>> cropped_im, rect = mx.image.random_crop(im, (100, 100))
    >>> cropped_im
    <NDArray 100x100x3 @cpu(0)>
    >>> rect
(20, 21, 100, 100)
"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def center_crop(src, size, interp=2):
"""Crops the image `src` to the given `size` by trimming on all four
sides and preserving the center of the image. Upsamples if `src` is smaller
than `size`.
.. note:: This requires MXNet to be compiled with USE_OPENCV.
Parameters
----------
src : NDArray
Binary source image data.
size : list or tuple of int
The desired output image size.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
The cropped image.
Tuple
(x, y, width, height) where x, y are the positions of the crop in the
original image and width, height the dimensions of the crop.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.image.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
>>> cropped_image
<NDArray 500x1000x3 @cpu(0)>
>>> x, y, width, height
(1241, 910, 1000, 500)
"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = int((w - new_w) / 2)
y0 = int((h - new_h) / 2)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def color_normalize(src, mean, std=None):
"""Normalize src with mean and std.
Parameters
----------
src : NDArray
Input image
mean : NDArray
RGB mean to be subtracted
std : NDArray
RGB standard deviation to be divided
Returns
-------
NDArray
An `NDArray` containing the normalized image.
"""
if mean is not None:
src -= mean
if std is not None:
src /= std
return src
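# A minimal usage sketch, assuming `img` is a float32 HxWx3 `NDArray` in RGB order;
# the mean/std values shown are the ImageNet statistics used by `CreateAugmenter`
# below. Note that the input is modified in place (`-=` / `/=`) as well as returned.
#   normed = color_normalize(img,
#                            mean=nd.array([123.68, 116.28, 103.53]),
#                            std=nd.array([58.395, 57.12, 57.375]))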
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
"""Randomly crop src with size. Randomize area and aspect ratio.
Parameters
----------
src : NDArray
Input image
size : tuple of (int, int)
Size of the crop formatted as (width, height).
area : float in (0, 1] or tuple of (float, float)
If tuple, minimum area and maximum area to be maintained after cropping
If float, minimum area to be maintained after cropping, maximum area is set to 1.0
ratio : tuple of (float, float)
Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
interp: int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image.
"""
h, w, _ = src.shape
src_area = h * w
if 'min_area' in kwargs:
warnings.warn('`min_area` is deprecated. Please use `area` instead.',
DeprecationWarning)
area = kwargs.pop('min_area')
assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
if isinstance(area, numeric_types):
area = (area, 1.0)
for _ in range(10):
target_area = random.uniform(area[0], area[1]) * src_area
new_ratio = random.uniform(*ratio)
new_w = int(round(np.sqrt(target_area * new_ratio)))
new_h = int(round(np.sqrt(target_area / new_ratio)))
if random.random() < 0.5:
new_h, new_w = new_w, new_h
if new_w <= w and new_h <= h:
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
# fall back to center_crop
return center_crop(src, size, interp)
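# A minimal usage sketch, assuming `img` is a decoded HxWx3 `NDArray`; the size,
# area and ratio values below mirror the ImageNet-style settings used by
# `CreateAugmenter`:
#   out, (x, y, w, h) = random_size_crop(img, size=(224, 224),
#                                        area=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0))
# `out` is always 224x224x3, while (x, y, w, h) is the region cropped from `img`
# before resizing (or the centered fallback region if no valid crop was found).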
class Augmenter(object):
"""Image Augmenter base class"""
def __init__(self, **kwargs):
self._kwargs = kwargs
for k, v in self._kwargs.items():
if isinstance(v, nd.NDArray):
v = v.asnumpy()
if isinstance(v, np.ndarray):
v = v.tolist()
self._kwargs[k] = v
def dumps(self):
"""Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter.
"""
return json.dumps([self.__class__.__name__.lower(), self._kwargs])
def __call__(self, src):
"""Abstract implementation body"""
raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
"""Composing a sequential augmenter list.
Parameters
----------
ts : list of augmenters
A series of augmenters to be applied in sequential order.
"""
def __init__(self, ts):
super(SequentialAug, self).__init__()
self.ts = ts
def dumps(self):
"""Override the default to avoid duplicate dump."""
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]
def __call__(self, src):
"""Augmenter body"""
for aug in self.ts:
src = aug(src)
return src
class ResizeAug(Augmenter):
"""Make resize shorter edge to size augmenter.
Parameters
----------
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(ResizeAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
return resize_short(src, self.size, self.interp)
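# A minimal sketch of the `dumps` serialisation defined on `Augmenter`, using the
# class above (the exact key order depends on the Python version):
#   ResizeAug(300).dumps()
#   # -> '["resizeaug", {"size": 300, "interp": 2}]'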
class ForceResizeAug(Augmenter):
"""Force resize to size regardless of aspect ratio
Parameters
----------
size : tuple of (int, int)
The desired size as in (width, height)
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(ForceResizeAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
sizes = (src.shape[0], src.shape[1], self.size[1], self.size[0])
return imresize(src, *self.size, interp=_get_interp_method(self.interp, sizes))
class RandomCropAug(Augmenter):
"""Make random crop augmenter
Parameters
----------
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(RandomCropAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
return random_crop(src, self.size, self.interp)[0]
class RandomSizedCropAug(Augmenter):
"""Make random crop with random resizing and random aspect ratio jitter augmenter.
Parameters
----------
size : tuple of (int, int)
Size of the crop formatted as (width, height).
area : float in (0, 1] or tuple of (float, float)
If tuple, minimum area and maximum area to be maintained after cropping
If float, minimum area to be maintained after cropping, maximum area is set to 1.0
ratio : tuple of (float, float)
Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
interp: int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, area, ratio, interp=2, **kwargs):
super(RandomSizedCropAug, self).__init__(size=size, area=area,
ratio=ratio, interp=interp)
self.size = size
if 'min_area' in kwargs:
warnings.warn('`min_area` is deprecated. Please use `area` instead.',
DeprecationWarning)
self.area = kwargs.pop('min_area')
else:
self.area = area
self.ratio = ratio
self.interp = interp
assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."
def __call__(self, src):
"""Augmenter body"""
return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0]
class CenterCropAug(Augmenter):
"""Make center crop augmenter.
Parameters
----------
size : list or tuple of int
The desired output image size.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(CenterCropAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
return center_crop(src, self.size, self.interp)[0]
class RandomOrderAug(Augmenter):
"""Apply list of augmenters in random order
Parameters
----------
ts : list of augmenters
A series of augmenters to be applied in random order
"""
def __init__(self, ts):
super(RandomOrderAug, self).__init__()
self.ts = ts
def dumps(self):
"""Override the default to avoid duplicate dump."""
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]
def __call__(self, src):
"""Augmenter body"""
random.shuffle(self.ts)
for t in self.ts:
src = t(src)
return src
class BrightnessJitterAug(Augmenter):
"""Random brightness jitter augmentation.
Parameters
----------
brightness : float
The brightness jitter ratio range, [0, 1]
"""
def __init__(self, brightness):
super(BrightnessJitterAug, self).__init__(brightness=brightness)
self.brightness = brightness
def __call__(self, src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-self.brightness, self.brightness)
src *= alpha
return src
class ContrastJitterAug(Augmenter):
"""Random contrast jitter augmentation.
Parameters
----------
contrast : float
The contrast jitter ratio range, [0, 1]
"""
def __init__(self, contrast):
super(ContrastJitterAug, self).__init__(contrast=contrast)
self.contrast = contrast
self.coef = nd.array([[[0.299, 0.587, 0.114]]])
def __call__(self, src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-self.contrast, self.contrast)
gray = src * self.coef
gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
src *= alpha
src += gray
return src
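# In effect the transform above computes out = alpha * src + (1 - alpha) * L_mean,
# where L_mean is the mean luminance of the image (0.299 R + 0.587 G + 0.114 B
# averaged over all pixels): contrast is stretched or compressed around the image's
# mean luminance.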
class SaturationJitterAug(Augmenter):
"""Random saturation jitter augmentation.
Parameters
----------
saturation : float
The saturation jitter ratio range, [0, 1]
"""
def __init__(self, saturation):
super(SaturationJitterAug, self).__init__(saturation=saturation)
self.saturation = saturation
self.coef = nd.array([[[0.299, 0.587, 0.114]]])
def __call__(self, src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-self.saturation, self.saturation)
gray = src * self.coef
gray = nd.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src
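# In effect the transform above computes, per pixel, out = alpha * src + (1 - alpha) * L,
# where L is that pixel's own luminance (0.299 R + 0.587 G + 0.114 B): each pixel is
# blended towards its own gray value rather than towards the image mean.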
class HueJitterAug(Augmenter):
"""Random hue jitter augmentation.
Parameters
----------
hue : float
The hue jitter ratio range, [0, 1]
"""
def __init__(self, hue):
super(HueJitterAug, self).__init__(hue=hue)
self.hue = hue
self.tyiq = np.array([[0.299, 0.587, 0.114],
[0.596, -0.274, -0.321],
[0.211, -0.523, 0.311]])
self.ityiq = np.array([[1.0, 0.956, 0.621],
[1.0, -0.272, -0.647],
[1.0, -1.107, 1.705]])
def __call__(self, src):
"""Augmenter body.
Using approximate linear transfomation described in:
https://beesbuzz.biz/code/hsv_color_transforms.php
"""
alpha = random.uniform(-self.hue, self.hue)
u = np.cos(alpha * np.pi)
w = np.sin(alpha * np.pi)
bt = np.array([[1.0, 0.0, 0.0],
[0.0, u, -w],
[0.0, w, u]])
t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T
src = nd.dot(src, nd.array(t))
return src
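# The composite matrix t above equals (ityiq . R . tyiq)^T, where R rotates the I/Q
# chroma plane by alpha * pi radians and leaves the Y (luma) channel unchanged, so
# applying it with nd.dot shifts the hue while approximately preserving brightness.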
class ColorJitterAug(RandomOrderAug):
"""Apply random brightness, contrast and saturation jitter in random order.
Parameters
----------
brightness : float
The brightness jitter ratio range, [0, 1]
contrast : float
The contrast jitter ratio range, [0, 1]
saturation : float
The saturation jitter ratio range, [0, 1]
"""
def __init__(self, brightness, contrast, saturation):
ts = []
if brightness > 0:
ts.append(BrightnessJitterAug(brightness))
if contrast > 0:
ts.append(ContrastJitterAug(contrast))
if saturation > 0:
ts.append(SaturationJitterAug(saturation))
super(ColorJitterAug, self).__init__(ts)
class LightingAug(Augmenter):
"""Add PCA based noise.
Parameters
----------
alphastd : float
Noise level
eigval : 3x1 np.array
Eigen values
eigvec : 3x3 np.array
Eigen vectors
"""
def __init__(self, alphastd, eigval, eigvec):
super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, src):
"""Augmenter body"""
alpha = np.random.normal(0, self.alphastd, size=(3,))
rgb = np.dot(self.eigvec * alpha, self.eigval)
src += nd.array(rgb)
return src
class ColorNormalizeAug(Augmenter):
"""Mean and std normalization.
Parameters
----------
mean : NDArray
RGB mean to be subtracted
std : NDArray
RGB standard deviation to be divided
"""
def __init__(self, mean, std):
super(ColorNormalizeAug, self).__init__(mean=mean, std=std)
self.mean = mean if mean is None or isinstance(mean, nd.NDArray) else nd.array(mean)
self.std = std if std is None or isinstance(std, nd.NDArray) else nd.array(std)
def __call__(self, src):
"""Augmenter body"""
return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
"""Randomly convert to gray image.
Parameters
----------
p : float
Probability to convert to grayscale
"""
def __init__(self, p):
super(RandomGrayAug, self).__init__(p=p)
self.p = p
self.mat = nd.array([[0.21, 0.21, 0.21],
[0.72, 0.72, 0.72],
[0.07, 0.07, 0.07]])
def __call__(self, src):
"""Augmenter body"""
if random.random() < self.p:
src = nd.dot(src, self.mat)
return src
class HorizontalFlipAug(Augmenter):
"""Random horizontal flip.
Parameters
----------
p : float
Probability to flip image horizontally
"""
def __init__(self, p):
super(HorizontalFlipAug, self).__init__(p=p)
self.p = p
def __call__(self, src):
"""Augmenter body"""
if random.random() < self.p:
src = nd.flip(src, axis=1)
return src
class CastAug(Augmenter):
"""Cast to float32"""
def __init__(self, typ='float32'):
super(CastAug, self).__init__(type=typ)
self.typ = typ
def __call__(self, src):
"""Augmenter body"""
src = src.astype(self.typ)
return src
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
pca_noise=0, rand_gray=0, inter_method=2):
"""Creates an augmenter list.
Parameters
----------
data_shape : tuple of int
Shape for output data
resize : int
        Resize the shorter edge to this value at the beginning, if larger than 0
rand_crop : bool
Whether to enable random cropping other than center crop
rand_resize : bool
        Whether to enable random sized cropping; requires rand_crop to be enabled
rand_gray : float
        Probability in [0, 1] to convert the image to grayscale, applied to all
        channels (the number of channels is not reduced to 1)
rand_mirror : bool
Whether to apply horizontal flip to image with probability 0.5
mean : np.ndarray or None
Mean pixel values for [r, g, b]
std : np.ndarray or None
Standard deviations for [r, g, b]
brightness : float
Brightness jittering range (percent)
contrast : float
Contrast jittering range (percent)
saturation : float
Saturation jittering range (percent)
hue : float
Hue jittering range (percent)
pca_noise : float
        PCA noise level (percent)
    inter_method : int, default=2 (Area-based)
Interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
        10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
Examples
--------
>>> # An example of creating multiple augmenters
>>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
... saturation=0.125, pca_noise=0.05, inter_method=10)
>>> # dump the details
>>> for aug in augs:
... aug.dumps()
"""
auglist = []
if resize > 0:
auglist.append(ResizeAug(resize, inter_method))
crop_size = (data_shape[2], data_shape[1])
if rand_resize:
assert rand_crop
auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
elif rand_crop:
auglist.append(RandomCropAug(crop_size, inter_method))
else:
auglist.append(CenterCropAug(crop_size, inter_method))
if rand_mirror:
auglist.append(HorizontalFlipAug(0.5))
auglist.append(CastAug())
if brightness or contrast or saturation:
auglist.append(ColorJitterAug(brightness, contrast, saturation))
if hue:
auglist.append(HueJitterAug(hue))
if pca_noise > 0:
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
auglist.append(LightingAug(pca_noise, eigval, eigvec))
if rand_gray > 0:
auglist.append(RandomGrayAug(rand_gray))
if mean is True:
mean = nd.array([123.68, 116.28, 103.53])
elif mean is not None:
assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
if std is True:
std = nd.array([58.395, 57.12, 57.375])
elif std is not None:
assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
if mean is not None or std is not None:
auglist.append(ColorNormalizeAug(mean, std))
return auglist
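# A minimal end-to-end sketch of applying the returned augmenter list, assuming
# `raw` holds the bytes of a JPEG file:
#   img = imdecode(raw)                                   # HxWx3 uint8 NDArray
#   augs = CreateAugmenter(data_shape=(3, 224, 224), rand_crop=True,
#                          rand_mirror=True, mean=True, std=True)
#   for aug in augs:
#       img = aug(img)                                    # ends up 224x224x3 float32, normalized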
class ImageIter(io.DataIter):
"""Image data iterator with a large number of augmentation choices.
This iterator supports reading from both .rec files and raw image files.
To load input images from .rec files, use `path_imgrec` parameter and to load from raw image
files, use `path_imglist` and `path_root` parameters.
To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
Parameters
----------
batch_size : int
Number of examples per batch.
data_shape : tuple
Data shape in (channels, height, width) format.
        For now, only RGB images with 3 channels are supported.
label_width : int, optional
Number of labels per example. The default label width is 1.
path_imgrec : str
Path to image record file (.rec).
Created with tools/im2rec.py or bin/im2rec.
path_imglist : str
Path to image list (.lst).
Created with tools/im2rec.py or with custom script.
Format: Tab separated record of index, one or more labels and relative_path_from_root.
imglist: list
A list of images with the label(s).
Each item is a list [imagelabel: float or list of float, imgpath].
path_root : str
Root folder of image files.
path_imgidx : str
Path to image index file. Needed for partition and shuffling when using .rec source.
shuffle : bool
Whether to shuffle all images at the start of each iteration or not.
Can be slow for HDD.
part_index : int
Partition index.
num_parts : int
Total number of partitions.
data_name : str
Data name for provided symbols.
label_name : str
Label name for provided symbols.
dtype : str
Label data type. Default: float32. Other options: int32, int64, float64
last_batch_handle : str, optional
How to handle the last batch.
This parameter can be 'pad'(default), 'discard' or 'roll_over'.
        If 'pad', the last batch will be padded with data starting from the beginning
If 'discard', the last batch will be discarded
If 'roll_over', the remaining elements will be rolled over to the next iteration
kwargs : ...
More arguments for creating augmenter. See mx.image.CreateAugmenter.
"""
def __init__(self, batch_size, data_shape, label_width=1,
path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
data_name='data', label_name='softmax_label', dtype='float32',
last_batch_handle='pad', **kwargs):
super(ImageIter, self).__init__()
assert path_imgrec or path_imglist or (isinstance(imglist, list))
assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
logging.info('Using %s threads for decoding...', str(num_threads))
        logging.info('Set environment variable MXNET_CPU_WORKER_NTHREADS to a'
' larger number to use more threads.')
class_name = self.__class__.__name__
if path_imgrec:
logging.info('%s: loading recordio %s...',
class_name, path_imgrec)
if path_imgidx:
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
self.imgidx = list(self.imgrec.keys)
else:
self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type
self.imgidx = None
else:
self.imgrec = None
if path_imglist:
logging.info('%s: loading image list %s...', class_name, path_imglist)
with open(path_imglist) as fin:
imglist = {}
imgkeys = []
for line in iter(fin.readline, ''):
line = line.strip().split('\t')
label = nd.array(line[1:-1], dtype=dtype)
key = int(line[0])
imglist[key] = (label, line[-1])
imgkeys.append(key)
self.imglist = imglist
elif isinstance(imglist, list):
logging.info('%s: loading image list...', class_name)
result = {}
imgkeys = []
index = 1
for img in imglist:
key = str(index) # pylint: disable=redefined-variable-type
index += 1
if len(img) > 2:
label = nd.array(img[:-1], dtype=dtype)
elif isinstance(img[0], numeric_types):
label = nd.array([img[0]], dtype=dtype)
else:
label = nd.array(img[0], dtype=dtype)
result[key] = (label, img[-1])
imgkeys.append(str(key))
self.imglist = result
else:
self.imglist = None
self.path_root = path_root
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
if label_width > 1:
self.provide_label = [(label_name, (batch_size, label_width))]
else:
self.provide_label = [(label_name, (batch_size,))]
self.batch_size = batch_size
self.data_shape = data_shape
self.label_width = label_width
self.shuffle = shuffle
if self.imgrec is None:
self.seq = imgkeys
elif shuffle or num_parts > 1:
assert self.imgidx is not None
self.seq = self.imgidx
else:
self.seq = None
if num_parts > 1:
assert part_index < num_parts
N = len(self.seq)
C = N // num_parts
self.seq = self.seq[part_index * C:(part_index + 1) * C]
if aug_list is None:
self.auglist = CreateAugmenter(data_shape, **kwargs)
else:
self.auglist = aug_list
self.cur = 0
self._allow_read = True
self.last_batch_handle = last_batch_handle
self.num_image = len(self.seq) if self.seq is not None else None
self._cache_data = None
self._cache_label = None
self._cache_idx = None
self.reset()
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.last_batch_handle != 'roll_over' or \
self._cache_data is None:
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
if self._allow_read is False:
self._allow_read = True
def hard_reset(self):
"""Resets the iterator and ignore roll over data"""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None
def next_sample(self):
"""Helper function for reading in next sample."""
if self._allow_read is False:
raise StopIteration
if self.seq is not None:
if self.cur < self.num_image:
idx = self.seq[self.cur]
else:
if self.last_batch_handle != 'discard':
self.cur = 0
raise StopIteration
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
if self.imglist is None:
return header.label, img
else:
return self.imglist[idx][0], img
else:
label, fname = self.imglist[idx]
return label, self.read_image(fname)
else:
s = self.imgrec.read()
if s is None:
if self.last_batch_handle != 'discard':
self.imgrec.reset()
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img
def _batchify(self, batch_data, batch_label, start=0):
"""Helper function for batchifying data"""
i = start
batch_size = self.batch_size
try:
while i < batch_size:
label, s = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
data = self.augmentation_transform(data)
assert i < batch_size, 'Batch size must be multiples of augmenter output length'
batch_data[i] = self.postprocess_data(data)
batch_label[i] = label
i += 1
except StopIteration:
if not i:
raise StopIteration
return i
def next(self):
"""Returns the next batch of data."""
batch_size = self.batch_size
c, h, w = self.data_shape
# if last batch data is rolled over
if self._cache_data is not None:
# check both the data and label have values
assert self._cache_label is not None, "_cache_label didn't have values"
assert self._cache_idx is not None, "_cache_idx didn't have values"
batch_data = self._cache_data
batch_label = self._cache_label
i = self._cache_idx
# clear the cache data
else:
batch_data = nd.empty((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
i = self._batchify(batch_data, batch_label)
# calculate the padding
pad = batch_size - i
# handle padding for the last batch
if pad != 0:
if self.last_batch_handle == 'discard':
raise StopIteration
# if the option is 'roll_over', throw StopIteration and cache the data
elif self.last_batch_handle == 'roll_over' and \
self._cache_data is None:
self._cache_data = batch_data
self._cache_label = batch_label
self._cache_idx = i
raise StopIteration
else:
_ = self._batchify(batch_data, batch_label, i)
if self.last_batch_handle == 'pad':
self._allow_read = False
else:
self._cache_data = None
self._cache_label = None
self._cache_idx = None
return io.DataBatch([batch_data], [batch_label], pad=pad)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
def locate():
"""Locate the image file/index if decode fails."""
if self.seq is not None:
idx = self.seq[(self.cur % self.num_image) - 1]
else:
idx = (self.cur % self.num_image) - 1
if self.imglist is not None:
_, fname = self.imglist[idx]
msg = "filename: {}".format(fname)
else:
msg = "index: {}".format(idx)
return "Broken image " + msg
try:
img = imdecode(s)
except Exception as e:
raise RuntimeError("{}, {}".format(locate(), e))
return img
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = aug(data)
return data
def postprocess_data(self, datum):
"""Final postprocessing step before image is loaded into the batch."""
return nd.transpose(datum, axes=(2, 0, 1))
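# A minimal usage sketch, assuming `data/train.rec` and `data/train.idx` were
# produced by tools/im2rec.py (the paths and parameters are placeholders):
#   train_iter = ImageIter(batch_size=32, data_shape=(3, 224, 224),
#                          path_imgrec='data/train.rec', path_imgidx='data/train.idx',
#                          shuffle=True, rand_crop=True, rand_mirror=True)
#   for batch in train_iter:
#       data = batch.data[0]      # (32, 3, 224, 224) float32 NDArray
#       label = batch.label[0]    # (32,) float32 NDArray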
| 33.763473 | 130 | 0.598031 | [
"Apache-2.0"
] | Vikas89/private-mxnet | python/mxnet/image/image.py | 45,108 | Python |
# coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: v3.6.0-alpha.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1beta1_cpu_target_utilization import V1beta1CPUTargetUtilization
class TestV1beta1CPUTargetUtilization(unittest.TestCase):
""" V1beta1CPUTargetUtilization unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CPUTargetUtilization(self):
"""
Test V1beta1CPUTargetUtilization
"""
model = openshift.client.models.v1beta1_cpu_target_utilization.V1beta1CPUTargetUtilization()
if __name__ == '__main__':
unittest.main()
| 99.023256 | 3,380 | 0.793565 | [
"Apache-2.0"
] | flaper87/openshift-restclient-python | openshift/test/test_v1beta1_cpu_target_utilization.py | 4,258 | Python |
import warnings
import mmcv
import numpy as np
import torch
from torch.nn.modules.utils import _pair
from mmdet.core.anchor.builder import ANCHOR_GENERATORS
from mmdet.core.anchor import AnchorGenerator
@ANCHOR_GENERATORS.register_module(force=True)
class SSDAnchorGenerator(AnchorGenerator):
"""Anchor generator for SSD
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels.
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
basesize_ratio_range (tuple(float)): Ratio range of anchors.
input_size (int): Size of feature map, 300 for SSD300,
512 for SSD512.
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
same scales. It is always set to be False in SSD.
"""
def __init__(self,
strides,
ratios,
basesize_ratio_range,
input_size=300,
scale_major=True):
assert len(strides) == len(ratios)
assert mmcv.is_tuple_of(basesize_ratio_range, float)
self.strides = [_pair(stride) for stride in strides]
        self.input_size = max(input_size) if isinstance(input_size, (list, tuple)) else input_size
self.centers = [(stride[0] / 2., stride[1] / 2.)
for stride in self.strides]
self.basesize_ratio_range = basesize_ratio_range
# calculate anchor ratios and sizes
min_ratio, max_ratio = basesize_ratio_range
min_ratio = int(min_ratio * 100)
max_ratio = int(max_ratio * 100)
step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
min_sizes = []
max_sizes = []
for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
min_sizes.append(int(self.input_size * ratio / 100))
max_sizes.append(int(self.input_size * (ratio + step) / 100))
if self.input_size == 300:
if basesize_ratio_range[0] == 0.15: # SSD300 COCO
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
min_sizes.insert(0, int(self.input_size * 10 / 100))
max_sizes.insert(0, int(self.input_size * 20 / 100))
else:
min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
warnings.warn(
                    'according to original SSD, basesize_ratio_range[0] should be either 0.15 '
'or 0.2 when input_size is 300, got '
f'{basesize_ratio_range[0]}.')
elif self.input_size == 512:
if basesize_ratio_range[0] == 0.1: # SSD512 COCO
min_sizes.insert(0, int(self.input_size * 4 / 100))
max_sizes.insert(0, int(self.input_size * 10 / 100))
elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
else:
min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
                warnings.warn('according to original SSD, basesize_ratio_range[0] should be either 0.1 '
'or 0.15 when input_size is 512, got'
f' {basesize_ratio_range[0]}.')
else:
if basesize_ratio_range[0] == 0.1: # SSD512 COCO
min_sizes.insert(0, int(self.input_size * 4 / 100))
max_sizes.insert(0, int(self.input_size * 10 / 100))
else:
min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
anchor_ratios = []
anchor_scales = []
for k in range(len(self.strides)):
scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
anchor_ratio = [1.]
for r in ratios[k]:
anchor_ratio += [1 / r, r] # 4 or 6 ratio
anchor_ratios.append(torch.Tensor(anchor_ratio))
anchor_scales.append(torch.Tensor(scales))
self.base_sizes = min_sizes
self.scales = anchor_scales
self.ratios = anchor_ratios
self.scale_major = scale_major
self.center_offset = 0
self.base_anchors = self.gen_base_anchors()
# added for proto export
self.min_sizes = min_sizes
self.max_sizes = max_sizes
def gen_base_anchors(self):
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_size in enumerate(self.base_sizes):
base_anchors = self.gen_single_level_base_anchors(
base_size,
scales=self.scales[i],
ratios=self.ratios[i],
center=self.centers[i])
indices = list(range(len(self.ratios[i])))
indices.insert(1, len(indices))
base_anchors = torch.index_select(base_anchors, 0,
torch.LongTensor(indices))
multi_level_base_anchors.append(base_anchors)
return multi_level_base_anchors
def __repr__(self):
"""str: a string that describes the module"""
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}strides={self.strides},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}scale_major={self.scale_major},\n'
repr_str += f'{indent_str}input_size={self.input_size},\n'
repr_str += f'{indent_str}ratios={self.ratios},\n'
repr_str += f'{indent_str}num_levels={self.num_levels},\n'
repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
repr_str += f'{indent_str}basesize_ratio_range='
repr_str += f'{self.basesize_ratio_range})'
return repr_str
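# A minimal instantiation sketch using the standard SSD300 VOC configuration; the
# derived sizes follow from the ratio/step arithmetic in __init__ above:
#   gen = SSDAnchorGenerator(strides=[8, 16, 32, 64, 100, 300],
#                            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]],
#                            basesize_ratio_range=(0.2, 0.9), input_size=300)
#   gen.min_sizes   # -> [30, 60, 111, 162, 213, 264]
#   gen.max_sizes   # -> [60, 111, 162, 213, 264, 315]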
| 45.770833 | 103 | 0.591716 | [
"BSD-3-Clause"
] | www516717402/edgeai-mmdetection | xmmdet/core/anchor/anchor_generator.py | 6,591 | Python |
from __future__ import absolute_import
"""This module offers a display and interaction frontend with Qt.
It will try importing PySide first, and if that fails PyQt. The code will
constantly be tested with both bindings."""
from .displaywidgets import DisplayWidget, NewDisplayWidget
from .control import ControlWidget
#from .mainwin import ZasimMainWindow
display_objects = []
class ZasimDisplay(object):
simulator = None
"""The `Simulator` object for this display."""
display = None
"""The `BaseDisplayWidget` in use."""
window = None
"""The `ZasimMainWindow` instance in use."""
control = None
"""The `ControlWidget` in use."""
def __init__(self, simulator):
"""Instantiate a Display (thas is: a window with a display widget and
simulation controls) from a simulator.
:param simulator: The simulator to use."""
self.simulator = simulator
if not self.display:
if 'tiles' in self.simulator.palette_info:
self.display = NewDisplayWidget(self.simulator)
else:
self.display = DisplayWidget(self.simulator)
if self.control is None:
self.control = ControlWidget(self.simulator)
from .mainwin import ZasimMainWindow
self.window = ZasimMainWindow(self.simulator, self.display, self.control)
display_objects.append(self.window)
self.window.show()
def set_scale(self, scale):
"""Sets the scale of the display component."""
self.display.set_scale(scale)
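# A minimal usage sketch, assuming a Qt application object is already running and
# `sim` is a zasim `Simulator` instance:
#   display = ZasimDisplay(sim)
#   display.set_scale(4)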
| 28.981481 | 81 | 0.671565 | [
"BSD-3-Clause"
] | timo/zasim | zasim/gui/display.py | 1,565 | Python |
import json
import pytest
from typing import ClassVar, Dict, List, Sequence, Tuple, Union
from kat.harness import sanitize, variants, Query, Runner
from abstract_tests import AmbassadorTest, HTTP, AHTTP
from abstract_tests import MappingTest, OptionTest, ServiceType, Node, Test
class LogServiceTest(AmbassadorTest):
def init(self):
self.extra_ports = [25565]
self.target = HTTP()
def manifests(self) -> str:
return self.format("""
---
apiVersion: v1
kind: Service
metadata:
name: stenography
spec:
selector:
app: stenography
ports:
- port: 25565
name: http
targetPort: http
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: stenography
spec:
selector:
matchLabels:
app: stenography
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: stenography
spec:
containers:
- name: stenography
image: securityinsanity/stenography:latest
env:
- name: PORT
value: "25565"
ports:
- name: http
containerPort: 25565
""") + super().manifests()
def config(self):
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: LogService
name: custom-http-logging
service: stenography:25565
driver: http
driver_config:
additional_log_headers:
- header_name: "included-on-all"
- header_name: "not-included-on-trailer"
during_trailer: false
- header_name: "not-included on resp-trail"
during_trailer: false
during_response: false
- header_name: "not-anywhere"
during_trailer: false
during_response: false
during_request: false
flush_interval_time: 1
flush_interval_byte_size: 1
""")
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: config__dump
hostname: "*"
prefix: /config_dump
rewrite: /config_dump
service: http://127.0.0.1:8001
""")
def requirements(self):
yield from super().requirements()
yield ("url", Query(self.url("config_dump")))
def queries(self):
yield Query(self.url("config_dump"), phase=2)
def check(self):
found_bootstrap_dump = False
found_clusters_dump = False
found_listeners_dump = False
body = json.loads(self.results[0].body)
for config_obj in body.get('configs'):
if config_obj.get('@type') == 'type.googleapis.com/envoy.admin.v3.BootstrapConfigDump':
found_bootstrap_dump = True
clusters = config_obj.get('bootstrap').get('static_resources').get('clusters')
found_stenography = False
assert len(clusters) > 0, "No clusters found"
for cluster in clusters:
if cluster.get('name') == 'cluster_logging_stenography_25565_default':
found_stenography = True
break
assert found_stenography
if config_obj.get('@type') == 'type.googleapis.com/envoy.admin.v3.ClustersConfigDump':
found_clusters_dump = True
clusters = config_obj.get('static_clusters')
found_stenography = False
assert len(clusters) > 0, "No clusters found"
for cluster in clusters:
if cluster.get('cluster').get('name') == 'cluster_logging_stenography_25565_default':
found_stenography = True
break
assert found_stenography
if config_obj.get('@type') == 'type.googleapis.com/envoy.admin.v3.ListenersConfigDump':
found_listeners_dump = True
for listener in config_obj.get('dynamic_listeners'):
for filter_chain in listener.get('active_state').get('listener').get('filter_chains'):
for filter_obj in filter_chain.get('filters'):
access_logs = filter_obj.get('typed_config').get('access_log')
found_configured_access_log = False
assert len(
access_logs) > 0, "No access log configurations found in any listeners filter chains"
for access_log in access_logs:
if access_log.get('name') == 'envoy.access_loggers.http_grpc' and access_log.get(
'typed_config').get('common_config').get('grpc_service').get('envoy_grpc').get(
'cluster_name') == 'cluster_logging_stenography_25565_default':
found_configured_access_log = True
break
assert found_configured_access_log
assert found_listeners_dump, "Could not find listeners config dump. Did the config dump endpoint work? Did we change Envoy API versions?"
assert found_clusters_dump, "Could not find clusters config dump. Did the config dump endpoint work? Did we change Envoy API versions?"
assert found_bootstrap_dump, "Could not find bootstrap config dump. Did the config dump endpoint work? Did we change Envoy API versions?"
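# For reference, the assertions above walk a config dump shaped roughly like the
# abridged sketch below (only the fields the test actually reads are shown):
#   {"configs": [
#       {"@type": ".../envoy.admin.v3.ClustersConfigDump",
#        "static_clusters": [
#            {"cluster": {"name": "cluster_logging_stenography_25565_default"}}]},
#       {"@type": ".../envoy.admin.v3.ListenersConfigDump",
#        "dynamic_listeners": [...]},
#       ...
#   ]}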
class LogServiceTestLongServiceName(AmbassadorTest):
def init(self):
self.extra_ports = [25565]
self.target = HTTP()
def manifests(self) -> str:
return self.format("""
---
apiVersion: v1
kind: Service
metadata:
name: stenographylongservicenamewithnearly60characterss
spec:
selector:
app: stenography-longservicename
ports:
- port: 25565
name: http
targetPort: http
type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: stenography-longservicename
spec:
selector:
matchLabels:
app: stenography-longservicename
replicas: 1
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: stenography-longservicename
spec:
containers:
- name: stenography
image: securityinsanity/stenography:latest
env:
- name: PORT
value: "25565"
ports:
- name: http
containerPort: 25565
""") + super().manifests()
def config(self):
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: LogService
name: custom-http-logging
service: stenographylongservicenamewithnearly60characterss:25565
driver: http
driver_config:
additional_log_headers:
- header_name: "included-on-all"
- header_name: "not-included-on-trailer"
during_trailer: false
- header_name: "not-included on resp-trail"
during_trailer: false
during_response: false
- header_name: "not-anywhere"
during_trailer: false
during_response: false
during_request: false
flush_interval_time: 1
flush_interval_byte_size: 1
""")
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: config__dump-longservicename
hostname: "*"
prefix: /config_dump
rewrite: /config_dump
service: http://127.0.0.1:8001
""")
def requirements(self):
yield from super().requirements()
yield ("url", Query(self.url("config_dump")))
def queries(self):
yield Query(self.url("config_dump"), phase=2)
def check(self):
found_bootstrap_dump = False
found_clusters_dump = False
found_listeners_dump = False
body = json.loads(self.results[0].body)
for config_obj in body.get('configs'):
if config_obj.get('@type') == 'type.googleapis.com/envoy.admin.v3.BootstrapConfigDump':
found_bootstrap_dump = True
clusters = config_obj.get('bootstrap').get('static_resources').get('clusters')
found_stenography = False
assert len(clusters) > 0, "No clusters found"
for cluster in clusters:
if cluster.get('name') == 'cluster_logging_stenographylongservicena-0':
found_stenography = True
break
assert found_stenography
if config_obj.get('@type') == 'type.googleapis.com/envoy.admin.v3.ClustersConfigDump':
found_clusters_dump = True
clusters = config_obj.get('static_clusters')
found_stenography = False
assert len(clusters) > 0, "No clusters found"
for cluster in clusters:
if cluster.get('cluster').get('name') == 'cluster_logging_stenographylongservicena-0':
found_stenography = True
break
assert found_stenography
if config_obj.get('@type') == 'type.googleapis.com/envoy.admin.v3.ListenersConfigDump':
found_listeners_dump = True
for listener in config_obj.get('dynamic_listeners'):
for filter_chain in listener.get('active_state').get('listener').get('filter_chains'):
for filter_obj in filter_chain.get('filters'):
access_logs = filter_obj.get('typed_config').get('access_log')
found_configured_access_log = False
assert len(
access_logs) > 0, "No access log configurations found in any listeners filter chains"
for access_log in access_logs:
if access_log.get('name') == 'envoy.access_loggers.http_grpc' and access_log.get(
'typed_config').get('common_config').get('grpc_service').get('envoy_grpc').get(
'cluster_name') == 'cluster_logging_stenographylongservicena-0':
found_configured_access_log = True
break
assert found_configured_access_log
assert found_listeners_dump, "Could not find listeners config dump. Did the config dump endpoint work? Did we change Envoy API versions?"
assert found_clusters_dump, "Could not find clusters config dump. Did the config dump endpoint work? Did we change Envoy API versions?"
assert found_bootstrap_dump, "Could not find bootstrap config dump. Did the config dump endpoint work? Did we change Envoy API versions?"
| 36.333333 | 145 | 0.616017 | [
"Apache-2.0"
] | DoodleScheduling/emissary | python/tests/kat/t_logservice.py | 10,464 | Python |
import typing
from typing import Dict, Union, Tuple, Iterator, Any
from typing import Optional
import numpy as np
import torch
from gym.utils import seeding
from advisor_losses import AlphaScheduler, AdvisorWeightedStage
from allenact.algorithms.offpolicy_sync.losses.abstract_offpolicy_loss import (
AbstractOffPolicyLoss,
)
from allenact.algorithms.onpolicy_sync.policy import ActorCriticModel
from allenact.base_abstractions.misc import Memory
_DATASET_CACHE: Dict[str, Any] = {}
class PoisonedDoorsOffPolicyExpertCELoss(AbstractOffPolicyLoss[ActorCriticModel]):
def __init__(self, total_episodes_in_epoch: Optional[int] = None):
super().__init__()
self.total_episodes_in_epoch = total_episodes_in_epoch
def loss(
self,
model: ActorCriticModel,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
memory: Memory,
*args,
**kwargs
) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:
        rollout_len, nrollouts, _ = batch["poisoned_door_state"].shape
observations = {}
for k in ["poisoned_door_state"]:
if k in batch:
observations[k] = batch[k].view(
rollout_len, nrollouts, *batch[k].shape[2:]
)
ac_out, memory = model.forward(
observations=observations,
memory=memory,
prev_actions=None,
masks=batch["masks"],
)
expert_ce_loss = -ac_out.distributions.log_prob(
batch["expert_action"].view(rollout_len, nrollouts, 1)
).mean()
info = {"expert_ce": expert_ce_loss.item()}
if self.total_episodes_in_epoch is not None:
if "completed_episode_count" not in memory:
memory["completed_episode_count"] = 0
memory["completed_episode_count"] += (
int(np.prod(batch["masks"].shape)) - batch["masks"].sum().item()
)
info["epoch_progress"] = (
memory["completed_episode_count"] / self.total_episodes_in_epoch
)
return expert_ce_loss, info, memory, rollout_len * nrollouts
class PoisonedDoorsOffPolicyAdvisorLoss(AbstractOffPolicyLoss[ActorCriticModel]):
def __init__(
self,
total_episodes_in_epoch: Optional[int] = None,
fixed_alpha: Optional[float] = 1,
fixed_bound: Optional[float] = 0.0,
alpha_scheduler: AlphaScheduler = None,
smooth_expert_weight_decay: Optional[float] = None,
*args,
**kwargs
):
super().__init__()
self.advisor_loss = AdvisorWeightedStage(
rl_loss=None,
fixed_alpha=fixed_alpha,
fixed_bound=fixed_bound,
alpha_scheduler=alpha_scheduler,
smooth_expert_weight_decay=smooth_expert_weight_decay,
*args,
**kwargs
)
self.total_episodes_in_epoch = total_episodes_in_epoch
def loss(
self,
step_count: int,
model: ActorCriticModel,
batch: Dict[str, Union[torch.Tensor, Dict[str, torch.Tensor]]],
memory: Memory,
**kwargs
) -> Tuple[torch.FloatTensor, Dict[str, float], Memory, int]:
rollout_len, nrollouts, _ = batch["poisoned_door_state"].shape
observations = {"poisoned_door_state": batch["poisoned_door_state"]}
ac_out, memory = model.forward(
observations=observations,
memory=memory,
prev_actions=None,
masks=batch["masks"].view(rollout_len, nrollouts, -1),
)
total_loss, losses_dict = self.advisor_loss.loss(
step_count=step_count,
batch={
"observations": {
"expert_action": torch.cat(
(
batch["expert_action"].view(rollout_len, nrollouts, 1),
torch.ones(rollout_len, nrollouts, 1, dtype=torch.int64).to(
batch["expert_action"].device
),
),
dim=-1,
)
}
},
actor_critic_output=ac_out,
)
info = {"offpolicy_" + key: val for key, val in losses_dict.items()}
if self.total_episodes_in_epoch is not None:
if "completed_episode_count" not in memory:
memory["completed_episode_count"] = 0
memory["completed_episode_count"] += (
int(np.prod(batch["masks"].shape)) - batch["masks"].sum().item()
)
info["epoch_progress"] = (
memory["completed_episode_count"] / self.total_episodes_in_epoch
)
return total_loss, info, memory, rollout_len * nrollouts
class PoisonedDoorsExpertTrajectoryIterator(Iterator):
def __init__(
self, num_doors: int, nrollouts: int, rollout_len: int, dataset_size: int,
):
super(PoisonedDoorsExpertTrajectoryIterator, self).__init__()
self.np_seeded_random_gen, _ = typing.cast(
Tuple[np.random.RandomState, Any], seeding.np_random(0)
)
self.ndoors = num_doors
self.nrollouts = nrollouts
self.rollout_len = rollout_len
self.dataset_size = dataset_size
self.initial_observations = np.zeros(
(rollout_len, nrollouts, 1), dtype=np.int64
)
self.mask = np.zeros((rollout_len, nrollouts, 1), dtype=np.float32)
self.expert_actions = np.random.randint(
4, 3 + num_doors, size=(self.dataset_size, 1)
)
self.current_ind = 0
def __next__(self) -> Dict[str, torch.Tensor]:
start = self.current_ind
end = self.current_ind + self.nrollouts * self.rollout_len
if end > self.dataset_size:
raise StopIteration()
self.current_ind = end
return {
"masks": torch.from_numpy(self.mask),
"poisoned_door_state": torch.from_numpy(self.initial_observations),
"expert_action": torch.from_numpy(
self.expert_actions[start:end].reshape(
(self.rollout_len, self.nrollouts)
)
),
}
def create_poisoneddoors_offpolicy_data_iterator(
num_doors: int, nrollouts: int, rollout_len: int, dataset_size: int,
) -> PoisonedDoorsExpertTrajectoryIterator:
return PoisonedDoorsExpertTrajectoryIterator(
num_doors=num_doors,
nrollouts=nrollouts,
rollout_len=rollout_len,
dataset_size=dataset_size,
)
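# A minimal consumption sketch for the iterator above; the shapes follow directly
# from the constructor arguments and the numbers are placeholders:
#   it = create_poisoneddoors_offpolicy_data_iterator(
#       num_doors=4, nrollouts=8, rollout_len=16, dataset_size=8 * 16 * 10)
#   batch = next(it)
#   batch["expert_action"].shape        # -> torch.Size([16, 8])
#   batch["masks"].shape                # -> torch.Size([16, 8, 1])
#   batch["poisoned_door_state"].shape  # -> torch.Size([16, 8, 1])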
| 33.497487 | 88 | 0.60156 | [
"Apache-2.0"
] | allenai/advisor | poisoneddoors_plugin/poisoneddoors_offpolicy.py | 6,666 | Python |
from django.contrib import admin
from django.db.models import get_model
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Condition = get_model('offer', 'Condition')
Benefit = get_model('offer', 'Benefit')
Range = get_model('offer', 'Range')
class ConditionAdmin(admin.ModelAdmin):
list_display = ('type', 'value', 'range')
class BenefitAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'type', 'value', 'range')
class ConditionalOfferAdmin(admin.ModelAdmin):
list_display = ('name', 'offer_type', 'start_date', 'end_date', 'condition', 'benefit', 'total_discount')
list_filter = ('offer_type',)
readonly_fields = ('total_discount', 'num_orders')
fieldsets = (
(None, {
'fields': ('name', 'description', 'offer_type', 'condition', 'benefit', 'start_date', 'end_date', 'priority')
}),
('Usage', {
'fields': ('total_discount', 'num_orders')
}),
)
admin.site.register(ConditionalOffer, ConditionalOfferAdmin)
admin.site.register(Condition, ConditionAdmin)
admin.site.register(Benefit, BenefitAdmin)
admin.site.register(Range)
| 31.611111 | 121 | 0.674868 | [
"BSD-3-Clause"
] | endgame/django-oscar | oscar/apps/offer/admin.py | 1,138 | Python |
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
class ResizeImages3D(function_node.FunctionNode):
def __init__(self, output_shape):
self.out_H = output_shape[0]
self.out_W = output_shape[1]
self.out_D = output_shape[2]
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.char == 'f',
x_type.ndim == 5
)
def forward(self, inputs):
x, = inputs
xp = cuda.get_array_module(x)
B, C, H, W, D = x.shape
u_1d = xp.linspace(0, W - 1, num=self.out_W)
v_1d = xp.linspace(0, H - 1, num=self.out_H)
t_1d = xp.linspace(0, D - 1, num=self.out_D)
grid = xp.meshgrid(u_1d, v_1d, t_1d)
u = grid[0].ravel()
v = grid[1].ravel()
t = grid[2].ravel()
u0 = xp.floor(u).astype(numpy.int32)
u0 = u0.clip(0, W - 2)
u1 = u0 + 1
v0 = xp.floor(v).astype(numpy.int32)
v0 = v0.clip(0, H - 2)
v1 = v0 + 1
t0 = xp.floor(t).astype(numpy.int32)
t0 = t0.clip(0, D - 2)
t1 = t0 + 1
# weights
w1 = (u1 - u) * (v1 - v) * (t1 - t)
w2 = (u - u0) * (v1 - v) * (t1 - t)
w3 = (u1 - u) * (v - v0) * (t1 - t)
w4 = (u - u0) * (v - v0) * (t1 - t)
w5 = (u1 - u) * (v1 - v) * (t - t0)
w6 = (u - u0) * (v1 - v) * (t - t0)
w7 = (u1 - u) * (v - v0) * (t - t0)
w8 = (u - u0) * (v - v0) * (t - t0)
w1 = w1.astype(x.dtype)
w2 = w2.astype(x.dtype)
w3 = w3.astype(x.dtype)
w4 = w4.astype(x.dtype)
w5 = w5.astype(x.dtype)
w6 = w6.astype(x.dtype)
w7 = w7.astype(x.dtype)
w8 = w8.astype(x.dtype)
y = (w1[None, None, :] * x[:, :, v0, u0, t0] +
w2[None, None, :] * x[:, :, v0, u1, t0] +
w3[None, None, :] * x[:, :, v1, u0, t0] +
w4[None, None, :] * x[:, :, v1, u1, t0] +
w5[None, None, :] * x[:, :, v0, u0, t1] +
w6[None, None, :] * x[:, :, v0, u1, t1] +
w7[None, None, :] * x[:, :, v1, u0, t1] +
w8[None, None, :] * x[:, :, v1, u1, t1])
y = y.reshape(B, C, self.out_H, self.out_W, self.out_D)
return y,
def backward(self, indexes, grad_outputs):
return ResizeImagesGrad3D(
self.inputs[0].shape,
(self.out_H, self.out_W, self.out_D)).apply(grad_outputs)
class ResizeImagesGrad3D(function_node.FunctionNode):
def __init__(self, input_shape, output_shape):
self.out_H = output_shape[0]
self.out_W = output_shape[1]
self.out_D = output_shape[2]
self.input_shape = input_shape
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.char == 'f',
x_type.ndim == 5
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
gy, = inputs
B, C, H, W, D = self.input_shape
u_1d = xp.linspace(0, W - 1, num=self.out_W)
v_1d = xp.linspace(0, H - 1, num=self.out_H)
t_1d = xp.linspace(0, D - 1, num=self.out_D)
grid = xp.meshgrid(u_1d, v_1d, t_1d)
u = grid[0].ravel()
v = grid[1].ravel()
t = grid[2].ravel()
u0 = xp.floor(u).astype(numpy.int32)
u0 = u0.clip(0, W - 2)
u1 = u0 + 1
v0 = xp.floor(v).astype(numpy.int32)
v0 = v0.clip(0, H - 2)
v1 = v0 + 1
t0 = xp.floor(t).astype(numpy.int32)
t0 = t0.clip(0, D - 2)
t1 = t0 + 1
# weights
wu0 = u - u0
wu1 = u1 - u
wv0 = v - v0
wv1 = v1 - v
wt0 = t - t0
wt1 = t1 - t
wu0 = wu0.astype(gy.dtype)
wu1 = wu1.astype(gy.dtype)
wv0 = wv0.astype(gy.dtype)
wv1 = wv1.astype(gy.dtype)
wt0 = wt0.astype(gy.dtype)
wt1 = wt1.astype(gy.dtype)
# --- gx
if xp is numpy:
scatter_add = numpy.add.at
else:
scatter_add = cuda.cupyx.scatter_add
gx = xp.zeros(self.input_shape, dtype=gy.dtype)
gy = gy.reshape(B, C, -1)
scatter_add(gx, (slice(None), slice(None), v0, u0, t0),
gy * wu1 * wv1 * wt1)
scatter_add(gx, (slice(None), slice(None), v0, u1, t0),
gy * wu0 * wv1 * wt1)
scatter_add(gx, (slice(None), slice(None), v1, u0, t0),
gy * wu1 * wv0 * wt1)
scatter_add(gx, (slice(None), slice(None), v1, u1, t0),
gy * wu0 * wv0 * wt1)
scatter_add(gx, (slice(None), slice(None), v0, u0, t1),
gy * wu1 * wv1 * wt0)
scatter_add(gx, (slice(None), slice(None), v0, u1, t1),
gy * wu0 * wv1 * wt0)
scatter_add(gx, (slice(None), slice(None), v1, u0, t1),
gy * wu1 * wv0 * wt0)
scatter_add(gx, (slice(None), slice(None), v1, u1, t1),
gy * wu0 * wv0 * wt0)
return gx,
def backward(self, indexes, grad_outputs):
return ResizeImages3D(
(self.out_H, self.out_W, self.out_D)).apply(grad_outputs)
def resize_images_3d(x, output_shape):
"""Resize images to the given shape.
This function resizes 3D data to :obj:`output_shape`.
Currently, only bilinear interpolation is supported as the sampling method.
Notatition: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` is the number of the input channels.
- :math:`h`, :math:`w` and :math:`d` are the height, width and depth of the
input image, respectively.
- :math:`h_O`, :math:`w_O` and :math:`d_0` are the height, width and depth
of the output image.
Args:
x (~chainer.Variable):
Input variable of shape :math:`(n, c_I, h, w, d)`.
output_shape (tuple):
This is a tuple of length 3 whose values are :obj:`(h_O, w_O, d_O)`.
Returns:
~chainer.Variable: Resized image whose shape is \
:math:`(n, c_I, h_O, w_O, d_O)`.
"""
return ResizeImages3D(output_shape).apply((x,))[0]
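# Editor's addition: a minimal usage sketch of ``resize_images_3d`` (not part
# of the original file).  The array sizes are illustrative assumptions; the
# guard keeps the example from running on import.
if __name__ == '__main__':
    x = numpy.random.rand(2, 3, 8, 8, 8).astype(numpy.float32)  # (n, c_I, h, w, d)
    y = resize_images_3d(x, (16, 16, 16))
    assert y.shape == (2, 3, 16, 16, 16)  # (n, c_I, h_O, w_O, d_O)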
| 33.549738 | 79 | 0.508583 | [
"MIT"
] | pfnet-research/label-efficient-brain-tumor-segmentation | src/links/model/resize_images_3d.py | 6,408 | Python |
# coding=utf-8
'''
author: ShiLei Miao
analyses and build model about NBA
'''
import numpy as np
from numpy import *
import pandas as pd
from pandas import *
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import KFold
from sklearn import metrics
os.chdir(r'E:\PycharmProjects\Rong360\dta')
def loadDataSetT(path):
data = pd.read_csv(path)
dataSet = data.values[0:,2:]
dataLabel = data.values[0:,1:2]
return dataSet,dataLabel
def transLabel(Mat_Labels):
labels = []
for item in Mat_Labels:
labels.append(item[0])
labels = array(labels)
return labels
def P_YYYY(N_train, target_train, N_test, target_test):
clf = RandomForestClassifier(n_estimators=300, random_state=520341, max_depth=9,\
min_samples_split=3, class_weight='balanced_subsample')
clf = clf.fit(N_train, target_train)
pred = clf.predict_proba(N_test)
pred = DataFrame(pred)[0].values
N_auc = metrics.roc_auc_score(target_test, 1 - pred)
print N_auc
print '\n'
return N_auc, clf
def preds_calculate(Mat_Train,Mat_Labels):
kf = KFold(len(Mat_Train), n_folds=10)
NN_auc = []
for train_index, test_index in kf:
X_train, X_test = Mat_Train[train_index], Mat_Train[test_index]
y_train, y_test = Mat_Labels[train_index], Mat_Labels[test_index]
N_auc, clf = P_YYYY(X_train, y_train, X_test, y_test)
NN_auc.append(N_auc)
mean_auc = mean(NN_auc)
print 'AUC均值:',mean_auc
return mean_auc, clf
# Training set
S_train_user_info = pd.read_csv(r'Generate_dta\S_train_user_info.csv')
N_train_user_info = pd.read_csv(r'Generate_dta\N_train_user_info.csv').drop(['lable'],axis=1)
relation1_train = pd.read_csv(r'Generate_dta\0909relation1_train.csv')
relation2_train = pd.read_csv(r'Generate_dta\0909relation2_train.csv')
N_train_consumption1 = pd.read_csv(r'Generate_dta\N_train_consumption1.csv').drop(['lable'],axis=1)
t_consumption = pd.read_csv(r'Generate_dta\t_consumption.csv')
# rong_tag is not used [the data below are the one-hot encoded features]
rong_tag_train = pd.read_csv(r'Generate_dta\N_rong_tag_train.csv').drop(['lable'],axis=1)
N_rong_tag_train_var = pd.read_excel(r'Stat_importance_var.xls')
N_rong_tag_train_var = N_rong_tag_train_var[N_rong_tag_train_var['Importance']>10]
N_rong_tag_train = rong_tag_train.reindex(columns = N_rong_tag_train_var['Feature'].values)
N_rong_tag_train['user_id'] = rong_tag_train['user_id']
N_rong_tag_train = N_rong_tag_train.replace([None], [-1])
train = merge(S_train_user_info,N_train_user_info,how="left", left_on='user_id', right_on='user_id')
train = merge(train,relation1_train,how="left", left_on='user_id', right_on='user_id')
train = merge(train,relation2_train,how="left", left_on='user_id', right_on='user_id')
train = merge(train,N_train_consumption1,how="left", left_on='user_id', right_on='user_id')
train = merge(train,t_consumption,how="left", left_on='user_id', right_on='user_id')
train = train.replace([None], [-1])
train['category_null'] = (train<0).sum(axis=1)
## After reviewing missing values in train and test, drop users whose number of missing features is 187 [almost all of them have product_id=2]
train = train[train['category_null'] < 187]
train = DataFrame(train.values,columns=train.columns)
train = merge(train,N_rong_tag_train,how="left", left_on='user_id', right_on='user_id')
Mat_Train = train.drop(['user_id','lable','category_null'],axis=1)
Mat_Train = array(Mat_Train)
Mat_Label = train['lable'].astype(int)
mean_auc, clf = preds_calculate(Mat_Train,Mat_Label)
| 33.537037 | 101 | 0.719216 | [
"MIT"
] | finlay-liu/rong360-dataanalysis2016 | Procedure/2_M1/train/m2-cv-rf.py | 3,732 | Python |
__author__ = 'miguel.freitas@checkmarx.com'
import os
import sys
import argparse
import pyodbc
import json
import array
DB = "CxDB"
def is_str(string):
return string is not None and isinstance(string, str) and len(string) > 0
def is_int(integer):
return not isinstance(integer, bool) and isinstance(integer, int)
def is_conn(conn):
return conn is not None and isinstance(conn, pyodbc.Connection)
def read_file(filename):
if is_str(filename):
if filename.endswith(".json"):
try:
filename = os.path.basename(filename)
if os.path.isfile(filename):
if os.access(filename, os.R_OK):
with open(filename, 'rb') as f:
return json.load(f)
else:
raise PermissionError("You don't have \
permissions to access this file")
else:
raise FileNotFoundError("File Not Found")
except FileNotFoundError:
raise FileNotFoundError("File Not Found")
else:
raise AttributeError("File should have \".json\" extension")
else:
raise AttributeError("No Filename provided")
def connect_to_db(driver, server, user, password, database):
if is_str(driver) and \
is_str(server) and \
is_str(user) and \
is_str(password) and \
is_str(database):
try:
conn = pyodbc.connect(
'DRIVER={' + driver + '};SERVER=' + server +
';DATABASE=' + database +
';UID=' + user +
';PWD=' + password,
timeout=3)
print("Connection to", database, "success")
return conn
except pyodbc.OperationalError or \
pyodbc.InterfaceError or \
pyodbc.Error as error:
raise ConnectionError(error)
else:
raise AttributeError(
"server | user | password | database were not provided")
def get_category_type_id_by_name(conn, category_type_name):
if is_conn(conn) and is_str(category_type_name):
cursor = conn.cursor()
category_type_id = -1
cursor.execute(
"SELECT id,Typename FROM dbo.CategoriesTypes WHERE TypeName=?",
category_type_name)
rows = cursor.fetchall()
if len(rows) > 0:
for row in rows:
category_type_id = row[0]
return category_type_id
else:
return category_type_id
else:
raise AttributeError(
"Connection object or Category Name \
was not provided")
def add_category_type_by_name(conn, category_type_name):
if is_conn(conn) and is_str(category_type_name):
cursor = conn.cursor()
cursor.execute("SET IDENTITY_INSERT dbo.CategoriesTypes ON")
conn.commit()
cursor.execute(
"INSERT INTO dbo.CategoriesTypes (Id, Typename) \
VALUES((SELECT max(Id)+1 FROM dbo.CategoriesTypes), ?)",
category_type_name)
conn.commit()
cursor.execute("SET IDENTITY_INSERT dbo.CategoriesTypes OFF")
conn.commit()
return True
else:
raise AttributeError(
"Connection object or Category Name \
was not provided")
def check_category_type_by_name(conn, category_type_name):
if is_conn(conn) and is_str(category_type_name):
category_type_id = get_category_type_id_by_name(
conn, category_type_name)
if category_type_id == -1:
print("Category Type ", category_type_name, " does not exist.")
add_category_type_by_name(conn, category_type_name)
category_type_id = get_category_type_id_by_name(
conn, category_type_name)
print("Creating category type :",
category_type_name, "- ID:", category_type_id)
else:
print("Category already exists :",
category_type_name, "- ID:", category_type_id)
return category_type_id
else:
raise AttributeError(
"Connection object or Category Name \
was not provided")
def delete_categories_by_category_type_id(conn, category_type_id):
if is_conn(conn) and is_int(category_type_id):
cursor = conn.cursor()
cursor.execute(
"DELETE FROM dbo.Categories WHERE CategoryType=?",
category_type_id)
conn.commit()
else:
raise AttributeError(
"Connection object or Category Type ID \
was not provided")
def delete_categories_for_queries_by_category_type_id(conn, category_type_id):
if is_conn(conn) and is_int(category_type_id):
cursor = conn.cursor()
cursor.execute(
"DELETE FROM dbo.CategoryForQuery WHERE CategoryId \
IN (SELECT id FROM dbo.Categories WHERE CategoryType=?)",
category_type_id)
conn.commit()
else:
raise AttributeError(
"Connection object or Category Type ID \
was not provided")
def clean_old_data(conn, category_type_id):
if is_conn(conn) and is_int(category_type_id):
delete_categories_for_queries_by_category_type_id(
conn, category_type_id)
delete_categories_by_category_type_id(conn, category_type_id)
print("Clearing old data...")
else:
raise AttributeError(
"Connection object or Category Type ID \
was not provided")
def add_category(conn, category_name, category_type_id):
if is_conn(conn) and is_str(category_name) and is_int(category_type_id):
cursor = conn.cursor()
cursor.execute("SET IDENTITY_INSERT dbo.Categories ON")
conn.commit()
cursor.execute("INSERT INTO dbo.Categories (Id, CategoryName,CategoryType) \
VALUES((SELECT max(Id)+1 FROM dbo.Categories),?,?)",
(category_name, category_type_id))
conn.commit()
cursor.execute("SET IDENTITY_INSERT dbo.Categories OFF")
conn.commit()
return True
else:
raise AttributeError(
"Connection object or Category Name or Category Type ID \
was not provided")
def get_category_id(conn, category_name, category_type_id):
if is_conn(conn) and is_str(category_name) and is_int(category_type_id):
cursor = conn.cursor()
cursor.execute("SELECT Id FROM dbo.Categories WHERE \
CategoryName=? AND CategoryType=?",
(category_name, category_type_id))
return cursor.fetchall()[0][0]
else:
raise AttributeError(
"Connection object or Category Name or Category Type ID \
was not provided")
def add_category_for_query(conn, category_id, query_id):
if is_conn(conn) and is_int(category_id) and is_int(query_id):
cursor = conn.cursor()
cursor.execute("SET IDENTITY_INSERT dbo.CategoryForQuery ON")
conn.commit()
cursor.execute(
"INSERT INTO dbo.CategoryForQuery (Id,QueryId,CategoryId) \
VALUES((SELECT max(Id)+1 FROM dbo.CategoryForQuery),?,?)",
(query_id, category_id))
conn.commit()
cursor.execute("SET IDENTITY_INSERT dbo.CategoryForQuery OFF")
conn.commit()
return True
else:
raise AttributeError(
"Connection object or Category ID or Query ID \
was not provided")
def get_categories_by_category_type_id_and_name(conn,
category_name,
category_type_id):
if is_conn(conn) and is_int(category_type_id) and is_str(category_name):
cursor = conn.cursor()
cursor.execute("SELECT * FROM dbo.Categories WHERE \
CategoryName=? AND CategoryType=?",
category_name, category_type_id)
rows = cursor.fetchall()
return rows
else:
raise AttributeError(
"Connection object or Category ID or Query ID \
was not provided")
def insert_new_categories(conn, category_type_id, group):
if is_conn(conn) and is_int(category_type_id):
if "name" in group:
category_name = group["name"]
add_category(conn, category_name, category_type_id)
category = get_categories_by_category_type_id_and_name(
conn, category_name, category_type_id)
print("\nNew Category Inserted : ", category[0])
category_id = category[0][0]
return category_id
else:
raise AttributeError(
"Connection object or Category Type ID \
was not provided")
def get_queries(conn, query_ids_list):
if is_conn(conn) and len(query_ids_list) > 0:
sanitized_list = []
for queryId in query_ids_list:
if is_int(queryId):
sanitized_list.append(queryId)
query_ids = str(sanitized_list).strip('[]')
if len(query_ids) > 0:
cursor = conn.cursor()
cursor.execute(
"SELECT * FROM dbo.Query WHERE QueryId IN (" + query_ids + ")")
return cursor.fetchall()
else:
raise AttributeError("Connection object or Query List \
was not provided")
def get_categories_ids_by_category_type(conn, category_type_id):
if is_conn(conn) and is_int(category_type_id):
cursor = conn.cursor()
cursor.execute(
"SELECT [Id] FROM dbo.Categories where CategoryType=?",
category_type_id)
rows = cursor.fetchall()
arr = array.array('i')
for row in rows:
category_id = row[0]
arr.append(category_id)
return arr
else:
raise AttributeError(
"Connection object or Category Type ID \
was not provided")
def insert_queries(conn, category_id, severity_id, queries):
if is_conn(conn) and is_int(category_id) and \
is_int(severity_id) and len(queries) > 0:
cursor = conn.cursor()
cursor.execute("SET IDENTITY_INSERT dbo.CategoryForQuery ON")
conn.commit()
i = 0
for query in queries:
query_id = query[0]
percentage = round((i * 100) / len(queries), 0)
print("Inserting Query", query_id, "...", percentage, "%")
cursor.execute("INSERT INTO dbo.CategoryForQuery \
(Id, QueryId,CategoryId) VALUES\
((SELECT max(Id)+1 FROM dbo.CategoryForQuery), ?,?)",
(query_id, category_id))
conn.commit()
update_customized_queries(conn, category_id, severity_id,
query_id)
i = i + 1
cursor.execute("SET IDENTITY_INSERT dbo.CategoryForQuery OFF")
conn.commit()
else:
raise AttributeError(
"Connection object or Category ID \
was not provided")
def get_args(args):
if isinstance(args, list) and len(args) > 0:
args_parser = argparse.ArgumentParser(
description='Add Custom Category to CxDB')
args_parser.add_argument(
'-dbd', '--dbdriver', help='Checkmarx MSSQL DB Driver',
required=False,
default="SQL Server")
args_parser.add_argument(
'-dbu', '--dbuser', help='Checkmarx MSSQL DB Username',
required=True)
args_parser.add_argument('-dbp', '--dbpassword',
help='Checkmarx MSSQL DB Password',
required=True)
args_parser.add_argument('-dbs', '--dbserver',
help='Checkmarx MSSQL DB Server URL',
required=True)
args_parser.add_argument('-fg', '--file_groups',
help='Categories and Queries Mapping File',
required=True)
return args_parser.parse_args(args)
else:
raise AttributeError("args should be a non-empty array")
def update_category_severity_mapping(conn, severity_id,
category_name, group_name):
if is_conn(conn) and is_int(severity_id) and is_str(category_name) and \
is_str(group_name):
cursor = conn.cursor()
cursor.execute("UPDATE Queries \
SET Queries.Severity = ? \
FROM dbo.Categories Categories \
JOIN dbo.CategoryForQuery CategoriesForQuery \
ON Categories.Id=CategoriesForQuery.CategoryId \
JOIN dbo.Query Queries \
ON CategoriesForQuery.QueryId=Queries.QueryId \
JOIN dbo.CategoriesTypes CategoriesTypes \
ON Categories.CategoryType = CategoriesTypes.Id \
WHERE Categories.CategoryName = ? \
AND CategoriesTypes.TypeName = ?",
(severity_id, group_name, category_name))
conn.commit()
cursor.execute("UPDATE QueryVersions \
SET QueryVersions.Severity = ? \
FROM dbo.Categories Categories \
JOIN dbo.CategoryForQuery CategoriesForQuery \
ON Categories.Id=CategoriesForQuery.CategoryId \
JOIN dbo.QueryVersion QueryVersions \
ON CategoriesForQuery.QueryId=QueryVersions.QueryId \
JOIN dbo.CategoriesTypes CategoriesTypes \
ON Categories.CategoryType = CategoriesTypes.Id \
WHERE Categories.CategoryName = ? \
AND CategoriesTypes.TypeName = ?",
(severity_id, group_name, category_name))
conn.commit()
print("Updating Severity Mapping for Severity", severity_id,
"-", group_name, "-", category_name)
else:
raise AttributeError(
"Connection object was not provided")
def update_customized_queries(conn, category_id, severity_id, query_id):
if is_conn(conn) and is_int(category_id) and is_int(severity_id) \
and is_int(query_id):
cursor = conn.cursor()
cursor.execute("SELECT QueryId FROM dbo.Query \
WHERE PackageId IN (\
SELECT DISTINCT PackageId FROM dbo.QueryGroup \
WHERE Name = (\
SELECT Name FROM dbo.QueryGroup \
WHERE PackageId = (\
SELECT DISTINCT PackageId FROM dbo.Query \
WHERE QueryId = ?)\
) and PackageId > 100000\
) and Name = (\
SELECT DISTINCT Name FROM dbo.Query \
WHERE QueryId = ?)",
(query_id, query_id))
customized_queries_list = cursor.fetchall()
if len(customized_queries_list) > 0:
for customized_query in customized_queries_list:
cursor.execute("INSERT INTO dbo.CategoryForQuery \
(Id, QueryId,CategoryId) VALUES\
((SELECT max(Id)+1 FROM dbo.CategoryForQuery), ?,?)",
(customized_query[0], category_id))
conn.commit()
cursor.execute("UPDATE dbo.QueryVersion SET Severity = ? \
WHERE QueryId IN (\
SELECT QueryId FROM dbo.QueryVersion \
WHERE PackageId IN (\
SELECT DISTINCT PackageId FROM dbo.QueryGroup \
WHERE Name = (\
SELECT Name FROM dbo.QueryGroup \
WHERE PackageId = (\
SELECT DISTINCT PackageId FROM dbo.QueryVersion \
WHERE QueryId = ?)\
) and PackageId > 100000\
) and Name = (\
SELECT DISTINCT Name FROM dbo.QueryVersion \
WHERE QueryId = ?)\
)",
(severity_id, query_id, query_id))
conn.commit()
cursor.execute("UPDATE dbo.Query SET Severity = ? \
WHERE QueryId IN (\
SELECT QueryId FROM dbo.Query \
WHERE PackageId IN (\
SELECT DISTINCT PackageId FROM dbo.QueryGroup \
WHERE Name = (\
SELECT Name FROM dbo.QueryGroup \
WHERE PackageId = (\
SELECT DISTINCT PackageId FROM dbo.Query \
WHERE QueryId = ?)\
) and PackageId > 100000\
) and Name = (\
SELECT DISTINCT Name FROM dbo.Query \
WHERE QueryId = ?)\
)",
(severity_id, query_id, query_id))
conn.commit()
print("Updating Customized Queries Severity", severity_id,
"- Query ID -", query_id)
else:
print("No existing customized queries for", query_id)
return True
else:
raise AttributeError(
"Connection object bwas not provided")
def main(args):
if args is not None and hasattr(args, "file_groups"):
file_groups = args.file_groups
if is_str(file_groups):
file_content = read_file(file_groups)
category = file_content["category"]
category_name = category["name"]
groups = category["groups"]
if hasattr(args, "dbdriver") and \
hasattr(args, "dbserver") and \
hasattr(args, "dbuser") and \
hasattr(args, "dbpassword"):
db_server = args.dbserver
db_user = args.dbuser
db_pwd = args.dbpassword
db_driver = args.dbdriver
if is_str(db_driver) and \
is_str(db_server) and \
is_str(db_user) and \
is_str(db_pwd):
conn = connect_to_db(
db_driver, db_server, db_user, db_pwd, DB)
if is_conn(conn):
category_type_id = check_category_type_by_name(
conn, category_name)
clean_old_data(conn, category_type_id)
for group in groups:
category_id = insert_new_categories(
conn, category_type_id, group)
if "query_ids" in group and "name" in group and \
"severity_id" in group:
severity_id = group["severity_id"]
group_name = group["name"]
queries = get_queries(conn, group["query_ids"])
print(group_name, ":", len(queries),
"queries to change")
insert_queries(conn, category_id,
severity_id, queries)
update_category_severity_mapping(
conn,
severity_id,
category_name,
group_name)
else:
print("Group has 1 missing attribute: name\
query_ids or severity_id")
else:
raise Exception("Cannot connect to Database")
else:
raise Exception(
"db_server | db_user | db_pwd \
are not valid strings")
else:
raise Exception(
"db_server | db_user | db_pwd \
was not provided as an argument")
else:
raise TypeError("file_groups is not a string")
else:
raise AttributeError("args does not has file_groups as attribute")
if __name__ == "__main__":
main(get_args(sys.argv[1:]))
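# Editor's note: an illustrative example of the mapping file passed via
# ``--file_groups`` (reconstructed from ``main`` above).  The category name,
# group names, severity ids and query ids are assumptions, not values taken
# from the original project:
#
# {
#     "category": {
#         "name": "My Custom Category",
#         "groups": [
#             {"name": "Critical", "severity_id": 3, "query_ids": [589, 591]},
#             {"name": "Low", "severity_id": 1, "query_ids": [604]}
#         ]
#     }
# }
#
# Invocation would then look roughly like (server/credentials are placeholders):
# python add_custom_category.py -dbs <server> -dbu <user> -dbp <password> -fg groups.json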
| 39.265504 | 84 | 0.554069 | [
"MIT"
] | cxpsemea/CxAddCustomCategory | add_custom_category.py | 20,261 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
class Post(models.Model):
status_ITEMS = (
(1, '上线'),
(2, '草稿'),
(3, '删除'),
)
title = models.CharField(max_length=50, verbose_name='标题')
desc = models.CharField(max_length=255, blank=True, verbose_name='摘要')
category = models.ForeignKey('Category', verbose_name='分类')
tags = models.ManyToManyField('Tag', related_name="posts", verbose_name='标签')
content = models.TextField(verbose_name='内容', help_text='注:目前仅支持Markdown格式')
status = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')
owner = models.ForeignKey(User, verbose_name='作者')
created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
lasted_update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间')
def status_show(self):
return '当前状态:%s'%(self.status)
status_show.short_description = '展示站台'
def __unicode__(self):
return self.title
class Meta:
verbose_name = verbose_name_plural = '文章'
class Category(models.Model):
status_ITEMS = (
(1, '可用'),
(2, '删除'),
)
name = models.CharField(max_length=50,verbose_name='名称')
status = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')
owner = models.ForeignKey(User, verbose_name='作者')
is_nav = models.BooleanField(default=False, verbose_name="是否为导航")
created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
# parent = models.ForeignKey('Category', verbose_name='分类')
def __unicode__(self):
return self.name
class Meta:
verbose_name = verbose_name_plural = '分类'
class Tag(models.Model):
status_ITEMS= (
(1, '正常'),
(2, '删除'),
)
name = models.CharField(max_length=50,verbose_name='名称')
status = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')
owner = models.ForeignKey(User, verbose_name='作者')
created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
def __unicode__(self):
return self.name
class Meta:
verbose_name = verbose_name_plural = '标签'
| 28.878378 | 89 | 0.732335 | [
"MIT"
] | liangtaos/typeidea | typeidea/blog/models.py | 2,307 | Python |
#!/usr/bin/env python2.7
# Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT)
# Bespoke Link to Instruments and Small Satellites (BLISS)
#
# Copyright 2016, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such
# information to foreign countries or providing access to foreign persons.
import time
import datetime
import mock
import os
import os.path
import nose
import nose.tools
import ait.core
from ait.core import dmc
LEAPSECOND_DATA_RESPONSE = '''#
# Updated through IERS Bulletin C55
# File expires on: 28 December 2018
#
#@ 3754944000
#
2272060800 10 # 1 Jan 1972
2287785600 11 # 1 Jul 1972
2303683200 12 # 1 Jan 1973
2335219200 13 # 1 Jan 1974
2366755200 14 # 1 Jan 1975
2398291200 15 # 1 Jan 1976
2429913600 16 # 1 Jan 1977
2461449600 17 # 1 Jan 1978
2492985600 18 # 1 Jan 1979
2524521600 19 # 1 Jan 1980
2571782400 20 # 1 Jul 1981
2603318400 21 # 1 Jul 1982
2634854400 22 # 1 Jul 1983
2698012800 23 # 1 Jul 1985
2776982400 24 # 1 Jan 1988
2840140800 25 # 1 Jan 1990
2871676800 26 # 1 Jan 1991
2918937600 27 # 1 Jul 1992
2950473600 28 # 1 Jul 1993
2982009600 29 # 1 Jul 1994
3029443200 30 # 1 Jan 1996
3076704000 31 # 1 Jul 1997
'''
class MockResponse:
def __init__(self, text, status_code):
self.text = text
self.status_code = status_code
def test_getTimestampUTC():
expected = time.strftime('%Y-%j', time.gmtime())
actual = time.strftime('%Y-%j', time.gmtime(dmc.getTimestampUTC()[0]))
assert actual == expected
def test_getUTCDatetimeDOY_w_days():
days = 1
t = datetime.datetime.utcnow() + datetime.timedelta(days=days)
timestamp = t.timetuple()
exp_year = timestamp.tm_year
exp_day = '%03d' % timestamp.tm_yday
dtime = dmc.getUTCDatetimeDOY(days=days).split('T')[0].split('-')
assert str(exp_year) == dtime[0]
assert str(exp_day) == dtime[1]
def test_leap_second_attrs():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat"
)
ls = dmc.LeapSeconds
ls._load_leap_second_data()
assert ls.leapseconds == ls._data['leapseconds']
assert ls.valid_date == ls._data['valid']
assert ls.get_current_GPS_offset() == ls.leapseconds[-1][-1]
@nose.tools.raises(ValueError)
def test_leap_second_by_date_invalid_gps_date():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat"
)
dmc.LeapSeconds._load_leap_second_data()
dmc.LeapSeconds.get_GPS_offset_for_date(datetime.datetime(1980, 1, 1))
def test_leap_second_by_date():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat"
)
ls = dmc.LeapSeconds
ls._load_leap_second_data()
assert ls.get_GPS_offset_for_date(datetime.datetime(1981, 1, 1)) == 0
assert ls.get_GPS_offset_for_date(datetime.datetime(1981, 7, 1)) == 1
assert ls.get_GPS_offset_for_date(datetime.datetime(1982, 7, 1)) == 2
assert ls.get_GPS_offset_for_date(datetime.datetime(1983, 7, 1)) == 3
assert ls.get_GPS_offset_for_date(datetime.datetime(1985, 7, 1)) == 4
assert ls.get_GPS_offset_for_date(datetime.datetime(1988, 1, 1)) == 5
assert ls.get_GPS_offset_for_date(datetime.datetime(1990, 1, 1)) == 6
assert ls.get_GPS_offset_for_date(datetime.datetime(1991, 1, 1)) == 7
assert ls.get_GPS_offset_for_date(datetime.datetime(1992, 7, 1)) == 8
assert ls.get_GPS_offset_for_date(datetime.datetime(1993, 7, 1)) == 9
assert ls.get_GPS_offset_for_date(datetime.datetime(1994, 7, 1)) == 10
assert ls.get_GPS_offset_for_date(datetime.datetime(1996, 1, 1)) == 11
assert ls.get_GPS_offset_for_date(datetime.datetime(1997, 7, 1)) == 12
assert ls.get_GPS_offset_for_date(datetime.datetime(1999, 1, 1)) == 13
assert ls.get_GPS_offset_for_date(datetime.datetime(2006, 1, 1)) == 14
assert ls.get_GPS_offset_for_date(datetime.datetime(2009, 1, 1)) == 15
assert ls.get_GPS_offset_for_date(datetime.datetime(2012, 7, 1)) == 16
assert ls.get_GPS_offset_for_date(datetime.datetime(2015, 7, 1)) == 17
assert ls.get_GPS_offset_for_date(datetime.datetime(2017, 1, 1)) == 18
# Make sure not supplying a date returns the offset for the current date
assert (ls.get_GPS_offset_for_date(datetime.datetime.utcnow()) ==
ls.get_GPS_offset_for_date())
def test_leap_second_data_load():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat"
)
assert type(dmc.LeapSeconds.leapseconds) == type([])
assert dmc.LeapSeconds.leapseconds[0] == (datetime.datetime(1981, 7, 1), 1)
assert type(dmc.LeapSeconds.valid_date) == type(datetime.datetime.now())
@nose.tools.raises(ValueError)
@mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 400)))
def test_failed_leapsecond_load_and_update():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "invalidpath", "leapseconds.dat"
)
dmc.LeapSeconds._data = None
dmc.LeapSeconds._load_leap_second_data()
@mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 200)))
def test_update_leap_second_data():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "tmp_leapseconds.out"
)
dmc.LeapSeconds._data = None
dmc.LeapSeconds._update_leap_second_data()
assert type(dmc.LeapSeconds.leapseconds) == type([])
assert dmc.LeapSeconds.leapseconds[0] == (datetime.datetime(1981, 7, 1), 1)
assert type(dmc.LeapSeconds.valid_date) == type(datetime.datetime.now())
assert os.path.isfile(ait.config.leapseconds.filename)
os.remove(ait.config.leapseconds.filename)
@nose.tools.raises(ValueError)
@mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 400)))
def test_unable_to_pull_leapsecond_data():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "tmp_leapseconds.out"
)
dmc.LeapSeconds._data = None
dmc.LeapSeconds._update_leap_second_data()
if __name__ == '__main__':
nose.main()
| 38.324022 | 101 | 0.726531 | [
"MIT"
] | nttoole/AIT-Core | ait/core/test/test_dmc.py | 6,860 | Python |
# Copyright 2018 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy.random import randn
import tensorflow as tf
import pytest
import gpflow
from gpflow import logdensities, settings
from gpflow.test_util import session_tf
from scipy.stats import multivariate_normal as mvn
from numpy.testing import assert_allclose
rng = np.random.RandomState(1)
@pytest.mark.parametrize("x", [randn(4,10), randn(4,1)])
@pytest.mark.parametrize("mu", [randn(4,10), randn(4,1)])
@pytest.mark.parametrize("cov_sqrt", [randn(4,4), np.eye(4)])
def test_multivariate_normal(session_tf, x, mu, cov_sqrt):
cov = np.dot(cov_sqrt, cov_sqrt.T)
L = np.linalg.cholesky(cov)
x_tf = tf.placeholder(settings.float_type)
mu_tf = tf.placeholder(settings.float_type)
gp_result = logdensities.multivariate_normal(
x_tf, mu_tf, tf.convert_to_tensor(L))
gp_result = session_tf.run(gp_result, feed_dict={x_tf: x, mu_tf: mu})
if mu.shape[1] > 1:
if x.shape[1] > 1:
sp_result = [mvn.logpdf(x[:,i], mu[:,i], cov) for i in range(mu.shape[1])]
else:
sp_result = [mvn.logpdf(x.ravel(), mu[:, i], cov) for i in range(mu.shape[1])]
else:
sp_result = mvn.logpdf(x.T, mu.ravel(), cov)
assert_allclose(gp_result, sp_result)
def test_shape_asserts(session_tf):
A = np.random.randn(5)
B = np.random.randn(5)
L = np.tril(np.random.randn(5, 5))
# Static shape check:
with pytest.raises(ValueError):
tA = tf.identity(A)
tB = tf.identity(B)
tL = tf.identity(L)
res = logdensities.multivariate_normal(tA, tB, tL)
# Dynamic shape check:
# the following results in a segfault before PR#964
with pytest.raises(tf.errors.InvalidArgumentError):
vA = tf.placeholder(tf.float64)
vB = tf.placeholder(tf.float64)
vL = tf.placeholder(tf.float64)
res = logdensities.multivariate_normal(vA, vB, vL)
session_tf.run(res, {vA: A, vB: B, vL: L})
| 35.013889 | 90 | 0.688616 | [
"Apache-2.0"
] | a-z-e-r-i-l-a/GPflow | tests/test_logdensities.py | 2,521 | Python |
# -*- coding: utf-8 -*-
"""
wsproto/handshake
~~~~~~~~~~~~~~~~~~
An implementation of WebSocket handshakes.
"""
from collections import deque
from typing import Deque, Dict, Generator, List, Optional, Union
import h11
from .connection import Connection, ConnectionState, ConnectionType
from .events import AcceptConnection, Event, RejectConnection, RejectData, Request
from .extensions import Extension
from .typing import Headers
from .utilities import (
generate_accept_token,
generate_nonce,
LocalProtocolError,
normed_header_dict,
RemoteProtocolError,
split_comma_header,
)
# RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake
WEBSOCKET_VERSION = b"13"
class H11Handshake:
"""A Handshake implementation for HTTP/1.1 connections."""
def __init__(self, connection_type: ConnectionType) -> None:
self.client = connection_type is ConnectionType.CLIENT
self._state = ConnectionState.CONNECTING
if self.client:
self._h11_connection = h11.Connection(h11.CLIENT)
else:
self._h11_connection = h11.Connection(h11.SERVER)
self._connection: Optional[Connection] = None
self._events: Deque[Event] = deque()
self._initiating_request: Optional[Request] = None
self._nonce: Optional[bytes] = None
@property
def state(self) -> ConnectionState:
return self._state
@property
def connection(self) -> Optional[Connection]:
"""Return the established connection.
This will either return the connection or raise a
LocalProtocolError if the connection has not yet been
established.
:rtype: h11.Connection
"""
return self._connection
def initiate_upgrade_connection(self, headers: Headers, path: str) -> None:
"""Initiate an upgrade connection.
        This should be used if the request has already been received and
parsed.
:param list headers: HTTP headers represented as a list of 2-tuples.
:param str path: A URL path.
"""
if self.client:
raise LocalProtocolError(
"Cannot initiate an upgrade connection when acting as the client"
)
upgrade_request = h11.Request(method=b"GET", target=path, headers=headers)
h11_client = h11.Connection(h11.CLIENT)
self.receive_data(h11_client.send(upgrade_request))
def send(self, event: Event) -> bytes:
"""Send an event to the remote.
This will return the bytes to send based on the event or raise
a LocalProtocolError if the event is not valid given the
state.
:returns: Data to send to the WebSocket peer.
:rtype: bytes
"""
data = b""
if isinstance(event, Request):
data += self._initiate_connection(event)
elif isinstance(event, AcceptConnection):
data += self._accept(event)
elif isinstance(event, RejectConnection):
data += self._reject(event)
elif isinstance(event, RejectData):
data += self._send_reject_data(event)
else:
raise LocalProtocolError(
"Event {} cannot be sent during the handshake".format(event)
)
return data
def receive_data(self, data: bytes) -> None:
"""Receive data from the remote.
A list of events that the remote peer triggered by sending
this data can be retrieved with :meth:`events`.
:param bytes data: Data received from the WebSocket peer.
"""
self._h11_connection.receive_data(data)
while True:
try:
event = self._h11_connection.next_event()
except h11.RemoteProtocolError:
raise RemoteProtocolError(
"Bad HTTP message", event_hint=RejectConnection()
)
if (
isinstance(event, h11.ConnectionClosed)
or event is h11.NEED_DATA
or event is h11.PAUSED
):
break
if self.client:
if isinstance(event, h11.InformationalResponse):
if event.status_code == 101:
self._events.append(self._establish_client_connection(event))
else:
self._events.append(
RejectConnection(
headers=event.headers,
status_code=event.status_code,
has_body=False,
)
)
self._state = ConnectionState.CLOSED
elif isinstance(event, h11.Response):
self._state = ConnectionState.REJECTING
self._events.append(
RejectConnection(
headers=event.headers,
status_code=event.status_code,
has_body=True,
)
)
elif isinstance(event, h11.Data):
self._events.append(
RejectData(data=event.data, body_finished=False)
)
elif isinstance(event, h11.EndOfMessage):
self._events.append(RejectData(data=b"", body_finished=True))
self._state = ConnectionState.CLOSED
else:
if isinstance(event, h11.Request):
self._events.append(self._process_connection_request(event))
def events(self) -> Generator[Event, None, None]:
"""Return a generator that provides any events that have been generated
by protocol activity.
        :returns: a generator that yields handshake events such as
            Request, AcceptConnection, RejectConnection and RejectData.
"""
while self._events:
yield self._events.popleft()
############ Server mode methods
def _process_connection_request(self, event: h11.Request) -> Request:
if event.method != b"GET":
raise RemoteProtocolError(
"Request method must be GET", event_hint=RejectConnection()
)
connection_tokens = None
extensions: List[str] = []
host = None
key = None
subprotocols: List[str] = []
upgrade = b""
version = None
headers: Headers = []
for name, value in event.headers:
name = name.lower()
if name == b"connection":
connection_tokens = split_comma_header(value)
elif name == b"host":
host = value.decode("ascii")
continue # Skip appending to headers
elif name == b"sec-websocket-extensions":
extensions = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-key":
key = value
elif name == b"sec-websocket-protocol":
subprotocols = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-version":
version = value
elif name == b"upgrade":
upgrade = value
headers.append((name, value))
if connection_tokens is None or not any(
token.lower() == "upgrade" for token in connection_tokens
):
raise RemoteProtocolError(
"Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
)
if version != WEBSOCKET_VERSION:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Version'",
event_hint=RejectConnection(
headers=[(b"Sec-WebSocket-Version", WEBSOCKET_VERSION)],
status_code=426,
),
)
if key is None:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Key'", event_hint=RejectConnection()
)
if upgrade.lower() != b"websocket":
raise RemoteProtocolError(
"Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
)
if version is None:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Version'", event_hint=RejectConnection()
)
self._initiating_request = Request(
extensions=extensions,
extra_headers=headers,
host=host,
subprotocols=subprotocols,
target=event.target.decode("ascii"),
)
return self._initiating_request
def _accept(self, event: AcceptConnection) -> bytes:
request_headers = normed_header_dict(self._initiating_request.extra_headers)
nonce = request_headers[b"sec-websocket-key"]
accept_token = generate_accept_token(nonce)
headers = [
(b"Upgrade", b"WebSocket"),
(b"Connection", b"Upgrade"),
(b"Sec-WebSocket-Accept", accept_token),
]
if event.subprotocol is not None:
if event.subprotocol not in self._initiating_request.subprotocols:
raise LocalProtocolError(
"unexpected subprotocol {}".format(event.subprotocol)
)
headers.append(
(b"Sec-WebSocket-Protocol", event.subprotocol.encode("ascii"))
)
if event.extensions:
accepts = server_extensions_handshake( # type: ignore
self._initiating_request.extensions, event.extensions
)
if accepts:
headers.append((b"Sec-WebSocket-Extensions", accepts))
response = h11.InformationalResponse(
status_code=101, headers=headers + event.extra_headers
)
self._connection = Connection(
ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
event.extensions,
)
self._state = ConnectionState.OPEN
return self._h11_connection.send(response)
def _reject(self, event: RejectConnection) -> bytes:
if self.state != ConnectionState.CONNECTING:
raise LocalProtocolError(
"Connection cannot be rejected in state %s" % self.state
)
headers = event.headers
if not event.has_body:
headers.append((b"content-length", b"0"))
response = h11.Response(status_code=event.status_code, headers=headers)
data = self._h11_connection.send(response)
self._state = ConnectionState.REJECTING
if not event.has_body:
data += self._h11_connection.send(h11.EndOfMessage())
self._state = ConnectionState.CLOSED
return data
def _send_reject_data(self, event: RejectData) -> bytes:
if self.state != ConnectionState.REJECTING:
raise LocalProtocolError(
"Cannot send rejection data in state {}".format(self.state)
)
data = self._h11_connection.send(h11.Data(data=event.data))
if event.body_finished:
data += self._h11_connection.send(h11.EndOfMessage())
self._state = ConnectionState.CLOSED
return data
############ Client mode methods
def _initiate_connection(self, request: Request) -> bytes:
self._initiating_request = request
self._nonce = generate_nonce()
headers = [
(b"Host", request.host.encode("ascii")),
(b"Upgrade", b"WebSocket"),
(b"Connection", b"Upgrade"),
(b"Sec-WebSocket-Key", self._nonce),
(b"Sec-WebSocket-Version", WEBSOCKET_VERSION),
]
if request.subprotocols:
headers.append(
(
b"Sec-WebSocket-Protocol",
(", ".join(request.subprotocols)).encode("ascii"),
)
)
if request.extensions:
offers = {e.name: e.offer() for e in request.extensions} # type: ignore
extensions = []
for name, params in offers.items():
name = name.encode("ascii")
if params is True:
extensions.append(name)
elif params:
extensions.append(
b"%s; %s" % (name, params.encode("ascii")) # type: ignore
)
if extensions:
headers.append((b"Sec-WebSocket-Extensions", b", ".join(extensions)))
upgrade = h11.Request(
method=b"GET",
target=request.target.encode("ascii"),
headers=headers + request.extra_headers,
)
return self._h11_connection.send(upgrade)
def _establish_client_connection(
self, event: h11.InformationalResponse
) -> AcceptConnection: # noqa: MC0001
accept = None
connection_tokens = None
accepts: List[str] = []
subprotocol = None
upgrade = b""
headers: Headers = []
for name, value in event.headers:
name = name.lower()
if name == b"connection":
connection_tokens = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-extensions":
accepts = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-accept":
accept = value
continue # Skip appending to headers
elif name == b"sec-websocket-protocol":
subprotocol = value
continue # Skip appending to headers
elif name == b"upgrade":
upgrade = value
continue # Skip appending to headers
headers.append((name, value))
if connection_tokens is None or not any(
token.lower() == "upgrade" for token in connection_tokens
):
raise RemoteProtocolError(
"Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
)
if upgrade.lower() != b"websocket":
raise RemoteProtocolError(
"Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
)
accept_token = generate_accept_token(self._nonce)
if accept != accept_token:
raise RemoteProtocolError("Bad accept token", event_hint=RejectConnection())
if subprotocol is not None:
subprotocol = subprotocol.decode("ascii")
if subprotocol not in self._initiating_request.subprotocols:
raise RemoteProtocolError(
"unrecognized subprotocol {}".format(subprotocol),
event_hint=RejectConnection(),
)
extensions = client_extensions_handshake( # type: ignore
accepts, self._initiating_request.extensions
)
self._connection = Connection(
ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
extensions,
self._h11_connection.trailing_data[0],
)
self._state = ConnectionState.OPEN
return AcceptConnection(
extensions=extensions, extra_headers=headers, subprotocol=subprotocol
)
def __repr__(self) -> str:
return "{}(client={}, state={})".format(
self.__class__.__name__, self.client, self.state
)
def server_extensions_handshake(
requested: List[str], supported: List[Extension]
) -> Optional[bytes]:
"""Agree on the extensions to use returning an appropriate header value.
This returns None if there are no agreed extensions
"""
accepts: Dict[str, Union[bool, bytes]] = {}
for offer in requested:
name = offer.split(";", 1)[0].strip()
for extension in supported:
if extension.name == name:
accept = extension.accept(offer)
if accept is True:
accepts[extension.name] = True
elif accept is not False and accept is not None:
accepts[extension.name] = accept.encode("ascii") # type: ignore
if accepts:
extensions: List[bytes] = []
for name, params in accepts.items():
name = name.encode("ascii") # type: ignore
if params is True:
extensions.append(name) # type: ignore
else:
if params == b"":
extensions.append(b"%s" % (name))
else:
extensions.append(b"%s; %s" % (name, params))
return b", ".join(extensions)
return None
def client_extensions_handshake(
accepted: List[str], supported: List[Extension]
) -> List[Extension]:
# This raises RemoteProtocolError is the accepted extension is not
# supported.
extensions = []
for accept in accepted:
name = accept.split(";", 1)[0].strip()
for extension in supported:
if extension.name == name:
extension.finalize(accept)
extensions.append(extension)
break
else:
raise RemoteProtocolError(
"unrecognized extension {}".format(name), event_hint=RejectConnection()
)
return extensions
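# Editor's addition: a small, hedged sketch of a server-side handshake driven
# entirely by this module (run it with ``python -m wsproto.handshake`` so the
# relative imports resolve).  The header values are illustrative assumptions.
if __name__ == "__main__":
    handshake = H11Handshake(ConnectionType.SERVER)
    handshake.initiate_upgrade_connection(
        headers=[
            (b"Host", b"example.com"),
            (b"Connection", b"Upgrade"),
            (b"Upgrade", b"websocket"),
            (b"Sec-WebSocket-Version", b"13"),
            (b"Sec-WebSocket-Key", generate_nonce()),
        ],
        path="/",
    )
    for event in handshake.events():
        if isinstance(event, Request):
            # Accepting the request yields the raw 101 response bytes that
            # would normally be written back to the client socket.
            print(handshake.send(AcceptConnection()))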
| 37.212314 | 88 | 0.570605 | [
"MIT"
] | bluetech/wsproto | wsproto/handshake.py | 17,527 | Python |
import numpy as np
from astropy.io import fits
from scipy.interpolate import interp1d
# Fitting Spline3
def fit_spline3(y, x, order=3, nsum=3):
y_resampled = [np.median(y[i:i + nsum]) for i in range(0, len(y) - len(y) % nsum, nsum)]
x_resampled = np.linspace(0, len(y), len(y_resampled))
# Fitting
f = interp1d(x_resampled, y_resampled, kind=order, bounds_error=True)
# Return function to be constructed with any other x array
return f
# Local Minima and Maxima
def local_minmax(data, nmin=2, nmax=2):
# Identifying indices of local minima-maxima points
id_min = (np.gradient(np.sign(np.gradient(data))) > 0).nonzero()[0] # index of local min
id_max = (np.gradient(np.sign(np.gradient(data))) < 0).nonzero()[0] # index of local max
# Taking values at min/max points
list_min, list_max = data[id_min], data[id_max]
# Sorting minima-maxima values (bigger --> lower)
list_min, id_min = (list(p) for p in zip(*sorted(zip(list_min, id_min), reverse=False)))
list_max, id_max = (list(p) for p in zip(*sorted(zip(list_max, id_max), reverse=True)))
# Taking the desired number of local minima-maxima points
list_min, list_max, id_min, id_max = list_min[0:nmin], list_max[0:nmax], id_min[0:nmin], id_max[0:nmax]
return list_min, list_max, id_min, id_max
def trim_slitedge(flat, plot=True):
# Getting input data
ccddata = fits.getdata(flat, ignore_missing_end=True)
# Collapse flat in the dispersion direction
flat_collapsed = fits.getdata(flat, ignore_missing_end=True).sum(axis=1) / ccddata.shape[1]
lines = np.arange(0, flat_collapsed.size, 1)
# Excluding first pixels in the spatial direction
cut = 3
c_flat = flat_collapsed[cut:-cut]
c_lines = np.arange(0, c_flat.size, 1)
    # Fitting the spline. It works very well with order=5, nsum=2
func_splin3 = fit_spline3(c_flat, c_lines, order=5, nsum=2)
smooth_flat = func_splin3(c_lines)
    # Compute 1st and 2nd derivatives of the smoothed flat
dy = np.gradient(smooth_flat)
dy2 = np.gradient(dy)
    # Regions to compute local minima-maxima
    # Region one: represents the first 40 percent of all data
    # Region two: ... the last 40%
pixa, pixb = int(len(c_flat) * 0.4), int(len(c_flat) * 0.6)
dy2_one, dy2_two = dy2[0:pixa], dy2[pixb:]
# Reg. 1: Compute local min/max of the 2nd derivative
list_min_1, list_max_1, id_min_1, id_max_1 = local_minmax(dy2_one, nmin=1, nmax=1)
list_min_2, list_max_2, id_min_2, id_max_2 = local_minmax(dy2_two, nmin=1, nmax=1)
    # Indices have to be shifted back to the original indices of the function dy2
id_min_2 = np.array(id_min_2) + pixb
    # Slit edges are the local maxima/minima 1/2 [accounting for the trimmed pixels]
slit_1, slit_2 = int(np.array(id_min_1) + cut), int(np.array(id_min_2) + cut)
print slit_1, slit_2
if plot is True:
import matplotlib.pyplot as plt
c_lines += cut
plt.plot(lines, flat_collapsed, 'k-', label='Flat Collapsed')
plt.plot(lines[slit_1:slit_2], flat_collapsed[slit_1:slit_2], 'r-', label = 'Cutted Flat')
plt.plot(c_lines, dy, 'g-', label="Dy/dx")
plt.plot(c_lines, dy2, 'y-', label="Dy2/dx")
plt.plot(slit_1, list_min_1, 'bo', label='Slit Edge 1 ')
plt.plot(slit_2, list_min_2, 'ro', label='Slit Edge 2')
plt.xlim(lines.min() - 50, lines.max() + 50)
plt.legend(loc='best')
plt.show()
return slit_1, slit_2
flat = '/home/davidsanm/PyCharmProjects/GoodmanDataReduction/2016-03-20/RED/master_flat_600.fits'
trim_slitedge(flat, plot = True) | 37.510417 | 107 | 0.677312 | [
"MIT"
] | simontorres/goodman_ccdreduction | trim_slitedge.py | 3,601 | Python |
# -*- encoding: utf-8 -*-
# $Id: __init__.py,v 1.8.2.2 2007/05/22 21:06:52 customdesigned Exp $
#
# This file is part of the pydns project.
# Homepage: http://pydns.sourceforge.net
#
# This code is covered by the standard Python License.
#
# __init__.py for DNS class.
__version__ = '2.3.1'
import Type,Opcode,Status,Class
from Base import DnsRequest, DNSError
from Lib import DnsResult
from Base import *
from Lib import *
Error=DNSError
from lazy import *
Request = DnsRequest
Result = DnsResult
from Serialization import Serialize,DeSerialize
#
# $Log: __init__.py,v $
# Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned
# utf-8 in __init__.py
#
# Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned
# Release 2.3.1
#
# Revision 1.8 2002/05/06 06:17:49 anthonybaxter
# found that the old README file called itself release 2.2. So make
# this one 2.3...
#
# Revision 1.7 2002/05/06 06:16:15 anthonybaxter
# make some sort of reasonable version string. releasewards ho!
#
# Revision 1.6 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.4 2001/11/26 17:57:51 stroeder
# Added __version__
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
| 25.881356 | 80 | 0.722986 | [
"BSD-2-Clause"
] | levush/hipl | tools/hipdnsproxy/DNS/__init__.py | 1,527 | Python |
import os
import sys
import setuptools
# To prevent importing about and thereby breaking the coverage info we use this
# exec hack
about = {}
with open('python_utils/__about__.py') as fp:
exec(fp.read(), about)
if os.path.isfile('README.rst'):
long_description = open('README.rst').read()
else:
long_description = 'See http://pypi.python.org/pypi/python-utils/'
needs_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
if __name__ == '__main__':
setuptools.setup(
name='python-utils',
version=about['__version__'],
author=about['__author__'],
author_email=about['__author_email__'],
description=about['__description__'],
url=about['__url__'],
license='BSD',
packages=setuptools.find_packages(),
long_description=long_description,
install_requires=['six'],
tests_require=['pytest'],
setup_requires=[] + pytest_runner,
classifiers=['License :: OSI Approved :: BSD License'],
)
| 27.615385 | 79 | 0.659239 | [
"BSD-3-Clause"
] | dvzrv/python-utils | setup.py | 1,077 | Python |
import logging as log
import cv2
import sys
import numpy as np
class LandmarksDetectionModel:
'''
Class for the Face Landmarks Detection Model.
    Loads and configures inference plugins for the specified target device,
    and performs the specified infer requests in either synchronous or
    asynchronous mode.
'''
def __init__(self, model_name, device='CPU', extensions=None, async_infer=True):
'''
Set instance variables.
'''
self.plugin = None
self.network = None
self.exec_network = None
self.infer_request_handle = None
self.input_blob = None
self.input_shape = None
self.output_blob = None
self.output_shape = None
self.model_name = model_name
self.device = device
self.extensions = extensions
self.async_infer = async_infer
def load_model(self, plugin):
'''
        This method loads the model (in IR format) onto the device specified by the user.
Default device is CPU.
'''
# Get model
model_structure = self.model_name + '.xml'
model_weights = self.model_name + '.bin'
# Initialize the plugin - load the inference engine API
# Plugin is the one already created for the Face Detection model
self.plugin = plugin
# Add a CPU extension, if applicable
if self.extensions and 'CPU' in self.device:
self.plugin.add_extension(self.extensions, self.device)
# Read the IR as IENetwork
try:
self.network = self.plugin.read_network(model=model_structure, weights=model_weights)
except:
raise ValueError("Could not initialise the network. Have you entered the correct model path?")
# Check if model and CPU plugin are supported
if self.device == 'CPU':
self.check_model()
# Load the IENetwork into the plugin
self.exec_network = self.plugin.load_network(network=self.network, device_name=self.device, num_requests=1)
# Get the input and output layers
self.input_blob = next(iter(self.network.inputs))
self.input_shape = self.network.inputs[self.input_blob].shape
self.output_blob = next(iter(self.network.outputs))
self.output_shape = self.network.outputs[self.output_blob].shape
return
def predict(self, image):
'''
This method is meant for running predictions on the input image.
'''
if np.all(np.array(image.shape)):
# Create input image to feed into the network
net_input = {self.input_blob: self.preprocess_input(image)}
# Start inference. Infer mode (async/sync) is input by user
if self.async_infer:
self.infer_request_handle = self.exec_network.start_async(request_id=0, inputs=net_input)
# Wait for the result of the inference
if self.exec_network.requests[0].wait(-1) == 0:
# Get result of the inference request
outputs = self.infer_request_handle.outputs[self.output_blob]
eyes_coords, crop_left, crop_right = self.preprocess_output(outputs, image)
else:
self.infer_request_handle = self.exec_network.infer(inputs=net_input)
# Get result of the inference request
outputs = self.infer_request_handle[self.output_blob]
eyes_coords, crop_left, crop_right = self.preprocess_output(outputs, image)
else:
eyes_coords = []
crop_left = []
crop_right = []
return eyes_coords, crop_left, crop_right
def check_model(self):
'''
        This method checks whether the model (along with the plugin) is supported on the CPU device.
        If anything is missing (such as a CPU extension), let the user know and exit the program.
'''
supported_layers = self.plugin.query_network(network=self.network, device_name='CPU')
unsupported_layers = [l for l in self.network.layers.keys() if l not in supported_layers]
if len(unsupported_layers) != 0:
log.error("Unsupported layers found: {}".format(unsupported_layers))
if self.extensions:
log.error("The extensions specified do not support some layers. Please specify a new extension.")
else:
log.error(
"Please try to specify an extension library path by using the --extensions command line argument.")
sys.exit(1)
return
def preprocess_input(self, image):
'''
Method to process inputs before feeding them into the model for inference.
'''
image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
image = image.transpose((2, 0, 1))
image = image.reshape(1, *image.shape)
return image
def preprocess_output(self, outputs, image):
'''
Method to process outputs before feeding them into the next model for
inference or for the last step of the app.
'''
w = image.shape[1]
h = image.shape[0]
outputs = outputs[0]
xl, yl = int(outputs[0][0][0] * w), int(outputs[1][0][0] * h)
xr, yr = int(outputs[2][0][0] * w), int(outputs[3][0][0] * h)
eyes_coords = [xl, yl, xr, yr]
# Using the fact that eyes take 1/5 of your face width
# define bounding boxes around the eyes according to this
square_size = int(w / 10)
left_eye_box = [xl - square_size, yl - square_size, xl + square_size, yl + square_size]
right_eye_box = [xr - square_size, yr - square_size, xr + square_size, yr + square_size]
crop_left = image[left_eye_box[1]:left_eye_box[3], left_eye_box[0]:left_eye_box[2]]
crop_right = image[right_eye_box[1]:right_eye_box[3], right_eye_box[0]:right_eye_box[2]]
return eyes_coords, crop_left, crop_right
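# Usage sketch (hedged; `core`, `model` and `face_crop` are illustrative names for the
# shared IECore plugin, an instance of this class, and a cropped face frame produced
# by an upstream face detector):
#   model.load_model(core)
#   eyes_coords, left_eye, right_eye = model.predict(face_crop)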
| 38.74359 | 119 | 0.627895 | [
"MIT"
] | ElisaCovato/Computer-pointer-controller---Intel-Edge-AI-Nanodegree | src/facial_landmarks_detection.py | 6,044 | Python |
"""
ASGI config for FYP project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FYP.settings')
application = get_asgi_application()
| 22.529412 | 78 | 0.780679 | [
"BSD-3-Clause"
] | MustafaAbbas110/FinalProject | src/FYP/FYP/asgi.py | 383 | Python |
# -*- coding: utf-8 -*-
from __future__ import print_function
from IPython import get_ipython
from IPython.display import (
display,
Javascript,
)
from IPython.core import magic_arguments
from IPython.core.magic import (
Magics,
magics_class,
cell_magic,
)
from IPython.utils.importstring import import_item
import yaml
__version__ = "0.2.0"
@magics_class
class YAMLMagics(Magics):
"""
Write and load YAML in the IPython Notebook. Uses SafeLoader by default.
Example:
%%yaml x -lyaml.Loader
foo:
bar: baz
"""
def __init__(self, shell):
super(YAMLMagics, self).__init__(shell)
@cell_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
"var_name",
default=None,
nargs="?",
help="""Name of local variable to set to parsed value"""
)
@magic_arguments.argument(
"-l", "--loader",
default="yaml.SafeLoader",
help="""Dotted-notation class to use for loading"""
)
def yaml(self, line, cell):
line = line.strip()
args = magic_arguments.parse_argstring(self.yaml, line)
display(Javascript(
"""
require(
[
"notebook/js/codecell",
"codemirror/mode/yaml/yaml"
],
function(cc){
cc.CodeCell.options_default.highlight_modes.magic_yaml = {
reg: ["^%%yaml"]
}
}
);
"""))
loader = get_ipython().user_global_ns.get(args.loader, None)
if loader is None:
loader = import_item(args.loader)
try:
val = yaml.load(cell, Loader=loader)
except yaml.YAMLError as err:
print(err)
return
if args.var_name is not None:
get_ipython().user_ns[args.var_name] = val
else:
return val
def load_ipython_extension(ip):
ip = get_ipython()
ip.register_magics(YAMLMagics)
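# Usage sketch (hedged): in an IPython/Jupyter session run `%load_ext yamlmagic`,
# then use the `%%yaml` cell magic as shown in the YAMLMagics docstring above.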
| 23.222222 | 78 | 0.558373 | [
"BSD-3-Clause"
] | bollwyvl/yamlmagic | yamlmagic.py | 2,090 | Python |
import h5py
import pickle
import numpy as np
# import read_affect_data as r
# from tqdm import tqdm
import random
from PIL import Image, ImageOps, ImageEnhance
import colorsys
# def read_h5_data_set(path):
# f = h5py.File(path, 'r')
# time_stamps = list(f[list(f.keys())[0]].keys())
# d = {time : dict() for time in time_stamps}
# for feature in list(f.keys()):
# if hasattr(f[feature], 'keys'):
# for time in tqdm(list(f[feature].keys())):
# k = list(f[feature][time].keys())[0]
# d[time][feature] = np.array(f[feature][time][k])
# return d
# def read_pkl_data_set(path):
# f = r.load_pickle(path)
# time_stamps = list(f[list(f.keys())[0]].keys())
# d = {time : dict() for time in time_stamps}
# for feature in list(f.keys()):
# if hasattr(f[feature], 'keys'):
# for time in tqdm(list(f[feature].keys())):
# if hasattr(f[feature][time], 'keys'):
# for k in list(f[feature][time].keys()):
# d[time][feature] = np.array(f[feature][time][k])
# return d
##############################################################################
# Visual
def visual_robustness(tests, noise_level=0.3, gray=True, contrast=True, s_and_p=True, gaus=True, rot=True, crop=True):
noises = []
if gray:
noises.append(grayscale)
if contrast:
noises.append(low_contrast)
if s_and_p:
noises.append(salt_and_pepper)
if gaus:
noises.append(gaussian)
if rot:
noises.append(rotate)
if crop:
noises.append(random_crop)
robustness_tests = []
for i in range(len(tests)):
img = Image.fromarray(tests[i])
for noise in noises:
img = noise(img, noise_level)
robustness_tests.append(np.array(img))
return robustness_tests
def grayscale(img, p):
if np.random.sample() <= p:
return ImageOps.grayscale(img)
else:
return img
def low_contrast(img, factor):
    if np.random.sample() <= factor:
enhancer = ImageEnhance.Contrast(img)
return enhancer.enhance(factor)
else:
return img
def inversion(img, p):
if np.random.sample() <= p:
return ImageOps.invert(img)
else:
return img
def WB(img, p):
if np.random.sample() <= p:
kelvin_table = {1000: (255, 56, 0), 1500: (255, 109, 0), 2000: (255, 137, 18), 2500: (255, 161, 72), 3000: (255, 180, 107), 3500: (255, 196, 137), 4000: (255, 209, 163), 4500: (255, 219, 186), 5000: (255, 228, 206), 5500: (
255, 236, 224), 6000: (255, 243, 239), 6500: (255, 249, 253), 7000: (245, 243, 255), 7500: (235, 238, 255), 8000: (227, 233, 255), 8500: (220, 229, 255), 9000: (214, 225, 255), 9500: (208, 222, 255), 10000: (204, 219, 255)}
        temp = np.random.choice(list(kelvin_table.keys()))
r, g, b = kelvin_table[temp]
matrix = (r / 255.0, 0.0, 0.0, 0.0,
0.0, g / 255.0, 0.0, 0.0,
0.0, 0.0, b / 255.0, 0.0)
return img.convert('RGB', matrix)
else:
return img
def colorize(img, p):
if np.random.sample() <= p:
color = np.random.choice(['red', 'blue', 'green'])
layer = Image.new('RGB', img.size, color)
return Image.blend(img, layer, 0.3)
else:
return img
def salt_and_pepper(img, p):
if np.random.sample() <= p:
output = np.copy(np.array(img))
nb_salt = np.ceil(p*output.size*0.5)
coords = [np.random.randint(0, i-1, int(nb_salt))
for i in output.shape]
for i in coords:
output[i] = 1
nb_pepper = np.ceil(p*output.size*0.5)
coords = [np.random.randint(0, i-1, int(nb_pepper))
for i in output.shape]
for i in coords:
output[i] = 0
return Image.fromarray(output)
else:
return img
def gaussian(img, p):
if np.random.sample() <= p:
height, width = np.array(img).shape
gauss = np.random.normal(0, p, (height, width))
return Image.fromarray((np.array(img)+gauss).astype('uint8'))
else:
return img
def rotate(img, p):
if np.random.sample() <= p:
angle = np.random.random_sample()*40-20
return img.rotate(angle, Image.BILINEAR)
else:
return img
def horizontal_flip(img, p):
if np.random.sample() <= p:
return img.transpose(Image.FLIP_LEFT_RIGHT)
else:
return img
def random_crop(img, p):
if np.random.sample() <= p:
dim = np.array(img).shape
height = dim[0]
width = dim[1]
cropped_height = height / 5
cropped_width = width / 5
init_height = np.random.random_sample() * cropped_height
init_width = np.random.random_sample() * cropped_width
end_height = height - cropped_height + init_height
end_width = width - cropped_width + init_width
return img.crop((init_width, init_height, end_width, end_height)).resize((height, width))
else:
return img
def periodic(img, periodic_noise_filename="periodic_noise"):
height = img.height
width = img.width
output = []
for i in range(6):
noise = Image.open("{}_{}.png".format(
periodic_noise_filename, i+1)).convert("RGBA")
noise = random_crop(rotate(noise.resize(
(width*2, height*2)), np.random.random_sample()*360, 'white'), height, width)
output.append(Image.blend(img.convert("RGBA"), noise, 0.3))
return output
##############################################################################
# Text
def text_robustness(tests, noise_level=0.3, swap=True, rand_mid=True, typo=True, sticky=True, omit=True):
noises = []
if swap:
noises.append(swap_letter)
if rand_mid:
noises.append(random_mid)
if typo:
noises.append(qwerty_typo)
if sticky:
noises.append(sticky_keys)
if omit:
noises.append(omission)
robustness_tests = []
for i in range(len(tests)):
newtext = []
text = tests[i].lower().split()
for word in text:
if len(word) > 3 and np.random.sample() <= noise_level:
mode = np.random.randint(len(noises))
newtext.append(noises[mode](word))
else:
newtext.append(word)
robustness_tests.append(' '.join(newtext))
return np.array(robustness_tests)
def last_char(word):
for i in range(len(word)):
if word[len(word)-1-i].isalpha():
return len(word) - 1 - i
def swap_letter(word):
# swap two random adjacent letters
last = last_char(word)
pos = np.random.randint(last-2) + 1
return word[:pos] + word[pos+1] + word[pos] + word[pos+2:]
def random_mid(word):
# randomly permute the middle chunk of a word (all letters except the first and last letter)
last = last_char(word)
mid = [char for char in word[1:last]]
np.random.shuffle(mid)
return word[0]+''.join(mid)+word[last:]
def qwerty_typo(word, num_typo=1):
# randomly replace num_typo number of letters of a word to a one adjacent to it on qwerty keyboard
qwerty = {'q': ['w'], 'w': ['q', 'e', 's'], 'e': ['w', 'r', 'd'], 'r': ['e', 't', 'f'], 't': ['r', 'g', 'y'], 'y': ['t', 'u', 'h'], 'u': ['y', 'i', 'j'], 'i': ['u', 'o', 'k'], 'o': ['i', 'p', 'l'], 'p': ['o'], 'a': ['q', 's', 'z'], 's': ['a', 'w', 'd', 'x', 'z'], 'd': ['s', 'e', 'f', 'x', 'c'], 'f': ['d', 'r', 'g', 'c', 'v'], 'g': [
'f', 't', 'h', 'v', 'b'], 'h': ['g', 'y', 'j', 'b', 'n'], 'j': ['h', 'u', 'k', 'n', 'm'], 'k': ['j', 'i', 'l', 'm'], 'l': ['k', 'o'], 'z': ['a', 's', 'x'], 'x': ['z', 's', 'd', 'c'], 'c': ['x', 'd', 'f', 'v'], 'v': ['c', 'f', 'g', 'b'], 'b': ['v', 'g', 'h', 'n'], 'n': ['b', 'h', 'm', 'j'], 'm': ['n', 'j', 'k']}
last = last_char(word)
typos = np.arange(last+1)
np.random.shuffle(typos)
for i in range(num_typo):
typo = qwerty[word[typos[i]]]
key = typo[np.random.randint(len(typo))]
word = word[:typos[i]] + key + word[typos[i]+1:]
return word
def sticky_keys(word, num_sticky=1):
# randomly repeat num_sticky number of letters of a word
last = last_char(word)
sticky = np.arange(last+1)
np.random.shuffle(sticky)
for i in range(num_sticky):
word = word[:sticky[i]] + word[sticky[i]] + word[sticky[i]:]
return word
def omission(word, num_omit=1):
# randomly omit num_omit number of letters of a word
last = last_char(word)
for i in range(num_omit):
omit = np.random.randint(last-1) + 1
word = word[:omit] + word[omit+1:]
last -= 1
return word
##############################################################################
# Audio
def audio_robustness(tests, noise_level=0.3, noises=None):
if noises == None:
noises = [additive_white_gaussian_noise,
audio_random_dropout, audio_structured_dropout]
robustness_tests = np.zeros(tests.shape)
for i in range(len(tests)):
if np.random.sample() <= noise_level:
mode = np.random.randint(len(noises))
robustness_tests[i] = noises[mode](tests[i], noise_level)
return robustness_tests
def additive_white_gaussian_noise(signal, noise_level):
# SNR = 10 * log((RMS of signal)^2 / (RMS of noise)^2)
# RMS_s = np.sqrt(np.mean(signal*signal))
# RMS_n = np.sqrt(RMS_s*RMS_s / (np.power(10, SNR/10)))
noise = np.random.normal(0, noise_level, signal.shape[0])
return signal + noise
def audio_structured_dropout(sig, p, step=10):
# each consecutive time steps are chosen with probability p to be dropped
res = [sig[i] for i in range(len(sig))]
for i in range(len(res)-step+1):
if (res[i] != 0) and np.random.random_sample() < p:
for j in range(step):
res[i+j] = 0
return res
def audio_random_dropout(sig, p):
    return audio_structured_dropout(sig, p, step=1)
##############################################################################
# Time-Series
def timeseries_robustness(tests, noise_level=0.3, noise=True, rand_drop=True, struct_drop=True, modality_map=None):
robust_tests = np.array(tests)
if noise:
robust_tests = white_noise(robust_tests, noise_level)
if rand_drop:
robust_tests = random_drop(robust_tests, noise_level)
if struct_drop:
robust_tests = structured_drop(robust_tests, noise_level, modality_map)
return robust_tests
# add noise sampled from zero-mean Gaussian with standard deviation p at every time step
def white_noise(data, p):
for i in range(len(data)):
for time in range(len(data[i])):
data[i][time] += np.random.normal(0, p)
return data
# each entry is dropped independently with probability p
def random_drop(data, p):
for i in range(len(data)):
for time in range(len(data[i])):
for feature in range(len(data[i][time])):
if np.random.random_sample() < p:
data[i][time][feature] = 0
# else:
# result = dict()
# for time in data:
# for feature in data[time]:
# if np.random.random_sample() < p:
# result[time][feature] = np.zeros(data[time][feature].shape)
# else:
# result[time][feature] = data[time][feature]
return data
# independently for each modality, each time step is chosen with probability p
# at which all feature dimensions are dropped
def structured_drop(data, p, modality_map):
for i in range(len(data)):
for time in range(len(data[i])):
if np.random.random_sample() < p:
data[i][time] = np.zeros(data[i][time].shape)
# else:
# result = dict()
# for time in data:
# for modality in modality_map.keys():
# if np.random.random_sample() < p:
# for feature in modality_map[modality]:
# result[time][feature] = np.zeros(data[time][feature].shape)
# else:
# for feature in modality_map[modality]:
# result[time][feature] = data[time][feature]
return data
##############################################################################
# Tabular
def add_tabular_noise(tests, noise_level=0.3, drop=True, swap=True):
robust_tests = np.array(tests)
if drop:
robust_tests = drop_entry(robust_tests, noise_level)
if swap:
robust_tests = swap_entry(robust_tests, noise_level)
return robust_tests
def drop_entry(data, p):
for i in range(len(data)):
for j in range(len(data[i])):
if np.random.random_sample() < p:
data[i][j] = 0
else:
data[i][j] = data[i][j]
return data
def swap_entry(data, p):
for i in range(len(data)):
for j in range(1, len(data[i])):
if np.random.random_sample() < p:
                data[i][j], data[i][j-1] = data[i][j-1], data[i][j]
return data
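# Usage sketch (hedged; the arrays/strings below are illustrative placeholders):
#   noisy_imgs = visual_robustness(list_of_uint8_image_arrays, noise_level=0.3)
#   noisy_text = text_robustness(["an example transcript"], noise_level=0.3)
#   noisy_ts = timeseries_robustness(batch_of_timeseries, noise_level=0.3)
#   noisy_tab = add_tabular_noise(tabular_batch, noise_level=0.3)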
if __name__ == '__main__':
print('='*5 + 'Multi Affect' + '='*5)
print('1. CMU-MOSI, Aligned')
print('2. CMU-MOSI, Unaligned')
print('3. CMU-MOSEI, Aligned')
print('4. CMU-MOSEI, Unaligned')
print('5. CMU-POM, Aligned')
print('6. CMU-POM, Unaligned')
print('7. UR-Funny')
print('8. Sarcasm')
print('9. Deception')
opt = int(input('Input option: '))
print('='*22)
if opt == 1:
data = read_h5_data_set('./mosi/mosi.hdf5')
modality_map = {'vision': ['FACET_4.2', 'OpenFace_1'], 'text': [
'words'], 'vocal': ['COVAREP', 'OpenSmile_emobase2010']}
elif opt == 2:
print("To be implemented!")
# data = read_h5_data_set('./mosi/mosi_unalign.hdf5')
elif opt == 3:
data = read_h5_data_set('./mosei/mosei.hdf5')
modality_map = {'vision': ['OpenFace_2'],
'text': ['words'], 'vocal': ['COVAREP']}
elif opt == 4:
print("To be implemented!")
# data = read_h5_data_set('./mosei/mosei_unalign.hdf5')
elif opt == 5:
data = read_h5_data_set('./pom/pom.hdf5')
modality_map = {'vision': ['FACET_4.2', 'OpenFace2'], 'text': [
'words'], 'vocal': ['COVAREP']}
elif opt == 6:
print("To be implemented!")
# data = read_h5_data_set('./pom/pom_unalign.hdf5')
elif opt == 7:
data = read_pkl_data_set('./urfunny/urfunny.pkl')
# time = data[list(data.keys())[0]]
# k = data[list(data[time].keys())[0]]
elif opt == 8:
print("To be implemented!")
# display_sarcasm_data_set('./sarcasm/sarcasm.pkl')
elif opt == 9:
print("To be implemented!")
# display_pkl_data_set('./deception/deception.pkl')
else:
print('Wrong Input!')
| 34.130137 | 338 | 0.553415 | [
"MIT"
] | HughMun/MultiBench | deprecated/robustness_tests_draft.py | 14,949 | Python |
from . import rotor
| 7 | 13 | 0.785714 | [
"MIT"
] | HydrogenC/neox-tools | scripts/__init__.py | 14 | Python |
import os, sys
import ROOT
from ROOT import TH1F,TH2F,TFile,TTree,TCanvas, TProfile, TNtuple, gErrorIgnoreLevel, kInfo, kWarning
from tqdm import tqdm
from particle import Particle, PDGID
tqdm_disable = False
ROOT.gErrorIgnoreLevel = kWarning;
File = TFile("/home/kshi/Zprime/Zp_data_Ntuple/WmTo3l_ZpM45.root","READ")
tree = File.Get("Ana/passedEvents")
nEntries = tree.GetEntries()
W, p, none, other = 0, 0, 0, 0
others = []
for i in tqdm(range(0, nEntries)):
tree.GetEntry(i)
#for j in range(0,tree.lep_matchedR03_MomMomId.size()):
# if abs(tree.lep_matchedR03_MomMomId[j])>=11 and abs(tree.lep_matchedR03_MomMomId[j])<=18:
# print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " MomMomid is: " + lepton#str(tree.lep_matchedR03_MomMomId[j])
#for j in range(0,tree.lep_matchedR03_PdgId.size()):
# if (abs(tree.lep_matchedR03_PdgId[j])<11 or abs(tree.lep_matchedR03_PdgId[j]>18)) and tree.lep_matchedR03_PdgId[j]!=0:
# print "Event:" + str(tree.Event) + " has lepton id of " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name
#for j in range(0,tree.GENlep_id.size()):
# if PDGID(tree.GENlep_id[j]).is_valid==False:
# print "Invalid lep id " + str(tree.GENlep_id[j])
# if PDGID(tree.GENlep_MomId[j]).is_valid==False:
# print "Invalid lep mom id " + str(tree.GENlep_MomId[j])
# if PDGID(tree.GENlep_MomMomId[j]).is_valid==False:
# print "Invalid lep mom mom id " + str(tree.GENlep_MomMomId[j])
# else:
# print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.GENlep_id[j]).name + " that came from a " + Particle.from_pdgid(tree.GENlep_MomId[j]).name + " which came from a " + Particle.from_pdgid(tree.GENlep_MomMomId[j]).name
for j in range(0,tree.lep_matchedR03_PdgId.size()):
#if PDGID(tree.lep_matchedR03_PdgId[j]).is_valid==False:
# print "Invalid lep id " + str(tree.lep_matchedR03_PdgId[j])
#if PDGID(tree.lep_matchedR03_MomId[j]).is_valid==False:
# print "Invalid lep mom id " + str(tree.lep_matchedR03_MomId[j])
#if PDGID(tree.lep_matchedR03_MomMomId[j]).is_valid==False:
# print "Invalid lep mom mom id " + str(tree.lep_matchedR03_MomMomId[j])
##if tree.lep_matchedR03_PdgId[j]!=999888 and tree.lep_matchedR03_MomId!=999888 and tree.lep_matchedR03_MomMomId[j]!=999888:
## print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + " that came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomId[j]).name + " which came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name
#elif tree.lep_matchedR03_MomId[j]==999888:
# print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + " that came from a " + str(tree.lep_matchedR03_MomId[j]) + " which came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name
if tree.lep_matchedR03_MomId[j]==999888:
if abs(tree.lep_matchedR03_MomMomId[j])==24:
W+=1
elif abs(tree.lep_matchedR03_MomMomId[j])==2212:
p+=1
elif abs(tree.lep_matchedR03_MomMomId[j])==0:
none+=1
else:
other+=1
others.append(tree.lep_matchedR03_MomMomId[j])
print "Sources of Z':"
print "W = " + str(W) + ", p = " + str(p) + ", none = " + str(none) + ", other = " + str(other)
for i in range(0, len(others)):
print "Other MomMomId: " + str(others[i])
| 52.636364 | 295 | 0.68365 | [
"MIT"
] | Nik-Menendez/PyCudaAnalyzer | Wto3l/mom_counting.py | 3,474 | Python |
import logging
from korbit.client.korbit_client import KorbitClient
logging.basicConfig(level=logging.INFO)
properties_sandbox_file = '../properties_sandbox_test.json'
context_sandbox_file = '../context_sandbox.json'
kbclient = KorbitClient(properties_sandbox_file, context_sandbox_file)
print(kbclient.getUserInfo())
# Buy
# print( kbclient.buy(price=300000, coin_amount=1) )
# # Sell
# print( kbclient.sell(price=300000, coin_amount=1) )
print( kbclient.getOpenOrders() )
# Wallet Test
wallet = kbclient.getWallet()
balance = wallet['balance']
pending_orders = wallet['pendingOrders']
available = wallet['available']
print(balance)
print(pending_orders)
print(available) | 24.5 | 70 | 0.78863 | [
"MIT"
] | 0kim/korbit_client | test/korbit/client/korbit_client_tests.py | 694 | Python |
import asyncio
import socket
from stor.server.server import StorServer
from stor.types.peer_info import PeerInfo
def start_reconnect_task(server: StorServer, peer_info_arg: PeerInfo, log, auth: bool):
"""
Start a background task that checks connection and reconnects periodically to a peer.
"""
# If peer_info_arg is already an address, use it, otherwise resolve it here.
if peer_info_arg.is_valid():
peer_info = peer_info_arg
else:
peer_info = PeerInfo(socket.gethostbyname(peer_info_arg.host), peer_info_arg.port)
async def connection_check():
while True:
peer_retry = True
for _, connection in server.all_connections.items():
if connection.get_peer_info() == peer_info or connection.get_peer_info() == peer_info_arg:
peer_retry = False
if peer_retry:
log.info(f"Reconnecting to peer {peer_info}")
try:
await server.start_client(peer_info, None, auth=auth)
except Exception as e:
log.info(f"Failed to connect to {peer_info} {e}")
await asyncio.sleep(3)
return asyncio.create_task(connection_check())
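# Usage sketch (hedged; `server` is an existing StorServer, `log` a logging.Logger,
# and the host/port below are placeholders):
#   task = start_reconnect_task(server, PeerInfo("203.0.113.5", 8444), log, auth=False)
#   ...
#   task.cancel()  # stop the periodic reconnect loop on shutdown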
| 37.424242 | 106 | 0.647773 | [
"Apache-2.0"
] | Stor-Network/stor-blockchain | stor/server/reconnect_task.py | 1,235 | Python |
"""
Generates code metrics for a given project. Whereas code_metrics.py operates
on a single stream of source code input, this program walks a project tree and
generates reports based on all of the source code found.
TODO: project config should be supplied as input, not imported
"""
import os, shutil
import code_metrics, metrics_formatter, stats, config
def find_available_filename(filename):
if not os.path.exists(filename):
return filename
attempts = 1
filename += str(attempts)
while os.path.exists(filename):
attempts += 1
if (attempts > 999):
print('error: could not find available filename', filename)
exit()
filename = filename[:len(filename)-1] + str(attempts)
return filename
def is_code_file(path):
filename, file_ext = os.path.splitext(path)
return file_ext in config.code_filename_extensions
def find_files(root_path, filter):
result = []
for root, dirs, files in os.walk(root_path):
for file_name in files:
if not filter(file_name):
continue
path = os.path.join(root, file_name)
result.append(path)
return result
def add_project_totals(project_report, file_reports):
project_report['file_count'] = len(file_reports)
project_report['function_count'] = 0
project_report['line_count'] = 0
project_report['lines_ending_in_whitespace_count'] = 0
project_report['line_length_distribution'] = {}
project_report['line_indent_distribution'] = {}
for filename, file_report in file_reports.items():
if file_report == {}:
continue
project_report['function_count'] += len(file_report['functions'])
project_report['line_count'] += file_report['line_count']
# TODO: figure out how to aggregate project stats like this
#project_report['lines_ending_in_whitespace_count'] += file_report['lines_ending_in_whitespace_count']
#stats.merge_into_distribution(project_report['line_length_distribution'], file_report['line_length_distribution'])
#stats.merge_into_distribution(project_report['line_indent_distribution'], file_report['line_indent_distribution'])
def report(project_root):
file_reports = {}
for path in find_files(project_root, is_code_file):
target_lang = code_metrics.file_ext_lang(path)
with open(path, 'r') as input_file:
try:
file_reports[path] = code_metrics.report(path, input_file.read(), target_lang)
except IOError:
continue
project_report = {
'source_path': project_root,
'files': file_reports
}
add_project_totals(project_report, file_reports)
return project_report
def write_report_file(report, path, target_dir):
if report == {}:
return
filename = metrics_formatter.convert_path_to_report_filename(path)
out_file_path = target_dir + '/' + filename
out_file_path = find_available_filename(out_file_path)
with open(out_file_path, 'w') as output_file:
metrics_formatter.write_report(report, 'html', output_file)
def write_report(project_report, target_dir):
if os.path.exists(target_dir):
print('error: cannot create output dir', target_dir)
exit()
os.mkdir(target_dir)
with open(target_dir + '/' + 'index.html', 'w') as output_file:
metrics_formatter.write_project_index(project_report, 'html', output_file)
for path, report in project_report['files'].items():
write_report_file(report, path, target_dir)
if __name__ == '__main__':
# TODO: make output format configurable
output_dir = config.project_report_output_dir # TODO: also accept command line flag
output_dir = find_available_filename(output_dir)
write_report(report(config.project_root), output_dir)
shutil.copy('Chart.min.js', output_dir)
| 34.466019 | 117 | 0.770423 | [
"MIT"
] | parappayo/code-metrics | project_metrics.py | 3,550 | Python |
"""
switchboard.manager
~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import logging
import sqlalchemy as sqla
from .base import ModelDict
from .models import (
Model,
Switch,
DISABLED, SELECTIVE, GLOBAL, INHERIT,
INCLUDE, EXCLUDE,
)
from .proxy import SwitchProxy
from .settings import settings, Settings
from .store import SQLAlchemyStore
log = logging.getLogger(__name__)
# These are (mostly) read-only module variables since we want it shared among
# any and all threads. The only exception to read-only is when they are
# populated on Switchboard startup (i.e., operator.register()).
registry = {}
registry_by_namespace = {}
def nested_config(config):
cfg = {}
token = 'switchboard.'
for k, v in config.iteritems():
if k.startswith(token):
cfg[k.replace(token, '')] = v
return cfg
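# Example (hedged sketch): nested_config({'switchboard.dburl': 'sqlite:///sb.db',
# 'other.key': 1}) returns {'dburl': 'sqlite:///sb.db'} -- only keys carrying the
# 'switchboard.' prefix are kept, with the prefix stripped.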
def configure(config={}, nested=False, cache=None):
"""Useful for when you need to control Switchboard's setup."""
if nested:
config = nested_config(config)
# Re-read settings to make sure we have everything.
Settings.init(cache=cache, **config)
operator.cache = cache
# Establish the connection to the database.
timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
dburl = settings.SWITCHBOARD_DBURL
if dburl:
engine = sqla.create_engine(
dburl, connect_args={'connect_timeout': timeout})
Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
# Register the builtins.
__import__('switchboard.builtins')
class SwitchManager(ModelDict):
DISABLED = DISABLED
SELECTIVE = SELECTIVE
GLOBAL = GLOBAL
INHERIT = INHERIT
INCLUDE = INCLUDE
EXCLUDE = EXCLUDE
def __init__(self, *args, **kwargs):
# Inject args and kwargs that are known quantities; the SwitchManager
# will always deal with the Switch model and so on.
new_args = [Switch]
new_args.extend(args)
kwargs['key'] = 'key'
kwargs['value'] = 'value'
self.result_cache = None
self.context = {}
super(SwitchManager, self).__init__(*new_args, **kwargs)
def __unicode__(self):
return "<%s: %s (%s)>" % (self.__class__.__name__,
getattr(self, 'model', ''),
registry.values())
def __getitem__(self, key):
"""
Returns a SwitchProxy, rather than a Switch. It allows us to
easily extend the Switches method and automatically include our
manager instance.
"""
return SwitchProxy(self, super(SwitchManager, self).__getitem__(key))
def with_result_cache(func):
"""
Decorator specifically for is_active. If self.result_cache is set to a {}
the is_active results will be cached for each set of params.
"""
def inner(self, *args, **kwargs):
dic = self.result_cache
cache_key = None
if dic is not None:
cache_key = (args, tuple(kwargs.items()))
try:
result = dic.get(cache_key)
except TypeError as e: # not hashable
log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s',
args[0], e, repr(cache_key)[:200])
cache_key = None
else:
if result is not None:
return result
result = func(self, *args, **kwargs)
if cache_key is not None:
dic[cache_key] = result
return result
return inner
@with_result_cache
def is_active(self, key, *instances, **kwargs):
"""
Returns ``True`` if any of ``instances`` match an active switch.
Otherwise returns ``False``.
>>> operator.is_active('my_feature', request) #doctest: +SKIP
"""
try:
default = kwargs.pop('default', False)
# Check all parents for a disabled state
parts = key.split(':')
if len(parts) > 1:
child_kwargs = kwargs.copy()
child_kwargs['default'] = None
result = self.is_active(':'.join(parts[:-1]), *instances,
**child_kwargs)
if result is False:
return result
elif result is True:
default = result
try:
switch = self[key]
except KeyError:
# switch is not defined, defer to parent
return default
if switch.status == GLOBAL:
return True
elif switch.status == DISABLED:
return False
elif switch.status == INHERIT:
return default
conditions = switch.value
# If no conditions are set, we inherit from parents
if not conditions:
return default
instances = list(instances) if instances else []
instances.extend(self.context.values())
# check each switch to see if it can execute
return_value = False
for namespace, condition in conditions.iteritems():
condition_set = registry_by_namespace.get(namespace)
if not condition_set:
continue
result = condition_set.has_active_condition(condition,
instances)
if result is False:
return False
elif result is True:
return_value = True
except:
log.exception('Error checking if switch "%s" is active', key)
return_value = False
# there were no matching conditions, so it must not be enabled
return return_value
def register(self, condition_set):
"""
Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry[condition_set.get_id()] = condition_set
registry_by_namespace[condition_set.get_namespace()] = condition_set
def unregister(self, condition_set):
"""
Unregisters a condition set with the manager.
>>> operator.unregister(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry.pop(condition_set.get_id(), None)
registry_by_namespace.pop(condition_set.get_namespace(), None)
def get_condition_set_by_id(self, switch_id):
"""
Given the identifier of a condition set (described in
ConditionSet.get_id()), returns the registered instance.
"""
return registry[switch_id]
def get_condition_sets(self):
"""
Returns a generator yielding all currently registered
ConditionSet instances.
"""
return registry.itervalues()
def get_all_conditions(self):
"""
Returns a generator which yields groups of lists of conditions.
>>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
>>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
"""
cs = self.get_condition_sets()
for condition_set in sorted(cs, key=lambda x: x.get_group_label()):
group = unicode(condition_set.get_group_label())
for field in condition_set.fields.itervalues():
yield condition_set.get_id(), group, field
def as_request(self, user=None, ip_address=None):
from .helpers import MockRequest
return MockRequest(user, ip_address)
auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True)
operator = SwitchManager(auto_create=auto_create)
| 33.752066 | 115 | 0.585945 | [
"Apache-2.0"
] | juju/switchboard | switchboard/manager.py | 8,168 | Python |
""" Player commands """
from .command import Command, ModelId, command
@command
class CreatePlayer(Command):
playlist_id: ModelId
@command
class PlayVideo(Command):
video_id: ModelId
@command
class StopPlayer(Command):
pass
@command
class TogglePlayerState(Command):
pass
@command
class SeekVideo(Command):
duration: int
@command
class UpdateVolume(Command):
volume: int
@command
class ToggleSubtitle(Command):
pass
@command
class UpdateSubtitleDelay(Command):
delay: int
| 11.840909 | 46 | 0.727447 | [
"MIT"
] | Tastyep/RaspberryCast | OpenCast/app/command/player.py | 521 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Created By Rodrigo Wilkens
# Last update 27/March/2022
# version ='1.0'
# ---------------------------------------------------------------------------
def join_institution(institution):
if len(institution)==0:
return None
if len(institution)==1:
return institution[0]
res = ", ".join(institution[:-1])
res += " and " + institution[-1]
return res
def get_user(or_id,client_acl, force_institution=False):
c = None
try:
c = client_acl.get_profile(or_id)
except:
print("\nERROR: or_id not found", or_id)
return {"first_name":or_id, "last_name":or_id,"name":or_id, "username":or_id, "emails":or_id, "institution":"NA"}, True
try:
if or_id[0] == "~":
emails = client_acl.search_profiles(ids=[or_id])
assert len(emails) >= 1
else:
emails = client_acl.search_profiles(ids=[c.id])
assert len(emails) >= 1
# emails = [or_id]
except:
print("\nERROR: or_id not associated to an email", or_id)
return {"first_name":or_id, "last_name":or_id,"name":or_id, "username":or_id, "emails":or_id, "institution":"NA"}, True
# try:
if True:
c = c.content
namePrefered = None
for name in c["names"]:
if namePrefered==None or ('preferred' in name and name['preferred']):
namePrefered = name
name = " ".join([namePrefered['first'] if type(namePrefered['first'])==str else '',
namePrefered['middle'] if namePrefered['middle']!=None else '',
namePrefered['last'] if namePrefered['last']!=None else '' ]).replace(" ", " ")
first_name = namePrefered['first'].strip() if type(namePrefered['first'])==str else ''
middle_name = namePrefered['middle'].strip() if namePrefered['middle']!=None else ''
last_name = namePrefered['last'].strip() if namePrefered['last']!=None else ''
username = namePrefered['username'].strip()
if len(first_name)>2:
first_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in first_name.split(" ")])
if len(middle_name)>2:
middle_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in middle_name.split(" ")])
if len(last_name)>2:
last_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in last_name.split(" ")])
if 'preferredEmail' in emails[0].content:
emails = emails[0].content['preferredEmail']
else:
emails = emails[0].content['emails'][0]
emails = emails.replace("_","\\_")
institution = []
if 'history' in c:
for h in c['history']:
if 'end' not in h or h['end'] == None:
institution.append(h['institution']["name"])
ret = {"first_name":first_name, "last_name":last_name,"name":name, "username":username, "emails":emails}
institution = join_institution(institution)
if institution:
ret["institution"] = institution
else:
if force_institution:
ret["institution"] = "NA"
if len(middle_name)>0:
ret["middle_name"]=middle_name
if "gscholar" in c:
ret["google_scholar_id"] = c["gscholar"]
if 'dblp' in c:
ret['dblp_id'] = c['dblp']
if 'homepage' in c:
ret['homepage'] = c['homepage']
if 'orcid'in c:
ret['orcid'] = c['orcid']
if 'semanticScholar' in c:
ret["semantic_scholar_id"] = c['semanticScholar']
return ret, False
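# Usage sketch (hedged; assumes `client_acl` is an authenticated OpenReview client and
# '~Jane_Doe1' is an illustrative profile id):
#   author, lookup_failed = get_user('~Jane_Doe1', client_acl, force_institution=True)
#   if not lookup_failed:
#       print(author["name"], author["emails"], author.get("institution"))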
| 42.67033 | 139 | 0.532578 | [
"MIT"
] | nueffing/ECNLP5_aclpub2 | openreview/util.py | 3,883 | Python |
from pdf_reports import ReportWriter
# DEFINE A WRITER WITH DEFAULT TEMPLATE AND VALUES
report_writer = ReportWriter(
default_stylesheets=["style.css"],
default_template="template.pug",
title="My default title",
version="0.1.2"
)
# THEN LATER IN YOUR CODE:
html = report_writer.pug_to_html(my_name="Zulko", my_organization="EGF")
report_writer.write_report(html, "example_reportwriter.pdf") | 31.384615 | 72 | 0.762255 | [
"MIT"
] | Edinburgh-Genome-Foundry/pdf_reports | examples/example_reportwriter/example_reportwriter.py | 408 | Python |
from .interpreter_utils import (
SPEAKERLOOK,
SPEAKERPOS,
AGENTPOS,
is_loc_speakerlook,
process_spans_and_remove_fixed_value,
coref_resolve,
backoff_where,
strip_prefix,
ref_obj_lf_to_selector,
)
from .interpret_reference_objects import (
ReferenceObjectInterpreter,
interpret_reference_object,
special_reference_search_data,
get_eid_from_special,
filter_by_sublocation,
)
from .interpret_location import ReferenceLocationInterpreter, interpret_relative_direction
from .interpreter import InterpreterBase, Interpreter
from .get_memory_handler import GetMemoryHandler
from .interpret_conditions import ConditionInterpreter, get_repeat_num
from .interpret_filters import (
FilterInterpreter,
interpret_dance_filter,
interpret_where_backoff,
maybe_apply_selector,
)
from .interpret_attributes import AttributeInterpreter
__all__ = [
SPEAKERLOOK,
SPEAKERPOS,
AGENTPOS,
ref_obj_lf_to_selector,
is_loc_speakerlook,
coref_resolve,
process_spans_and_remove_fixed_value,
backoff_where,
strip_prefix,
special_reference_search_data,
get_eid_from_special,
interpret_dance_filter,
ReferenceObjectInterpreter,
interpret_reference_object,
filter_by_sublocation,
ReferenceLocationInterpreter,
interpret_relative_direction,
ConditionInterpreter,
get_repeat_num,
interpret_where_backoff,
maybe_apply_selector,
FilterInterpreter,
AttributeInterpreter,
GetMemoryHandler,
InterpreterBase,
Interpreter,
]
| 24.453125 | 90 | 0.787859 | [
"MIT"
] | 1heart/fairo | droidlet/interpreter/__init__.py | 1,565 | Python |
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'Foo',
]
@pulumi.input_type
class Foo:
def __init__(__self__, *,
a: Optional[bool] = None):
if a is not None:
pulumi.set(__self__, "a", a)
@property
@pulumi.getter
def a(self) -> Optional[bool]:
return pulumi.get(self, "a")
@a.setter
def a(self, value: Optional[bool]):
pulumi.set(self, "a", value)
| 21.65625 | 80 | 0.620491 | [
"Apache-2.0"
] | BearerPipelineTest/pulumi | pkg/codegen/testing/test/testdata/plain-schema-gh6957/python/pulumi_xyz/_inputs.py | 693 | Python |
import gorp
from gorp.readfiles import *
import unittest
ogdir = os.getcwd()
newdir = os.path.join(gorpdir, "testDir")
os.chdir(newdir)
is_version_2p0 = gorp.__version__[:3] == "2.0"
class XOptionTester(unittest.TestCase):
session = GorpSession(print_output=False)
@unittest.skipIf(
is_version_2p0,
"this test fails but the '-x' option with css selectors still works fine in normal use",
)
def test_css_selectors(self):
fname = os.path.join(newdir, "bluddGame.htm")
query = f"-x 'img.Bludd' /{fname}"
self.session.receive_query(query)
correct_output = {
f"{fname}": [
'b\'<img class="Bludd" id="Bludd" src=".\\\\viking pics\\\\Bludd.png" height="100" width="100" alt="Bludd, Blood God" title="Bludd, the Blood God (of Blood!)"/> \\n\''
]
}
self.assertEqual(self.session.resultset, correct_output)
@unittest.skipIf(
is_version_2p0,
"this test fails but the '-x' option with XPath selectors still works fine in normal use",
)
def test_xpath_multi_results(self):
fname = os.path.join(newdir, "books.xml")
query = f"-x -n '//bookstore//book[@category]' /{fname}"
self.session.receive_query(query)
correct_output = {
fname: {
(
"bookstore",
0,
): 'b\'<book category="cooking">\\n <title lang="en">Everyday Italian</title>\\n <author>Giada De Laurentiis</author>\\n <year>2005</year>\\n <price>30.00</price>\\n </book>\\n \'',
(
"bookstore",
1,
): 'b\'<book category="children">\\n <title lang="en">Harry Potter</title>\\n <author>J K. Rowling</author>\\n <year>2005</year>\\n <price>29.99</price>\\n </book>\\n \'',
(
"bookstore",
2,
): 'b\'<book category="web">\\n <title lang="en">Learning XML</title>\\n <author>Erik T. Ray</author>\\n <year>2003</year>\\n <price>39.95</price>\\n </book>\\n\'',
}
}
self.assertEqual(self.session.resultset, correct_output)
def zzzz_cleanup(self):
os.chdir(ogdir)
        self.session.close()
if __name__ == "__main__":
unittest.main(verbosity=2)
| 38.354839 | 211 | 0.547519 | [
"MIT"
] | molsonkiko/gorpy | gorp/test/test_x_option.py | 2,378 | Python |
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Online hard negative mining
__C.TRAIN.HARD_POSITIVE_MINING = True
__C.TRAIN.HARD_NEGATIVE_MINING = True
__C.TRAIN.BG_THRESH_LOW = 0.0
__C.TRAIN.ORIG_SIZE = False
# Initial learning rate
__C.TRAIN.LEARNING_RATE = 0.001
# Momentum
__C.TRAIN.MOMENTUM = 0.9
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0005
# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = [30000]
# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 50
# Iteration intervals for save check point
__C.TRAIN.CHECKPOINT = 500
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True
# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False
# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False
# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180
# Scale to use during training (can list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,800)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1200
# Trim size for input images to create minibatch
__C.TRAIN.TRIM_HEIGHT = 600
__C.TRAIN.TRIM_WIDTH = 600
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 256
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.0
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
# __C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
# __C.TRAIN.USE_PREFETCH = False
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'gt'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.ANCHOR_POSITIVE_OVERLAP = 0.5
# IOU < thresh: negative example
__C.TRAIN.ANCHOR_NEGATIVE_OVERLAP = 0.3
# If an anchor statisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.25
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 384
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 4
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Whether to use all ground truth bounding boxes for training,
# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
__C.TRAIN.USE_ALL_GT = True
# Whether to tune the batch normalization parameters during training
__C.TRAIN.BN_TRAIN = False
#
# Testing options
#
__C.TEST = edict()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (1200,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1600
__C.TEST.ORIG_SIZE = False
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.3
## Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'
# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000
#
# ResNet options
#
__C.RESNET = edict()
# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False
# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1
#
# MobileNet options
#
__C.MOBILENET = edict()
# Whether to regularize the depth-wise filters during training
__C.MOBILENET.REGU_DEPTH = False
# Number of fixed layers during training, by default the first of all 14 layers is fixed
# Range: 0 (none) to 12 (all)
__C.MOBILENET.FIXED_LAYERS = 5
# Weight decay for the mobilenet weights
__C.MOBILENET.WEIGHT_DECAY = 0.00004
# Depth multiplier
__C.MOBILENET.DEPTH_MULTIPLIER = 1.
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1. / 16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
__C.POOLING_MODE = 'crop'
# Size of the pooled region after RoI pooling
__C.POOLING_SIZE = 7
# Maximal number of gt rois in an image during Training
__C.MAX_NUM_GT_BOXES = 20
# Anchor scales for RPN
__C.ANCHOR_SCALES = [8, 16, 32]
# Anchor ratios for RPN
__C.ANCHOR_RATIOS = [0.5, 1, 2]
# Feature stride for RPN
__C.FEAT_STRIDE = [16, ]
__C.CUDA = False
__C.CROP_RESIZE_WITH_MAX_POOL = True
import pdb
def get_output_dir(imdb_name, net_name=None,output_dir='output'):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(cfg.ROOT_DIR, output_dir, cfg.EXP_DIR, imdb_name))
if net_name is not None:
outdir = osp.join(outdir, net_name)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def get_output_tb_dir(imdb, weights_filename):
"""Return the directory where tensorflow summaries are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.items():
# a must specify keys that are in b
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print(('Error under config key: {}'.format(k)))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d
d = d[subkey]
subkey = key_list[-1]
assert subkey in d
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
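# Usage sketch (hedged; the YAML path is a placeholder):
#   cfg_from_file('experiments/cfgs/example.yml')
#   cfg_from_list(['TRAIN.SCALES', '(800,)', 'TRAIN.IMS_PER_BATCH', '2'])
#   print(cfg.TRAIN.SCALES, cfg.TRAIN.IMS_PER_BATCH)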
| 30.031863 | 91 | 0.713458 | [
"MIT"
] | Juggernaut93/SSH-pytorch | model/utils/config.py | 12,253 | Python |
# Code in this file is copied and adapted from
# https://github.com/berkeleydeeprlcourse
import json
"""
Some simple logging functionality, inspired by rllab's logging.
Assumes that each diagnostic gets logged each iteration
Call logz.configure_output_dir() to start logging to a
tab-separated-values file (some_folder_name/log.txt)
"""
import os.path as osp, shutil, time, atexit, os, subprocess
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color, bold=False, highlight=False):
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
class G(object):
output_dir = None
output_file = None
first_row = True
log_headers = []
log_current_row = {}
def configure_output_dir(d=None):
"""
Set output directory to d, or to /tmp/somerandomnumber if d is None
"""
G.first_row = True
G.log_headers = []
G.log_current_row = {}
G.output_dir = d or "/tmp/experiments/%i"%int(time.time())
if not osp.exists(G.output_dir):
os.makedirs(G.output_dir)
G.output_file = open(osp.join(G.output_dir, "log.txt"), 'w')
atexit.register(G.output_file.close)
print(colorize("Logging data to %s"%G.output_file.name, 'green', bold=True))
def log_tabular(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
"""
if G.first_row:
G.log_headers.append(key)
else:
assert key in G.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration"%key
assert key not in G.log_current_row, "You already set %s this iteration. Maybe you forgot to call dump_tabular()"%key
G.log_current_row[key] = val
def save_params(params):
with open(osp.join(G.output_dir, "params.json"), 'w') as out:
out.write(json.dumps(params, separators=(',\n','\t:\t'), sort_keys=True))
def dump_tabular():
"""
Write all of the diagnostics from the current iteration
"""
vals = []
key_lens = [len(key) for key in G.log_headers]
max_key_len = max(15,max(key_lens))
keystr = '%'+'%d'%max_key_len
fmt = "| " + keystr + "s | %15s |"
n_slashes = 22 + max_key_len
print("-"*n_slashes)
for key in G.log_headers:
val = G.log_current_row.get(key, "")
if hasattr(val, "__float__"): valstr = "%8.3g"%val
else: valstr = val
print(fmt%(key, valstr))
vals.append(val)
print("-"*n_slashes)
if G.output_file is not None:
if G.first_row:
G.output_file.write("\t".join(G.log_headers))
G.output_file.write("\n")
G.output_file.write("\t".join(map(str,vals)))
G.output_file.write("\n")
G.output_file.flush()
G.log_current_row.clear()
G.first_row=False
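# Usage sketch (hedged; `num_iters` and `avg_return` are illustrative values computed
# by the caller):
#   configure_output_dir('/tmp/experiments/demo')
#   for itr in range(num_iters):
#       log_tabular('Iteration', itr)
#       log_tabular('AverageReturn', avg_return)
#       dump_tabular()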
| 28.67619 | 122 | 0.63733 | [
"MIT"
] | CoAxLab/AdaptiveDecisionMaking_2018 | ADMCode/snuz/ars/logz.py | 3,011 | Python |
# !/usr/bin/env python
# -*-coding: utf-8 -*-
__author__ = 'wtq'
LOG_PATH = "monitor_logging.log"
REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379
# Sampling interval and pause duration
MONITOR_INTERVAL = 1
MONITOR_PEROID = 3
# Network interface whose read/write throughput is monitored
NET_NAME = 'eth0'
# Names of the machines in the system, used to compute the system-wide average load
SYSTEM_MACHINE_NAME = ["storage1", "storage2"]
# Machine names used to count client connections, usually the master
CLIENT_LINK_MACNHIE = ["storage1"]
DISK_ALL_SPACE = 100
CPU_KERNEL_NUMS = 32
MEM_ALL_SPACE = 100
FASTDFSPORT = '8000'
REDIS_SYSTEM_KEY = 'system'
FASTDFS_PEROID = 3
| 15.71875 | 46 | 0.735586 | [
"Apache-2.0"
] | wangtianqi1993/fuzzy_monitor | config/config.py | 631 | Python |
#!/usr/bin/env python
# (works in both Python 2 and Python 3)
# Offline HTML Indexer v1.32 (c) 2013-15,2020 Silas S. Brown.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a Python program for creating large indices of
# HTML text which can be queried using simple Javascript
# that works on many mobile phone browsers without needing
# an Internet connection or a Web server. This is useful if
# you want to load a dictionary or other reference onto your
# phone (or computer) for use when connectivity is not
# available.
# The input HTML should be interspersed with anchors like
# this: <a name="xyz"></a> where xyz is the index heading
# for the following text. There should be one such anchor
# before each entry and an extra anchor at the end of the
# text; everything before the first anchor is counted as the
# "header" and everything after the last as the "footer". If
# these are empty, a default "mobile friendly" HTML header
# and footer specifying UTF-8 encoding will be
# added. Anchors may be linked from other entries; these
# links are changed as necessary.
# Opening any of the resulting HTML files should display a
# textbox that lets you type the first few letters of the
# word you wish to look up; the browser will then jump to
# whatever heading is alphabetically nearest to the typed-in
# text.
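# For example, a minimal input with two entries (illustrative text) looks like:
#   <a name="apple"></a><p><b>apple</b>: a fruit.</p>
#   <a name="banana"></a><p><b>banana</b>: another fruit.</p>
#   <a name="zzz"></a>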
# Configuration
# -------------
infile = None # None = standard input, or set a "filename"
outdir = "." # current directory by default
alphabet = "abcdefghijklmnopqrstuvwxyz" # set to None for all characters and case-sensitive
ignore_text_in_parentheses = True # or False, for parentheses in index headings
more_sensible_punctuation_sort_order = True
remove_utf8_diacritics = True # or False, for removing diacritics in index headings (not in main text);
# assumes UTF-8. (Letters with diacritics will be treated as though they did not have any.)
max_filesize = 64*1024 # of each HTML file
# (max_filesize can be exceeded by 1 very large entry)
# Where to find history:
# on GitHub at https://github.com/ssb22/indexer
# and on GitLab at https://gitlab.com/ssb22/indexer
# and on BitBucket https://bitbucket.org/ssb22/indexer
# and at https://gitlab.developers.cam.ac.uk/ssb22/indexer
# and in China: https://gitee.com/ssb22/indexer
# ---------------------------------------------------------------
import re,sys,os,time
if type("")==type(u""): izip = zip # Python 3
else: from itertools import izip # Python 2
if infile:
sys.stderr.write("Reading from "+infile+"... ")
infile = open(infile)
else:
sys.stderr.write("Reading from standard input... ")
infile = sys.stdin
fragments = re.split(r'<a name="([^"]*)"></a>',infile.read())
# odd indices should be the tag names, even should be the HTML in between
assert len(fragments)>3, "Couldn't find 2 or more hash tags (were they formatted correctly?)"
assert len(fragments)%2, "re.split not returning groups??"
header,footer = fragments[0],fragments[-1]
if not header.strip(): header="""<html><head><meta name="mobileoptimized" content="0"><meta name="viewport" content="width=device-width"><meta http-equiv="Content-Type" content="text/html; charset=utf-8"></head><body>"""
if not footer.strip(): footer = "</body></html>"
fragments = fragments[1:-1]
sys.stderr.write("%d entries\n" % len(fragments))
def alphaOnly(x):
if ignore_text_in_parentheses: x=re.sub(r"\([^)]*\)[;, ]*","",x)
if alphabet: x=''.join(c for c in x.lower() if c in alphabet)
return re.sub(r"^[@,;]*","",x) # see ohi_latex.py
if more_sensible_punctuation_sort_order:
_ao1 = alphaOnly
alphaOnly = lambda x: _ao1(re.sub('([;,]);+',r'\1',x.replace('-',' ').replace(',','~COM~').replace(';',',').replace('~COM~',';').replace(' ',';'))) # gives ; < , == space (useful if ; is used to separate definitions and , is used before extra words to be added at the start; better set space EQUAL to comma, not higher, or will end up in wrong place if user inputs something forgetting the comma)
if alphabet:
for c in '@,;':
if not c in alphabet: alphabet += c
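# For illustration (hypothetical input), with the settings above alphaOnly maps
#   "Ice-Cream, dessert"  ->  "ice;cream;dessert"
# (hyphens, spaces and commas all end up as ';' here, so they sort together).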
if remove_utf8_diacritics:
_ao = alphaOnly ; import unicodedata
def S(s):
if type(u"")==type(""): return s # Python 3
else: return s.encode('utf-8') # Python 2
def U(s):
if type(s)==type(u""): return s
return s.decode('utf-8')
alphaOnly = lambda x: _ao(S(u''.join((c for c in unicodedata.normalize('NFD',U(x)) if not unicodedata.category(c).startswith('M')))))
fragments = list(zip(map(alphaOnly,fragments[::2]), fragments[1::2]))
fragments.sort()
class ChangedLetters:
def __init__(self): self.lastText = ""
def __call__(self,text):
"Find shortest prefix of text that differentiates it from previous item (empty string if no difference)"
assert text >= self.lastText, "input must have been properly sorted"
i = 0
for c1,c2 in izip(self.lastText+chr(0),text):
i += 1
if not c1==c2:
self.lastText = text
return text[:i]
assert text==self.lastText, repr(text)+"!="+repr(self.lastText)
return "" # no difference from lastText
changedLetters = ChangedLetters() ; f2 = []
fragments.reverse()
sys.stderr.write("Minimizing prefixes... ")
while fragments:
x,y = fragments.pop()
x = changedLetters(x)
if f2 and not x: f2[-1] = (f2[-1][0], f2[-1][1]+y) # combine effectively-identical ones
else: f2.append((x,y))
sys.stderr.write("done\n")
fragments = f2
def tag(n):
if n: return '<a name="%s"></a>' % n
else: return ''
def old_javascript_array(array):
"in case the browser doesn't support JSON, and to save some separator bytes"
array = list(array) # in case it was an iterator
sepChar = ord(' ')
chars_used = set(''.join(array))
assert '"' not in chars_used and '\\' not in chars_used and '<' not in chars_used and '&' not in chars_used, "Can't use special chars (unless you change this code to escape them)"
while True:
if chr(sepChar) not in chars_used and not chr(sepChar) in r'\"<&': break
sepChar += 1
assert sepChar < 127, "can't find a suitable separator char (hard-code the array instead?)"
return '"'+chr(sepChar).join(array)+'".split("'+chr(sepChar)+'")'
js_binchop = """function(a,i) {
function inner(a,i,lo,hi) {
var mid=lo+Math.floor((hi-lo)/2);
if(mid==lo || a[mid]==i) return a[mid];
if(a[mid] > i) return inner(a,i,lo,mid);
return inner(a,i,mid,hi);
} return inner(a,i,0,a.length);
}"""
js_binchop_dx = js_binchop.replace("return a[mid]","return mid")
def js_hashjump(hashtags): return """<script><!--
var h=location.hash; if(h.length > 1) { if(h!='#_h' && h!='#_f') { var n="#"+%s(%s,h.slice(1)); if (h!=n) location.hash=n; } } else location.href="index.html"
//--></script>""" % (js_binchop,old_javascript_array(hashtags)) # (the h!=n test is needed to avoid loop on some browsers e.g. PocketIE7)
# #_h and #_f are special hashes for header and footer, used for "Next page" and "Previous page" links
# (HTML5 defaults type to text/javascript, as do all pre-HTML5 browsers including NN2's 'script language="javascript"' thing, so we might as well save a few bytes)
__lastStartEnd = None
def htmlDoc(start,end,docNo):
"Returns an HTML document containing fragments[start:end]. docNo is used to generate previous/next page links as appropriate. Caches its return value in case called again with same start,end (in which case docNo is ignored on second call)."
global __lastStartEnd,__lastDoc
if not (start,end) == __lastStartEnd:
__lastStartEnd = (start,end)
__lastDoc = header+js_hashjump(x for x,y in fragments[start:end] if x)
if start:
assert docNo, "Document 0 should start at 0"
__lastDoc += '<p><a name="_h" href="%d.html#_f">Previous page</a></p>' % (docNo-1,)
__lastDoc += ''.join(tag(x)+y for x,y in fragments[start:end])
if end<len(fragments): __lastDoc += '<p><a name="_f" href="%d.html#_h">Next page</a></p>' % (docNo+1,)
__lastDoc += footer
return linkSub(__lastDoc)
def linkSub(txt): return re.sub(r'(?i)<a href=("?)#',r'<a href=\1index.html#',txt) # (do link to index.html#whatever rather than directly, so link still works if docs change)
def findEnd(start,docNo):
"Given 'start' (an index into 'fragments'), find an 'end' that produces the largest possible htmlDoc less than max_filesize. docNo is used to generate previous/next page links as appropriate."
eTry = len(fragments)-start
assert eTry, "must start before the end"
sLen = len(htmlDoc(start,start+eTry,docNo))
if sLen > max_filesize:
eTry = int(eTry / int(sLen / max_filesize)) # rough start point
while eTry > 1 and len(htmlDoc(start,start+eTry,docNo)) > max_filesize:
eTry = int(eTry/2)
if eTry < 1: eTry = 1
while eTry < len(fragments)-start and len(htmlDoc(start,start+eTry,docNo)) < max_filesize: eTry += 1
return start + max(1,eTry-1)
def allRanges():
start = docNo = 0
while start < len(fragments):
end = findEnd(start,docNo)
sys.stderr.write("\rSegmenting (%d/%d)" % (end,len(fragments)))
yield start,end
start = end ; docNo += 1
sys.stderr.write("Segmenting")
startsList = []
for start,end in allRanges():
open(("%s%s%d.html" % (outdir,os.sep,len(startsList))),"w").write(htmlDoc(start,end,len(startsList)))
startsList.append(start)
if alphabet:
assert not '"' in alphabet and not '\\' in alphabet and not '&' in alphabet and not '<' in alphabet, "Can't use special characters in alphabet (unless js_alphabet is modified to quote them)"
js_alphabet = """var a=val.toLowerCase(),i; val="";
for(i=0; i < a.length; i++) { var c=a.charAt(i); if("%s".indexOf(c)>-1) val += c }
""" % alphabet # TODO: what if user types letters with diacritics, when remove_utf8_diacritics is set?
else: js_alphabet = ""
if more_sensible_punctuation_sort_order: js_alphabet = "val = val.replace(/-/g,' ').replace(/,/g,'~COM~').replace(/;/g,',').replace(/~COM~/g,';').replace(/ /g,';').replace(/([;,]);+/g,'$1');" + js_alphabet
def hashReload(footer):
# If a footer refers to index.html#example, need to
# make sure the hash script runs when clicking there
# from the index page itself.
strToFind = '<a href="index.html#'
# TODO: what if it's quoted differently and/or has extra attributes? (ohi.html does specify using " quoting though)
while True:
i = footer.lower().find(strToFind)
if i==-1: return footer
footer = footer[:i]+'<a onclick="document.forms[0].q.value=\''+footer[i+len(strToFind):footer.index('"',i+len(strToFind))]+'\';jump()" href="index.html#'+footer[i+len(strToFind):]
open(outdir+os.sep+"index.html","w").write("""%s<script><!--
function jump() {
var val=document.forms[0].q.value; %s
location.href=%s(%s,val)+".html#"+val
}
if(navigator.userAgent.indexOf("Opera/9.50" /* sometimes found on WM6.1 phones from 2008 */) >= 0) document.write("<p><b>WARNING:</"+"b> Your version of Opera may have trouble jumping to anchors; please try Opera 10 or above.</"+"p>")
//--></script><noscript><p><b>ERROR:</b> Javascript needs to be switched on for this form to work.</p></noscript>
<form action="#" onSubmit="jump();return false">Lookup: <input type="text" name="q"><input type="submit" value="ok"></form><script><!--
if(location.hash.length > 1) { document.forms[0].q.value = location.hash.slice(1).replace(/(\+|%%20)/g,' '); jump(); } else document.forms[0].q.focus();
//--></script>%s""" % (hashReload(linkSub(header)),js_alphabet,js_binchop_dx,old_javascript_array(fragments[s][0] for s in startsList),hashReload(linkSub(footer))))
sys.stderr.write(" %d files\n" % (len(startsList)+1))
| 53.635965 | 400 | 0.674053 | [
"Apache-2.0"
] | ssb22/indexer | ohi.py | 12,229 | Python |
import environ
from pathlib import Path
env = environ.Env(
# Sets debug to False if it cannot find .env
DEBUG=(bool, False)
)
environ.Env.read_env()
# GENERAL
# ------------------------------------------------------------------------------
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY
SECRET_KEY = env.str('SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DEBUG')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = tuple(env.list('ALLOWED_HOSTS'))
# APPS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'django.contrib.sites',
# Third-party
'allauth',
'allauth.account',
'crispy_forms',
'debug_toolbar',
# Local
'accounts',
'pages',
'snacks',
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# INTERNATIONALIZATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/topics/i18n/
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'UTC'
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-USE_I18N
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(BASE_DIR.joinpath('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(BASE_DIR.joinpath('static'))]
# http://whitenoise.evans.io/en/stable/django.html#add-compression-and-caching-support
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# DJANGO-CRISPY-FORMS CONFIGS
# ------------------------------------------------------------------------------
# https://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# DJANGO-DEBUG-TOOLBAR CONFIGS
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html
# https://docs.djangoproject.com/en/dev/ref/settings/#internal-ips
INTERNAL_IPS = ['127.0.0.1']
# CUSTOM USER MODEL CONFIGS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/topics/auth/customizing/#substituting-a-custom-user-model
AUTH_USER_MODEL = 'accounts.CustomUser'
# DJANGO-ALLAUTH CONFIGS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'home'
# https://django-allauth.readthedocs.io/en/latest/views.html#logout-account-logout
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
# https://django-allauth.readthedocs.io/en/latest/installation.html?highlight=backends
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
| 37.266304 | 97 | 0.615429 | [
"MIT"
] | okayjones/django-x | config/settings.py | 6,857 | Python |
# model settings
model = dict(
type='CenterNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
add_summay_every_n_step=200,
style='pytorch'),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(64, 128, 256, 512),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_area_process='norm',
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
avg_wh_weightv3=False,
center_ratio=0.2,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.05,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0003,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[18, 22])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=18)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'eft18_o16_v1norm_3lr_alpha2_wd3e4_s123_nos_2x'
load_from = None
resume_from = None
workflow = [('train', 1)]
| 30.117647 | 87 | 0.62793 | [
"Apache-2.0"
] | mrsempress/mmdetection | configs/centernext/eft18_o16_v1norm_3lr_alpha2_wd3e4_s123_nos_2x.py | 4,096 | Python |
import multiprocessing as mp
import itertools
import traceback
import pickle
import numpy as np
from numba import cuda
from numba.cuda.testing import (skip_on_cudasim, skip_under_cuda_memcheck,
ContextResettingTestCase, ForeignArray)
import unittest
def core_ipc_handle_test(the_work, result_queue):
try:
arr = the_work()
# Catch anything going wrong in the worker function
except: # noqa: E722
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
def base_ipc_handle_test(handle, size, result_queue):
def the_work():
dtype = np.dtype(np.intp)
with cuda.open_ipc_array(handle, shape=size // dtype.itemsize,
dtype=dtype) as darr:
# copy the data to host
return darr.copy_to_host()
core_ipc_handle_test(the_work, result_queue)
def serialize_ipc_handle_test(handle, result_queue):
def the_work():
dtype = np.dtype(np.intp)
darr = handle.open_array(cuda.current_context(),
shape=handle.size // dtype.itemsize,
dtype=dtype)
# copy the data to host
arr = darr.copy_to_host()
handle.close()
return arr
core_ipc_handle_test(the_work, result_queue)
def ipc_array_test(ipcarr, result_queue):
try:
with ipcarr as darr:
arr = darr.copy_to_host()
try:
# should fail to reopen
with ipcarr:
pass
except ValueError as e:
if str(e) != 'IpcHandle is already opened':
raise AssertionError('invalid exception message')
else:
raise AssertionError('did not raise on reopen')
# Catch any exception so we can propagate it
except: # noqa: E722
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
@skip_under_cuda_memcheck('Hangs cuda-memcheck')
@skip_on_cudasim('Ipc not available in CUDASIM')
class TestIpcMemory(ContextResettingTestCase):
def test_ipc_handle(self):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# manually prepare for serialization as bytes
handle_bytes = bytes(ipch.handle)
size = ipch.size
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (handle_bytes, size, result_queue)
proc = ctx.Process(target=base_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
proc.join(3)
def variants(self):
# Test with no slicing and various different slices
indices = (None, slice(3, None), slice(3, 8), slice(None, 8))
# Test with a Numba DeviceNDArray, or an array from elsewhere through
# the CUDA Array Interface
foreigns = (False, True)
return itertools.product(indices, foreigns)
def check_ipc_handle_serialization(self, index_arg=None, foreign=False):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
if index_arg is not None:
devarr = devarr[index_arg]
if foreign:
devarr = cuda.as_cuda_array(ForeignArray(devarr))
expect = devarr.copy_to_host()
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# pickle
buf = pickle.dumps(ipch)
ipch_recon = pickle.loads(buf)
self.assertIs(ipch_recon.base, None)
self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
self.assertEqual(ipch_recon.size, ipch.size)
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, result_queue)
proc = ctx.Process(target=serialize_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(expect, out)
proc.join(3)
def test_ipc_handle_serialization(self):
for index, foreign, in self.variants():
with self.subTest(index=index, foreign=foreign):
self.check_ipc_handle_serialization(index, foreign)
def check_ipc_array(self, index_arg=None, foreign=False):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# Slice
if index_arg is not None:
devarr = devarr[index_arg]
if foreign:
devarr = cuda.as_cuda_array(ForeignArray(devarr))
expect = devarr.copy_to_host()
ipch = devarr.get_ipc_handle()
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, result_queue)
proc = ctx.Process(target=ipc_array_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(expect, out)
proc.join(3)
def test_ipc_array(self):
for index, foreign, in self.variants():
with self.subTest(index=index, foreign=foreign):
self.check_ipc_array(index, foreign)
def staged_ipc_handle_test(handle, device_num, result_queue):
def the_work():
with cuda.gpus[device_num]:
this_ctx = cuda.devices.get_context()
deviceptr = handle.open_staged(this_ctx)
arrsize = handle.size // np.dtype(np.intp).itemsize
hostarray = np.zeros(arrsize, dtype=np.intp)
cuda.driver.device_to_host(
hostarray, deviceptr, size=handle.size,
)
handle.close()
return hostarray
core_ipc_handle_test(the_work, result_queue)
def staged_ipc_array_test(ipcarr, device_num, result_queue):
try:
with cuda.gpus[device_num]:
with ipcarr as darr:
arr = darr.copy_to_host()
try:
# should fail to reopen
with ipcarr:
pass
except ValueError as e:
if str(e) != 'IpcHandle is already opened':
raise AssertionError('invalid exception message')
else:
raise AssertionError('did not raise on reopen')
# Catch any exception so we can propagate it
except: # noqa: E722
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
@skip_under_cuda_memcheck('Hangs cuda-memcheck')
@skip_on_cudasim('Ipc not available in CUDASIM')
class TestIpcStaged(ContextResettingTestCase):
def test_staged(self):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# spawn new process for testing
mpctx = mp.get_context('spawn')
result_queue = mpctx.Queue()
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# pickle
buf = pickle.dumps(ipch)
ipch_recon = pickle.loads(buf)
self.assertIs(ipch_recon.base, None)
self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
self.assertEqual(ipch_recon.size, ipch.size)
# Test on every CUDA devices
for device_num in range(len(cuda.gpus)):
args = (ipch, device_num, result_queue)
proc = mpctx.Process(target=staged_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
proc.join(3)
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
def test_ipc_array(self):
for device_num in range(len(cuda.gpus)):
# prepare data for IPC
arr = np.random.random(10)
devarr = cuda.to_device(arr)
ipch = devarr.get_ipc_handle()
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, device_num, result_queue)
proc = ctx.Process(target=staged_ipc_array_test, args=args)
proc.start()
succ, out = result_queue.get()
proc.join(3)
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
if __name__ == '__main__':
unittest.main()
| 32.929825 | 77 | 0.591689 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | Emilka1604/numba | numba/cuda/tests/cudapy/test_ipc.py | 9,385 | Python |
#!/usr/bin/env python2
# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
from __future__ import print_function
import argparse
import logging
import sys
from collections import defaultdict
"""
This script reads and writes the 'ctm-edits' file that is
produced by get_ctm_edits.py.
It modifies the ctm-edits so that non-scored words
are not counted as errors: for instance, if there are things like
[COUGH] and [NOISE] in the transcript, deletions, insertions and
substitutions involving them are allowed, and we modify the reference
to correspond to the hypothesis.
If you supply the <lang> directory (the one that corresponds to
how you decoded the data) to this script, it assumes that the <lang>
directory contains phones/align_lexicon.int, and it uses this to work
out a reasonable guess of the non-scored phones, based on which have
a single-word pronunciation that maps to a silence phone.
It then uses the words.txt to work out the written form of those words.
Alternatively, you may specify a file containing the non-scored words one
per line, with the --non-scored-words option.
Non-scored words that were deleted (i.e. they were in the ref but not the
hyp) are simply removed from the ctm. For non-scored words that
were inserted or substituted, we change the reference word to match the
hyp word, but instead of marking the operation as 'cor' (correct), we
mark it as 'fix' (fixed), so that it will not be positively counted as a correct
word for purposes of finding the optimal segment boundaries.
e.g.
<file-id> <channel> <start-time> <duration> <hyp-word> <conf> <ref-word> <edit-type>
[note: the <channel> will always be 1].
AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil
AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor
AJJacobs_2007P-0001605-0003029 1 0.24 0.25 thought 1.0 thought cor
AJJacobs_2007P-0001605-0003029 1 0.49 0.14 i'd 1.0 i'd cor
AJJacobs_2007P-0001605-0003029 1 0.63 0.22 tell 1.0 tell cor
AJJacobs_2007P-0001605-0003029 1 0.85 0.11 you 1.0 you cor
AJJacobs_2007P-0001605-0003029 1 0.96 0.05 a 1.0 a cor
AJJacobs_2007P-0001605-0003029 1 1.01 0.24 little 1.0 little cor
AJJacobs_2007P-0001605-0003029 1 1.25 0.5 about 1.0 about cor
AJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 [UH] cor
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s - '
'%(funcName)s - %(levelname)s ] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
parser = argparse.ArgumentParser(
description = "This program modifies the reference in the ctm-edits which "
"is output by steps/cleanup/internal/get_ctm_edits.py, to allow insertions, deletions and "
"substitutions of non-scored words, and [if --allow-repetitions=true], "
"duplications of single words or pairs of scored words (to account for dysfluencies "
"that were not transcribed). Note: deletions and substitutions of non-scored words "
"after the reference is corrected, will be marked as operation 'fix' rather than "
"'cor' (correct) so that the downstream processing knows that this was not in "
"the original reference. Also by defaults tags non-scored words as such when "
"they are correct; see the --tag-non-scored option.")
parser.add_argument("--verbose", type = int, default = 1,
choices=[0,1,2,3],
help = "Verbose level, higher = more verbose output")
parser.add_argument("--allow-repetitions", type = str, default = 'true',
choices=['true','false'],
help = "If true, allow repetitions in the transcript of one or "
"two-word sequences: for instance if the ref says 'i' but "
"the hyp says 'i i', or the ref says 'but then' and the hyp says "
"'but then but then', fix the reference accordingly. Intervening "
"non-scored words are allowed between the repetitions. These "
"fixes will be marked as 'cor', not as 'fix', since there is "
"generally no way to tell which repetition was the 'real' one "
"(and since we're generally confident that such things were "
"actually uttered).")
parser.add_argument("non_scored_words_in", metavar = "<non-scored-words-file>",
help="Filename of file containing a list of non-scored words, "
"one per line. See steps/cleanup/get_nonscored_words.py.")
parser.add_argument("ctm_edits_in", metavar = "<ctm-edits-in>",
help = "Filename of input ctm-edits file. "
"Use /dev/stdin for standard input.")
parser.add_argument("ctm_edits_out", metavar = "<ctm-edits-out>",
help = "Filename of output ctm-edits file. "
"Use /dev/stdout for standard output.")
args = parser.parse_args()
def ReadNonScoredWords(non_scored_words_file):
global non_scored_words
try:
f = open(non_scored_words_file)
except:
sys.exit("modify_ctm_edits.py: error opening file: "
"--non-scored-words=" + non_scored_words_file)
for line in f.readlines():
a = line.split()
if not len(line.split()) == 1:
sys.exit("modify_ctm_edits.py: bad line in non-scored-words "
"file {0}: {1}".format(non_scored_words_file, line))
non_scored_words.add(a[0])
f.close()
# The ctm-edits file format is as follows [note: file-id is really utterance-id
# in this context].
# <file-id> <channel> <start-time> <duration> <hyp-word> <conf> <ref-word> <edit>
# e.g.:
# AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil
# AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor
# ...
# This function processes a single line of ctm-edits input for fixing
# "non-scored" words. The input 'a' is the split line as an array of fields.
# It modifies the object 'a'. This function returns the modified array,
# and please note that it is destructive of its input 'a'.
# If it returns the empty array then the line is to be deleted.
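# For illustration (hypothetical lines, assuming '[UH]' is listed as a
# non-scored word):
#   ['utt1', '1', '1.75', '0.48', '[UH]', '1.0', '<eps>', 'ins']
#     -> ['utt1', '1', '1.75', '0.48', '[UH]', '1.0', '[UH]', 'fix']
#   ['utt1', '1', '2.23', '0.0', '<eps>', '1.0', '[UH]', 'del']
#     -> []   (the line is dropped)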
def ProcessLineForNonScoredWords(a):
global num_lines, num_correct_lines, ref_change_stats
try:
assert len(a) == 8
num_lines += 1
# we could do:
# [ file, channel, start, duration, hyp_word, confidence, ref_word, edit_type ] = a
duration = a[3]
hyp_word = a[4]
ref_word = a[6]
edit_type = a[7]
if edit_type == 'ins':
assert ref_word == '<eps>'
if hyp_word in non_scored_words:
# insert this non-scored word into the reference.
ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
ref_word = hyp_word
edit_type = 'fix'
elif edit_type == 'del':
assert hyp_word == '<eps>' and float(duration) == 0.0
if ref_word in non_scored_words:
ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
return []
elif edit_type == 'sub':
assert hyp_word != '<eps>'
if hyp_word in non_scored_words and ref_word in non_scored_words:
# we also allow replacing one non-scored word with another.
ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
ref_word = hyp_word
edit_type = 'fix'
else:
assert edit_type == 'cor' or edit_type == 'sil'
num_correct_lines += 1
a[4] = hyp_word
a[6] = ref_word
a[7] = edit_type
return a
except Exception:
logger.error("bad line in ctm-edits input: "
"{0}".format(a))
raise RuntimeError
# This function processes the split lines of one utterance (as a
# list of lists of fields), to allow repetitions of words, so if the
# reference says 'i' but the hyp says 'i i', or the ref says
# 'you know' and the hyp says 'you know you know', we change the
# ref to match.
# It returns the modified list-of-lists [but note that the input
# is actually modified].
def ProcessUtteranceForRepetitions(split_lines_of_utt):
global non_scored_words, repetition_stats
# The array 'selected_lines' will contain the indexes of of selected
# elements of 'split_lines_of_utt'. Consider split_line =
# split_lines_of_utt[i]. If the hyp and ref words in split_line are both
# either '<eps>' or non-scoreable words, we discard the index.
# Otherwise we put it into selected_lines.
selected_line_indexes = []
# selected_edits will contain, for each element of selected_line_indexes, the
# corresponding edit_type from the original utterance previous to
# this function call ('cor', 'ins', etc.).
#
# As a special case, if there was a substitution ('sub') where the
# reference word was a non-scored word and the hyp word was a real word,
# we mark it in this array as 'ins', because for purposes of this algorithm
# it behaves the same as an insertion.
#
# Whenever we do any operation that will change the reference, we change
# all the selected_edits in the array to None so that they won't match
# any further operations.
selected_edits = []
# selected_hyp_words will contain, for each element of selected_line_indexes, the
# corresponding hyp_word.
selected_hyp_words = []
for i in range(len(split_lines_of_utt)):
split_line = split_lines_of_utt[i]
hyp_word = split_line[4]
ref_word = split_line[6]
# keep_this_line will be True if we are going to keep this line in the
# 'selected lines' for further processing of repetitions. We only
# eliminate lines involving non-scored words or epsilon in both hyp
# and reference position
# [note: epsilon in hyp position for non-empty segments indicates
# optional-silence, and it does make sense to make this 'invisible',
# just like non-scored words, for the purposes of this code.]
keep_this_line = True
if (hyp_word == '<eps>' or hyp_word in non_scored_words) and \
(ref_word == '<eps>' or ref_word in non_scored_words):
keep_this_line = False
if keep_this_line:
selected_line_indexes.append(i)
edit_type = split_line[7]
if edit_type == 'sub' and ref_word in non_scored_words:
assert not hyp_word in non_scored_words
# For purposes of this algorithm, substitution of, say,
# '[COUGH]' by 'hello' behaves like an insertion of 'hello',
# since we're willing to remove the '[COUGH]' from the
                # transcript.
edit_type = 'ins'
selected_edits.append(edit_type)
selected_hyp_words.append(hyp_word)
# indexes_to_fix will be a list of indexes into 'selected_indexes' where we
# plan to fix the ref to match the hyp.
indexes_to_fix = []
# This loop scans for, and fixes, two-word insertions that follow,
# or precede, the corresponding correct words.
for i in range(0, len(selected_line_indexes) - 3):
this_indexes = selected_line_indexes[i:i+4]
this_hyp_words = selected_hyp_words[i:i+4]
if this_hyp_words[0] == this_hyp_words[2] and \
this_hyp_words[1] == this_hyp_words[3] and \
this_hyp_words[0] != this_hyp_words[1]:
# if the hyp words were of the form [ 'a', 'b', 'a', 'b' ]...
this_edits = selected_edits[i:i+4]
if this_edits == [ 'cor', 'cor', 'ins', 'ins' ] or \
this_edits == [ 'ins', 'ins', 'cor', 'cor' ]:
if this_edits[0] == 'cor':
indexes_to_fix += [ i+2, i+3 ]
else:
indexes_to_fix += [ i, i+1 ]
# the next line prevents this region of the text being used
# in any further edits.
selected_edits[i:i+4] = [ None, None, None, None ]
word_pair = this_hyp_words[0] + ' ' + this_hyp_words[1]
# e.g. word_pair = 'hi there'
# add 2 because these stats are of words.
repetition_stats[word_pair] += 2
# the next line prevents this region of the text being used
# in any further edits.
selected_edits[i:i+4] = [ None, None, None, None ]
# This loop scans for, and fixes, one-word insertions that follow,
# or precede, the corresponding correct words.
for i in range(0, len(selected_line_indexes) - 1):
this_indexes = selected_line_indexes[i:i+2]
this_hyp_words = selected_hyp_words[i:i+2]
if this_hyp_words[0] == this_hyp_words[1]:
# if the hyp words were of the form [ 'a', 'a' ]...
this_edits = selected_edits[i:i+2]
if this_edits == [ 'cor', 'ins' ] or this_edits == [ 'ins', 'cor' ]:
if this_edits[0] == 'cor':
indexes_to_fix.append(i+1)
else:
indexes_to_fix.append(i)
repetition_stats[this_hyp_words[0]] += 1
# the next line prevents this region of the text being used
# in any further edits.
selected_edits[i:i+2] = [ None, None ]
for i in indexes_to_fix:
j = selected_line_indexes[i]
split_line = split_lines_of_utt[j]
ref_word = split_line[6]
hyp_word = split_line[4]
assert ref_word == '<eps>' or ref_word in non_scored_words
# we replace reference with the decoded word, which will be a
# repetition.
split_line[6] = hyp_word
split_line[7] = 'cor'
return split_lines_of_utt
# note: split_lines_of_utt is a list of lists, one per line, each containing the
# sequence of fields.
# Returns the same format of data after processing.
def ProcessUtterance(split_lines_of_utt):
new_split_lines_of_utt = []
for split_line in split_lines_of_utt:
new_split_line = ProcessLineForNonScoredWords(split_line)
if new_split_line != []:
new_split_lines_of_utt.append(new_split_line)
if args.allow_repetitions == 'true':
new_split_lines_of_utt = ProcessUtteranceForRepetitions(new_split_lines_of_utt)
return new_split_lines_of_utt
def ProcessData():
try:
f_in = open(args.ctm_edits_in)
except:
sys.exit("modify_ctm_edits.py: error opening ctm-edits input "
"file {0}".format(args.ctm_edits_in))
try:
f_out = open(args.ctm_edits_out, 'w')
except:
sys.exit("modify_ctm_edits.py: error opening ctm-edits output "
"file {0}".format(args.ctm_edits_out))
num_lines_processed = 0
# Most of what we're doing in the lines below is splitting the input lines
# and grouping them per utterance, before giving them to ProcessUtterance()
# and then printing the modified lines.
first_line = f_in.readline()
if first_line == '':
sys.exit("modify_ctm_edits.py: empty input")
split_pending_line = first_line.split()
if len(split_pending_line) == 0:
sys.exit("modify_ctm_edits.py: bad input line " + first_line)
cur_utterance = split_pending_line[0]
split_lines_of_cur_utterance = []
while True:
if len(split_pending_line) == 0 or split_pending_line[0] != cur_utterance:
split_lines_of_cur_utterance = ProcessUtterance(split_lines_of_cur_utterance)
for split_line in split_lines_of_cur_utterance:
print(' '.join(split_line), file = f_out)
split_lines_of_cur_utterance = []
if len(split_pending_line) == 0:
break
else:
cur_utterance = split_pending_line[0]
split_lines_of_cur_utterance.append(split_pending_line)
next_line = f_in.readline()
split_pending_line = next_line.split()
if len(split_pending_line) == 0:
if next_line != '':
sys.exit("modify_ctm_edits.py: got an empty or whitespace input line")
try:
f_out.close()
except:
sys.exit("modify_ctm_edits.py: error closing ctm-edits output "
"(broken pipe or full disk?)")
def PrintNonScoredStats():
if args.verbose < 1:
return
    if num_lines == 0:
        print("modify_ctm_edits.py: processed no input.", file = sys.stderr)
        return
num_lines_modified = sum(ref_change_stats.values())
num_incorrect_lines = num_lines - num_correct_lines
percent_lines_incorrect= '%.2f' % (num_incorrect_lines * 100.0 / num_lines)
percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines);
if num_incorrect_lines > 0:
percent_of_incorrect_modified = '%.2f' % (num_lines_modified * 100.0 /
num_incorrect_lines)
else:
percent_of_incorrect_modified = float('nan')
print("modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), "
"of which {2} were changed fixing the reference for non-scored words "
"({3}% of lines, or {4}% of incorrect lines)".format(
num_lines, percent_lines_incorrect, num_lines_modified,
percent_modified, percent_of_incorrect_modified),
file = sys.stderr)
keys = sorted(ref_change_stats.keys(), reverse=True,
key = lambda x: ref_change_stats[x])
num_keys_to_print = 40 if args.verbose >= 2 else 10
print("modify_ctm_edits.py: most common edits (as percentages "
"of all such edits) are:\n" +
('\n'.join([ '%s [%.2f%%]' % (k, ref_change_stats[k]*100.0/num_lines_modified)
for k in keys[0:num_keys_to_print]]))
+ '\n...'if num_keys_to_print < len(keys) else '',
file = sys.stderr)
def PrintRepetitionStats():
if args.verbose < 1 or sum(repetition_stats.values()) == 0:
return
num_lines_modified = sum(repetition_stats.values())
num_incorrect_lines = num_lines - num_correct_lines
percent_lines_incorrect= '%.2f' % (num_incorrect_lines * 100.0 / num_lines)
percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines);
if num_incorrect_lines > 0:
percent_of_incorrect_modified = '%.2f' % (num_lines_modified * 100.0 /
num_incorrect_lines)
else:
percent_of_incorrect_modified = float('nan')
print("modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), "
"of which {2} were changed fixing the reference for repetitions ({3}% of "
"lines, or {4}% of incorrect lines)".format(
num_lines, percent_lines_incorrect, num_lines_modified,
percent_modified, percent_of_incorrect_modified),
file = sys.stderr)
keys = sorted(repetition_stats.keys(), reverse=True,
key = lambda x: repetition_stats[x])
num_keys_to_print = 40 if args.verbose >= 2 else 10
print("modify_ctm_edits.py: most common repetitions inserted into reference (as percentages "
"of all words fixed in this way) are:\n" +
('\n'.join([ '%s [%.2f%%]' % (k, repetition_stats[k]*100.0/num_lines_modified)
for k in keys[0:num_keys_to_print]]))
+ '\n...' if num_keys_to_print < len(keys) else '',
file = sys.stderr)
non_scored_words = set()
ReadNonScoredWords(args.non_scored_words_in)
num_lines = 0
num_correct_lines = 0
# ref_change_stats will be a map from a string like
# 'foo -> bar' to an integer count; it keeps track of how much we changed
# the reference.
ref_change_stats = defaultdict(int)
# repetition_stats will be a map from strings like
# 'a', or 'a b' (the repeated strings), to an integer count; like
# ref_change_stats, it keeps track of how many changes we made
# in allowing repetitions.
repetition_stats = defaultdict(int)
ProcessData()
PrintNonScoredStats()
PrintRepetitionStats()
| 45.164811 | 97 | 0.643178 | [
"Apache-2.0"
] | oplatek/kaldi | egs/wsj/s5/steps/cleanup/internal/modify_ctm_edits.py | 20,279 | Python |
P1 = float(input('Informe o primeiro preço: '))
P2 = float(input('Informe o segundo preço: '))
P3 = float(input('Informe o terceiro preço: '))
if (P1 <= P2) and (P1 <= P3):
    print('O menor preço é {}'.format(P1))
elif (P2 <= P1) and (P2 <= P3):
    print('O menor preço é {}'.format(P2))
else:
    print('O menor preço é {}'.format(P3))
| 29.909091 | 47 | 0.62614 | [
"Apache-2.0"
] | kauaas/ATIVIDADES-PYTHON-N2 | ATIV07.py | 338 | Python |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""An extension to the Core and Topology package that models information on the electrical characteristics of Transmission and Distribution networks. This package is used by network applications such as State Estimation, Load Flow and Optimal Power Flow.
"""
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Fuse import Fuse
from CIM15.CDPSM.Connectivity.IEC61970.Wires.EnergyConsumer import EnergyConsumer
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Switch import Switch
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Disconnector import Disconnector
from CIM15.CDPSM.Connectivity.IEC61970.Wires.ACLineSegment import ACLineSegment
from CIM15.CDPSM.Connectivity.IEC61970.Wires.SynchronousMachine import SynchronousMachine
from CIM15.CDPSM.Connectivity.IEC61970.Wires.BusbarSection import BusbarSection
from CIM15.CDPSM.Connectivity.IEC61970.Wires.LoadBreakSwitch import LoadBreakSwitch
from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerTank import TransformerTank
from CIM15.CDPSM.Connectivity.IEC61970.Wires.GroundDisconnector import GroundDisconnector
from CIM15.CDPSM.Connectivity.IEC61970.Wires.PowerTransformerEnd import PowerTransformerEnd
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Junction import Junction
from CIM15.CDPSM.Connectivity.IEC61970.Wires.SeriesCompensator import SeriesCompensator
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Breaker import Breaker
from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerTankEnd import TransformerTankEnd
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Sectionaliser import Sectionaliser
from CIM15.CDPSM.Connectivity.IEC61970.Wires.DCLineSegment import DCLineSegment
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Line import Line
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Conductor import Conductor
from CIM15.CDPSM.Connectivity.IEC61970.Wires.PowerTransformer import PowerTransformer
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Ground import Ground
from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerEnd import TransformerEnd
from CIM15.CDPSM.Connectivity.IEC61970.Wires.ShuntCompensator import ShuntCompensator
from CIM15.CDPSM.Connectivity.IEC61970.Wires.EnergySource import EnergySource
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Jumper import Jumper
nsURI = "http://iec.ch/TC57/2010/CIM-schema-cim15?profile=http://iec.ch/TC57/2011/iec61968-13/CDPSM/Connectivity#Wires"
nsPrefix = "cimWires"
| 65.396226 | 254 | 0.841027 | [
"MIT"
] | MaximeBaudette/PyCIM | CIM15/CDPSM/Connectivity/IEC61970/Wires/__init__.py | 3,466 | Python |
from django.apps import AppConfig
class BooksConfig(AppConfig):
name = 'bookstudio.books'
verbose_name = 'books'
def ready(self):
"""Override this to put in:
            books system checks
            books signal registration
"""
pass
| 19.714286 | 37 | 0.597826 | [
"MIT"
] | sudoabhinav/bookstudio | bookstudio/books/apps.py | 276 | Python |
# pylint: disable=too-few-public-methods, no-member
"""API for scheduling learning rate."""
from .. import symbol as sym
class LRScheduler(object):
"""Base class of a learning rate scheduler.
A scheduler returns a new learning rate based on the number of updates that have
been performed.
Parameters
----------
base_lr : float, optional
The initial learning rate.
"""
def __init__(self, base_lr=0.01, name='LRScheduler'):
self.name = name
self.base_lr = base_lr
def __call__(self, num_update):
"""Return a new learning rate based on number of updates.
Parameters
----------
num_update: nnvm Symbol
the number of updates applied to weight.
"""
raise NotImplementedError("__call__ method must be overridden.")
class FactorScheduler(LRScheduler):
"""Reduce the learning rate by a factor for every *n* steps.
It returns a new learning rate by::
base_lr * pow(factor, num_update/step)
Parameters
----------
step : int
Changes the learning rate for every n updates.
factor : float, optional
The factor to change the learning rate.
stop_factor_lr : float, optional
Stop updating the learning rate if it is less than this value.
"""
def __init__(self, step, factor=1, stop_factor_lr=1e-8, name='FactorScheduler', **kwargs):
super(FactorScheduler, self).__init__(name=name, **kwargs)
if step < 1:
raise ValueError("Schedule step must be greater or equal than 1 round")
if factor > 1.0:
raise ValueError("Factor must be no more than 1 to make lr reduce")
self.step = step
self.factor = factor
self.stop_factor_lr = stop_factor_lr
def __call__(self, num_update):
updated_lr = self.base_lr * self.factor ** (num_update / self.step)
return sym.clip(updated_lr, a_min=self.stop_factor_lr, a_max=self.base_lr)
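# A minimal usage sketch (illustrative; 'num_update' would be an nnvm symbol
# supplied by the training loop):
#
#   sched = FactorScheduler(step=100, factor=0.9, base_lr=0.01)
#   lr = sched(num_update)   # symbolic 0.01 * 0.9 ** (num_update / 100),
#                            # clipped to the range [1e-8, 0.01]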
| 33.644068 | 94 | 0.645844 | [
"Apache-2.0"
] | 00liujj/tvm | nnvm/python/nnvm/compiler/lr_scheduler.py | 1,985 | Python |
import sys
import os
import re
import tempfile
import auto_editor
import auto_editor.vanparse as vanparse
from auto_editor.utils.log import Log
from auto_editor.ffwrapper import FFmpeg
def grep_options(parser):
parser.add_argument('--no-filename', action='store_true',
help='Never print filenames with output lines.')
parser.add_argument('--max-count', '-m', type=int, default=None,
help='Stop reading a file after NUM matching lines.')
parser.add_argument('--count', '-c', action='store_true',
help='Suppress normal output; instead print count of matching lines for each file.')
parser.add_argument('--ignore-case', '-i', action='store_true',
help='Ignore case distinctions for the PATTERN.')
parser.add_argument('--timecode', action='store_true',
help="Print the match's timecode.")
parser.add_argument('--time', action='store_true',
help="Print when the match happens. (Ignore ending).")
parser.add_argument('--ffmpeg-location', default=None,
help='Point to your custom ffmpeg file.')
parser.add_argument('--my-ffmpeg', action='store_true',
help='Use the ffmpeg on your PATH instead of the one packaged.')
parser.add_argument('--help', '-h', action='store_true',
help='Print info about the program or an option and exit.')
    parser.add_required('input', nargs='*', help='The pattern to search for, followed by the path(s) of the file(s) you want inspected.')
return parser
# stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
def cleanhtml(raw_html: str) -> str:
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
def grep_core(
media_file: str, add_prefix: bool, ffmpeg: FFmpeg, args, log: Log, TEMP: str
) -> None:
"""
We're using the WEBVTT subtitle format. It's better than srt
because it doesn't emit line numbers and the time code is in
    (hh:mm:ss.sss), using a period, instead of srt's (hh:mm:ss,sss), using a comma.
"""
out_file = os.path.join(TEMP, 'media.vtt')
ffmpeg.run(['-i', media_file, out_file])
count = 0
flags = 0
if args.ignore_case:
flags = re.IGNORECASE
prefix = ''
if add_prefix:
prefix = '{}:'.format(os.path.splitext(os.path.basename(media_file))[0])
if args.max_count is None:
args.max_count = float('inf')
timecode = ''
line_number = -1
with open(out_file, 'r') as file:
while True:
line = file.readline()
line_number += 1
if line_number == 0:
continue
if not line or count >= args.max_count:
break
if line.strip() == '':
continue
if re.match(r'\d*:\d\d.\d*\s-->\s\d*:\d\d.\d*', line):
if args.time:
timecode = line.split('-->')[0].strip() + ' '
else:
timecode = line.strip() + '; '
continue
line = cleanhtml(line)
match = re.search(args.input[0], line, flags)
line = line.strip()
if match:
count += 1
if not args.count:
if args.timecode or args.time:
print(prefix + timecode + line)
else:
print(prefix + line)
if args.count:
print(prefix + str(count))
def main(sys_args=sys.argv[1:]):
parser = vanparse.ArgumentParser('grep', auto_editor.version,
description='Read and match subtitle tracks in media files.',
)
parser = grep_options(parser)
TEMP = tempfile.mkdtemp()
log = Log(temp=TEMP)
try:
args = parser.parse_args(sys_args)
except vanparse.ParserError as e:
log.error(str(e))
ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, debug=False)
media_files = args.input[1:]
add_prefix = (len(media_files) > 1 or os.path.isdir(media_files[0])) and not args.no_filename
for media_file in media_files:
if not os.path.exists(media_file):
log.error(f'{media_file}: File does not exist.')
if os.path.isdir(media_file):
for _, _, files in os.walk(media_file):
for file in files:
if file == '.DS_Store':
continue
grep_core(os.path.join(media_file, file), add_prefix, ffmpeg, args,
log, TEMP)
else:
grep_core(media_file, add_prefix, ffmpeg, args, log, TEMP)
log.cleanup()
if __name__ == '__main__':
main()
| 31.923611 | 97 | 0.58995 | [
"Unlicense"
] | chancat87/auto-editor | auto_editor/subcommands/grep.py | 4,597 | Python |
from project import app, socketio
if __name__ == "__main__":
print('Running BabyMonitorSoS \n')
socketio.run(app)
| 17.714286 | 38 | 0.701613 | [
"BSD-2-Clause"
] | BabyMonitorSimulation/BabyMonitorSoS | run.py | 124 | Python |
"""
Create a blueprint with endpoints for logins from configured identity providers.
The identity providers include, for example, Google, Shibboleth, or another
fence instance. See the other files in this directory for the definitions of
the endpoints for each provider.
"""
from authlib.common.urls import add_params_to_uri
import flask
import requests
from cdislogging import get_logger
from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback
from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback
from fence.blueprints.login.fence_login import FenceLogin, FenceCallback
from fence.blueprints.login.google import GoogleLogin, GoogleCallback
from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback
from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback
from fence.blueprints.login.okta import OktaLogin, OktaCallback
from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback
from fence.blueprints.login.ras import RASLogin, RASCallback
from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback
from fence.errors import InternalError
from fence.resources.audit.utils import enable_audit_logging
from fence.restful import RestfulApi
from fence.config import config
logger = get_logger(__name__)
# Mapping from IDP ID to the name in the URL on the blueprint (see below).
IDP_URL_MAP = {
"fence": "fence",
"google": "google",
"shibboleth": "shib",
"orcid": "orcid",
"synapse": "synapse",
"microsoft": "microsoft",
"okta": "okta",
"cognito": "cognito",
"ras": "ras",
"cilogon": "cilogon",
}
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
"""
Args:
provider_id (str): provider to log in with; an IDP_URL_MAP key.
fence_idp (str, optional): if provider_id is "fence"
(multi-tenant Fence setup), fence_idp can be any of the
providers supported by the other Fence. If not specified,
will default to NIH login.
shib_idp (str, optional): if provider_id is "fence" and
fence_idp is "shibboleth", shib_idp can be any Shibboleth/
InCommon provider. If not specified, will default to NIH
login.
Returns:
str: login URL for this provider, including extra query
parameters if fence_idp and/or shib_idp are specified.
"""
try:
base_url = config["BASE_URL"].rstrip("/")
login_url = base_url + "/login/{}".format(IDP_URL_MAP[provider_id])
except KeyError as e:
raise InternalError("identity provider misconfigured: {}".format(str(e)))
params = {}
if fence_idp:
params["idp"] = fence_idp
if shib_idp:
params["shib_idp"] = shib_idp
login_url = add_params_to_uri(login_url, params)
return login_url
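# Illustrative sketch (comment added for clarity, not part of the original module):
# assuming BASE_URL is "https://example.org" and a multi-tenant "fence" IDP is
# configured, a call such as
#   absolute_login_url("fence", fence_idp="shibboleth", shib_idp="urn:example:idp")
# would return roughly
#   "https://example.org/login/fence?idp=shibboleth&shib_idp=urn%3Aexample%3Aidp"
# with the query string encoded by authlib's add_params_to_uri.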
def provider_info(login_details):
"""
Args:
login_details (dict):
{ name, desc, idp, fence_idp, shib_idps, secondary }
- "idp": a configured provider.
Multiple options can be configured with the same idp.
- if provider_id is "fence", "fence_idp" can be any of the
providers supported by the other Fence. If not specified, will
default to NIH login.
- if provider_id is "fence" and fence_idp is "shibboleth", a
list of "shib_idps" can be configured for InCommon login. If
not specified, will default to NIH login.
- Optional parameters: "desc" (description) and "secondary"
(boolean - can be used by the frontend to display secondary
buttons differently).
Returns:
dict: { name, desc, idp, urls, secondary }
- urls: list of { name, url } dictionaries
"""
info = {
# "id" deprecated, replaced by "idp"
"id": login_details["idp"],
"idp": login_details["idp"],
"name": login_details["name"],
# "url" deprecated, replaced by "urls"
"url": absolute_login_url(login_details["idp"]),
"desc": login_details.get("desc", None),
"secondary": login_details.get("secondary", False),
}
# for Fence multi-tenant login
fence_idp = None
if login_details["idp"] == "fence":
fence_idp = login_details.get("fence_idp")
# handle Shibboleth IDPs: InCommon login can either be configured
# directly in this Fence, or through multi-tenant Fence
if (
login_details["idp"] == "shibboleth" or fence_idp == "shibboleth"
) and "shib_idps" in login_details:
# get list of all available shib IDPs
if not hasattr(flask.current_app, "all_shib_idps"):
flask.current_app.all_shib_idps = get_all_shib_idps()
requested_shib_idps = login_details["shib_idps"]
if requested_shib_idps == "*":
shib_idps = flask.current_app.all_shib_idps
elif isinstance(requested_shib_idps, list):
# get the display names for each requested shib IDP
shib_idps = []
for requested_shib_idp in requested_shib_idps:
shib_idp = next(
(
available_shib_idp
for available_shib_idp in flask.current_app.all_shib_idps
if available_shib_idp["idp"] == requested_shib_idp
),
None,
)
if not shib_idp:
raise InternalError(
'Requested shib_idp "{}" does not exist'.format(
requested_shib_idp
)
)
shib_idps.append(shib_idp)
else:
raise InternalError(
'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(
requested_shib_idps
)
)
info["urls"] = [
{
"name": shib_idp["name"],
"url": absolute_login_url(
login_details["idp"], fence_idp, shib_idp["idp"]
),
}
for shib_idp in shib_idps
]
# non-Shibboleth provider
else:
info["urls"] = [
{
"name": login_details["name"],
"url": absolute_login_url(login_details["idp"], fence_idp),
}
]
return info
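# Illustrative sketch (comment added for clarity, not part of the original module):
# a login_details entry like {"name": "Google", "idp": "google"} yields a single
# {"name": ..., "url": ...} pair in info["urls"], whereas an entry like
# {"name": "InCommon", "idp": "fence", "fence_idp": "shibboleth", "shib_idps": "*"}
# expands into one URL per Shibboleth IDP returned by get_all_shib_idps().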
def get_login_providers_info():
# default login option
if config.get("DEFAULT_LOGIN_IDP"):
default_idp = config["DEFAULT_LOGIN_IDP"]
elif "default" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on ENABLED_IDENTITY_PROVIDERS.default
default_idp = config["ENABLED_IDENTITY_PROVIDERS"]["default"]
else:
logger.warning("DEFAULT_LOGIN_IDP not configured")
default_idp = None
# other login options
if config["LOGIN_OPTIONS"]:
login_options = config["LOGIN_OPTIONS"]
elif "providers" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on "providers" and convert to "login_options" format
enabled_providers = config["ENABLED_IDENTITY_PROVIDERS"]["providers"]
login_options = [
{
"name": details.get("name"),
"idp": idp,
"desc": details.get("desc"),
"secondary": details.get("secondary"),
}
for idp, details in enabled_providers.items()
]
else:
logger.warning("LOGIN_OPTIONS not configured or empty")
login_options = []
try:
all_provider_info = [
provider_info(login_details) for login_details in login_options
]
except KeyError as e:
raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e))
# if several login_options are defined for this default IDP, will
# default to the first one:
default_provider_info = next(
(info for info in all_provider_info if info["idp"] == default_idp), None
)
if not default_provider_info:
raise InternalError(
"default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format(
default_idp
)
)
return default_provider_info, all_provider_info
def make_login_blueprint():
"""
Return:
flask.Blueprint: the blueprint used for ``/login`` endpoints
Raises:
ValueError: if app is not amenably configured
"""
blueprint = flask.Blueprint("login", __name__)
blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])
@blueprint.route("", methods=["GET"])
def default_login():
"""
The default root login route.
"""
default_provider_info, all_provider_info = get_login_providers_info()
return flask.jsonify(
{"default_provider": default_provider_info, "providers": all_provider_info}
)
# Add identity provider login routes for IDPs enabled in the config.
configured_idps = config["OPENID_CONNECT"].keys()
if "fence" in configured_idps:
blueprint_api.add_resource(FenceLogin, "/fence", strict_slashes=False)
blueprint_api.add_resource(FenceCallback, "/fence/login", strict_slashes=False)
if "google" in configured_idps:
blueprint_api.add_resource(GoogleLogin, "/google", strict_slashes=False)
blueprint_api.add_resource(
GoogleCallback, "/google/login", strict_slashes=False
)
if "orcid" in configured_idps:
blueprint_api.add_resource(ORCIDLogin, "/orcid", strict_slashes=False)
blueprint_api.add_resource(ORCIDCallback, "/orcid/login", strict_slashes=False)
if "ras" in configured_idps:
blueprint_api.add_resource(RASLogin, "/ras", strict_slashes=False)
# note that the callback endpoint is "/ras/callback", not "/ras/login" like other IDPs
blueprint_api.add_resource(RASCallback, "/ras/callback", strict_slashes=False)
if "synapse" in configured_idps:
blueprint_api.add_resource(SynapseLogin, "/synapse", strict_slashes=False)
blueprint_api.add_resource(
SynapseCallback, "/synapse/login", strict_slashes=False
)
if "microsoft" in configured_idps:
blueprint_api.add_resource(MicrosoftLogin, "/microsoft", strict_slashes=False)
blueprint_api.add_resource(
MicrosoftCallback, "/microsoft/login", strict_slashes=False
)
if "okta" in configured_idps:
blueprint_api.add_resource(OktaLogin, "/okta", strict_slashes=False)
blueprint_api.add_resource(OktaCallback, "/okta/login", strict_slashes=False)
if "cognito" in configured_idps:
blueprint_api.add_resource(CognitoLogin, "/cognito", strict_slashes=False)
blueprint_api.add_resource(
CognitoCallback, "/cognito/login", strict_slashes=False
)
if "shibboleth" in configured_idps:
blueprint_api.add_resource(ShibbolethLogin, "/shib", strict_slashes=False)
blueprint_api.add_resource(
ShibbolethCallback, "/shib/login", strict_slashes=False
)
if "cilogon" in configured_idps:
blueprint_api.add_resource(CilogonLogin, "/cilogon", strict_slashes=False)
blueprint_api.add_resource(
CilogonCallback, "/cilogon/login", strict_slashes=False
)
return blueprint
def get_all_shib_idps():
"""
Get the list of all existing Shibboleth IDPs.
This function only returns the information we need to generate login URLs.
Returns:
list: list of {"idp": "", "name": ""} dictionaries
"""
url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url")
if not url:
raise InternalError(
"Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured"
)
res = requests.get(url)
assert (
res.status_code == 200
), "Unable to get list of Shibboleth IDPs from {}".format(url)
all_shib_idps = []
for shib_idp in res.json():
if "entityID" not in shib_idp:
logger.warning(
f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP."
)
continue
idp = shib_idp["entityID"]
if len(shib_idp.get("DisplayNames", [])) > 0:
name = get_shib_idp_en_name(shib_idp["DisplayNames"])
else:
logger.warning(
f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name."
)
name = idp
all_shib_idps.append(
{
"idp": idp,
"name": name,
}
)
return all_shib_idps
def get_shib_idp_en_name(names):
"""
Returns a name in English for a Shibboleth IDP, or the first available
name if no English name was provided.
Args:
names (list): list of {"lang": "", "value": ""} dictionaries
Example:
[
{
"value": "University of Chicago",
"lang": "en"
},
{
"value": "Universidad de Chicago",
"lang": "es"
}
]
Returns:
str: Display name to use for this Shibboleth IDP
"""
for name in names:
if name.get("lang") == "en":
return name["value"]
return names[0]["value"]
| 35.984127 | 126 | 0.626305 | [
"Apache-2.0"
] | chicagopcdc/fence | fence/blueprints/login/__init__.py | 13,602 | Python |
#! /usr/bin/python
"""
Monitoring functions for xrootd cache server, producing classads
that can be handed to condor
"""
import os
import math
import time
import errno
import struct
import collections
import six
from six.moves import urllib
import classad
import XRootD.client
__all__ = ['collect_cache_stats']
# these paths in the cache are to be treated as top level "VOs" for stats collection
vo_paths = [ '/user', '/pnfs/fnal.gov/usr' ]
def _split_path(path):
""" Split a path into a list of directory names """
if path[0] != '/':
raise Exception("Not absolute path")
result = []
while path != '/':
path, tail = os.path.split(path)
if tail: result.append(tail)
return list(reversed(result))
def _is_prefix(lhs, rhs):
""" return True if the first list is a prefix of the second """
rhs = list(rhs)
while rhs:
if lhs == rhs: return True
rhs.pop()
return False
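# Example (comment added for illustration): _split_path('/pnfs/fnal.gov/usr') returns
# ['pnfs', 'fnal.gov', 'usr'], and _is_prefix(['pnfs'], ['pnfs', 'fnal.gov', 'usr'])
# is True, which is how scan_cache_dirs keeps descending into directories that lie
# on one of the configured vo_paths.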
def scan_cache_dirs(rootdir):
""" Scan the top level directory of the cache.
Walks the path looking for directories that are not in vo_paths.
        For each of these, generate a cache summary.
"""
results = {}
try:
root_components = _split_path(rootdir)
for dirpath, dirnames, filenames in os.walk(rootdir, topdown=True):
# get the path components as a list, removing the rootdir part
dirpath_components = _split_path(dirpath)[len(root_components):]
for name in list(dirnames):
path_components = dirpath_components + [name]
for p in [ _split_path(p) for p in vo_paths]:
# if this directory is in vo_paths, keep recursing
if _is_prefix( path_components, p):
break
else:
# if nothing is in vo_paths, get the stats and remove from dirnames
# so this walk goes no further
vo_name = os.path.join('/', *path_components)
try:
results[vo_name] = scan_vo_dir(os.path.join(dirpath, name))
except (OSError, IOError) as ex:
results[vo_name] = {'scan_vo_dir_error': str(ex) }
dirnames.remove(name)
return results
except (OSError, IOError) as ex:
return { 'scan_cache_dirs_error' : { 'message' : str(ex) } } # error message?
def scan_vo_dir(vodir):
""" Scan a VO directory (assumed to be the whole directory tree after the top level """
now = time.time()
totalsize = 0
nfiles = 0
naccesses = 0
accesses = collections.defaultdict(int)
most_recent_access = 0
bad_cinfo_files = 0
for root, dirs, files in os.walk(vodir):
fnames = set(files)
# Somebody might add a file ending in .cinfo in the cache
# so look for the f, f.cinfo pair
for f, cinfo in ((f, f + '.cinfo') for f in fnames if f + '.cinfo' in fnames):
try:
st = os.stat(os.path.join(root, f))
except OSError as ex:
if ex.errno == errno.ENOENT:
# must have just been deleted
continue
else: raise
try:
access_info = read_cinfo(os.path.join(root, cinfo), now)
except OSError as ex:
if ex.errno == errno.ENOENT:
continue
else:
bad_cinfo_files += 1
access_info = { "naccesses" : 0, "last_access": 0, "by_hour" : {} }
except ReadCInfoError as ex:
bad_cinfo_files += 1
access_info = ex.access_info
nfiles += 1
file_size = st.st_blocks*512 # allow for sparse files
totalsize += file_size
naccesses += access_info["naccesses"]
most_recent_access = max(most_recent_access, access_info["last_access"])
for h in access_info["by_hour"]:
accesses["naccesses_hr_" + h] += access_info["by_hour"][h]
accesses["bytes_hr_" + h] += access_info["bytes_hr"][h]
result = classad.ClassAd({
"used_bytes" : totalsize,
"nfiles" : nfiles,
"naccesses" : naccesses,
"bad_cinfo_files" : bad_cinfo_files
})
result.update(accesses)
if most_recent_access > 0:
result["most_recent_access_time"] = most_recent_access
return result
# Parsing the cinfo files
# The header (not a c struct; consecutive separate values with no padding)
# version + buffer size + file size (blocks)
# int + long long + long long
_header_fmt = '=iqq'
_header_fmt_size = struct.calcsize(_header_fmt)
# then the number of accesses
# int
_int_fmt = '@q'
_int_fmt_size = struct.calcsize(_int_fmt)
# each access contains a struct (native size + padding)
# AttachTime + DetachTime + BytesDisk + BytesRam + BytesMissed
# time_t + long long + long long + long long + long long
_status_fmt = '@qqqqq'
_status_fmt_size = struct.calcsize(_status_fmt)
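# For reference (comment added for clarity): struct.calcsize gives 20 bytes for the
# '=iqq' header (4 + 8 + 8, no padding) and 40 bytes for each '@qqqqq' AStat access
# record on a typical build, which is what the read loops below rely on.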
class ReadCInfoError(Exception):
def __init__(self, *args):
Exception.__init__(self, *args)
if len(args) > 1:
self.access_info = args[1]
else:
self.access_info = {}
def read_cinfo(cinfo_file, now):
""" Try to extract useful info from the cinfo file """
result = { "naccesses": 0,
"last_access": 0,
"by_hour" : { "01": 0, "12": 0, "24": 0 },
"bytes_hr" : { "01": 0, "12": 0, "24": 0 },
}
cf = open(cinfo_file, 'rb')
# read and unpack the header
buf = cf.read(_header_fmt_size)
if len(buf) < _header_fmt_size:
# a mangled file
raise ReadCInfoError("%s header too short" % cinfo_file, result)
version, buffer_size, file_size = struct.unpack(_header_fmt, buf)
# we only understand version 2
if version != 2:
raise ReadCInfoError("%s unknown version: %s" % (cinfo_file, version), result)
# Get the size of the state vector and skip over it
# buff_synced uses 1 bit per bufferSize block of bytes
# Length is rounded up to the nearest byte
buff_synced_len = int(math.ceil(float(file_size)/buffer_size/8))
# If the file_size is zero, state vector length is 1
# (Difference is due to Python's integer division returning the floor)
if file_size == 0:
buff_synced_len = 1
cf.read(buff_synced_len)
# Go past cksum (char[16]) and creationTime (time_t)
cf.read(16 + 8)
# now the access count (an int)
buf = cf.read(_int_fmt_size)
if len(buf) < _int_fmt_size:
raise ReadCInfoError("%s: invalid access field" % cinfo_file, result)
access_count, = struct.unpack(_int_fmt, buf)
result["naccesses"] = access_count
if access_count < 0:
raise ReadCInfoError("%s: invalid access count: %s" % (cinfo_file, access_count), result)
elif access_count == 0:
return result
# read the access times
hr_01 = now - 60*60
hr_12 = now - 12*60*60
hr_24 = now - 24*60*60
# Read AStat structs
try:
for buf in iter(lambda: cf.read(_status_fmt_size), b''):
access_time, _, bytes_disk, bytes_ram, _ = struct.unpack(_status_fmt, buf)
result["last_access"] = access_time
#print access_time, bytes_disk, bytes_ram
#print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(access_time))
intervals = list()
if access_time >= hr_01: intervals.append('01')
if access_time >= hr_12: intervals.append('12')
if access_time >= hr_24: intervals.append('24')
else:
# no longer interested
                continue
for interval in intervals:
result["by_hour"][interval] += 1
result["bytes_hr"][interval] += bytes_disk + bytes_ram
except struct.error as ex:
# return what we've got
raise ReadCInfoError("%s unable to decode access time data: %s" % (cinfo_file, str(ex)), result)
return result
def test_xrootd_server(url):
""" Contact the xrootd server to check if it's alive
"""
try:
myclient = XRootD.client.FileSystem(url)
startt = time.time()
response, _ = myclient.ping(timeout=10)
elapsed = time.time() - startt
if response.fatal:
status = "fatal"
elif response.error:
status = "error"
elif response.ok:
status = "ok"
else:
status = "unknown"
result = {"ping_response_status" : status, "ping_response_code" : response.code,
"ping_response_message" : response.message, "ping_elapsed_time" : elapsed}
return result
except Exception as ex: # more specific exception would be better
return {"ping_response_status" : "failed", "ping_response_code" : -1,
"ping_response_message" : str(ex), "ping_elapsed_time" : 0.0}
def get_cache_info(rootdir, cache_max_fs_fraction):
"""Get information about the cache itself"""
result = {}
try:
stat = os.statvfs(rootdir)
total_size = int(stat.f_blocks*stat.f_bsize*cache_max_fs_fraction)
free_size = int(total_size - (stat.f_blocks-stat.f_bfree)*stat.f_bsize)
result['total_cache_bytes'] = total_size
result['free_cache_bytes'] = free_size
result['free_cache_fraction'] = 1 - float(stat.f_blocks-stat.f_bfree)/int(stat.f_blocks*cache_max_fs_fraction)
return result
except (OSError, IOError) as ex:
return {}
def collect_cache_stats(url, rootdir, cache_max_fs_fraction=1.0):
""" Collect stats on the cache server """
start_time = time.time()
parsed_url = urllib.parse.urlparse(url)
# Python 2.6's urlparse returns a ParseResult object whereas
# Python 2.4's urlparse returns a tuple that doesn't handle
# root:// properly
try:
if parsed_url.scheme not in ('root', 'xroot'):
raise Exception("URL '%s' is not an xrootd url" % url)
hostname = parsed_url.netloc
except AttributeError:
if parsed_url[0] not in ('root', 'xroot'):
raise Exception("URL '%s' is not an xrootd url" % url)
hostname = parsed_url[2][2:] # Avoid the '//' prefix
result = {'MyType' : 'Machine', 'Name': 'xrootd@%s' % hostname, 'stats_time' : int(start_time)}
result.update(test_xrootd_server(url))
result.update(get_cache_info(rootdir, cache_max_fs_fraction))
stats_per_vo = scan_cache_dirs(rootdir)
# add up the sizes
totals = dict()
most_recent_access = 0
result['VO'] = {}
for vo, vostats in stats_per_vo.items():
for k, v in vostats.items():
if k == "most_recent_access_time":
most_recent_access = max(most_recent_access, v)
else:
try:
totals[k] += v
except KeyError:
totals[k] = v
result['VO'][vo] = vostats
result['used_cache_bytes'] = totals.pop("used_bytes", 0)
for k, v in totals.items():
result["total_" + k] = v
if most_recent_access > 0:
result["most_recent_access_time"] = most_recent_access
result['time_to_collect_stats'] = time.time() - start_time
return classad.ClassAd(result)
if __name__ == '__main__':
import sys
args = sys.argv[1:]
if len(args) > 2:
args[2] = float(args[2])
elif len(args) == 2:
args.append(0.99) # max cache fraction
print(collect_cache_stats(*args))
| 33.947977 | 118 | 0.596969 | [
"Apache-2.0"
] | ivukotic/xcache | src/xrootd_cache_stats.py | 11,746 | Python |
'''
dShell output classes
@author: tparker
'''
import os
import sys
import logging
import struct
import datetime
import dshell
import util
class Output(object):
'''
dShell output base class, extended by output types
'''
_DEFAULT_FORMAT = ''
_DEFAULT_TIMEFORMAT = '%Y-%m-%d %H:%M:%S'
_DEFAULT_DELIM = ' '
_NULL = None
# true if you want to remove extra fields from the parsed record
_FILTER_EXTRA = False
def __init__(self, *a, **kw):
'''
base output class constructor
        configuration keywords:
logger=<existing logging object> to pass in a logger
format='format string' to override default formatstring for output class
pcap = filename to write pcap
'''
# setup the logger
self.logger = kw.get('logger', logging)
# parse the format string
self.setformat(kw.get('format', self._DEFAULT_FORMAT))
self.timeformat = (kw.get('timeformat', self._DEFAULT_TIMEFORMAT))
self.delim = (kw.get('delim', self._DEFAULT_DELIM))
if 'pcap' in kw:
self.pcapwriter = PCAPWriter(kw['pcap'])
else:
self.pcapwriter = None
# this is up to the output plugin to process
# by default stuffs extra fields and data into 'extra' field
# if _FILTER_EXTRA is true
self.extra = kw.get('extra', False)
# create the default session writer
if 'session' in kw:
self.sessionwriter = SessionWriter(**kw)
else:
self.sessionwriter = None
# write a message to the log
def log(self, msg, level=logging.INFO, *args, **kw):
'''write a message to the log
passes all args and kwargs thru to logging
except for level= is used to set logging level'''
self.logger.log(level, msg, *args, **kw)
def setformat(self, formatstr=None, typemap=None):
'''parse a format string and extract the field info
if no string given, reverts to default for class
will set self.fields to be a list of (name,type,spec) tuples
self.fieldnames to a list of fieldnames
        and self.fieldmap to a dict of in-field to (out-field, out-type) mappings
format string can also map in field to out field with %(in:out)spectype
or specify an explicit out type with %(in:out)specintype:outtype
(note this breaks compatibility with text formatting,
but useful for db or other output modules)
a typemap of [intype]=outtype (or [in]=(newintype,outtype)
can be used to map and replace types
'''
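        # Illustrative example (comment added, not in the original code): when called
        # with the DBOutput typemap, a format string such as
        #   '%(ts:timestamp)t %(sip)16s'
        # parses so that fieldnames == ['ts', 'sip'] and
        # fieldmap == {'ts': ('timestamp', 'TIMESTAMP'), 'sip': ('sip', 'VARCHAR')},
        # i.e. the input field 'ts' is written out to a column named 'timestamp'.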
if formatstr:
self.format = formatstr + "\n"
else:
self.format = self._DEFAULT_FORMAT + "\n"
self.fields = [] # will be a (name,type,length) tuple
self.fieldnames = []
self.fieldmap = {}
# get all the field names
e = 0
while True:
# find the next format spec of %(...)
s = self.format.find('%', e) + 1
if s < 1 or self.format[s] != '(':
break # not %(...
e = self.format.find(')', s)
if e < 0:
break # didn't find a closing paren
# get text between parens as field name
fname = self.format[s + 1:e]
# len/precision specs will be 0-9 between ) and type char
fspec = ''
for i in xrange(e + 1, len(self.format)):
if self.format[i] in '1234567890.+-# lLh':
fspec += self.format[i]
else:
break # this char is not a spec char, it is the type char
ftype = self.format[i]
i += 1
# is the field type a intype:outtype def?
if i < len(self.format) and self.format[i] == ':':
e = self.format.find(' ', i) # find the end whitespace
# split on: to get input:output mapping
ftype, outtype = self.format[i - 1:e].split(':')
else:
outtype = None # output will be same as input type
e = i # start at next char on loop
try: # field name to column mapping
fname, fmap = fname.split(':')
except:
fmap = fname # no mapping
if typemap and ftype in typemap and not outtype:
try:
(ftype, outtype) = typemap[ftype]
except:
outtype = typemap[ftype]
# append the field name,type,spec,mapping
self.fields.append((fname, ftype, fspec))
self.fieldnames.append(fname)
if outtype:
self.fieldmap[fname] = (fmap, outtype) # map of in to out,type
def parse(self, *args, **kw):
'''parse the input args/kwargs into a record dict according to format string
- timestamps are formatted to date/time strings
- fields not in the input will be defined but blank
- extra fields in the record will be formatted into a
"name=value name2=value2..." string and put in 'extra'
- args will go into 'data'
- format keyword can contain a new format string to use (this also sets format for future output)
'''
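        # Illustrative example (comment added, not in the original code): for a
        # TextOutput, parse('GET /index.html', decoder='http', ts=1.0,
        # sip='10.0.0.1', sport=80, dip='10.0.0.2', dport=1234, direction='cs')
        # yields a record where rec['data'] == 'GET /index.html', rec['datetime']
        # is ts rendered with self.timeformat, and rec['dir_arrow'] == '->'.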
# convert timestamps to proper format
for ts in [k for k in kw if k == 'ts' or k.endswith('time')]:
dt = ts[:-4] + 'datetime' # ts->datetime , Xtime -> Xdatetime
kw[dt] = datetime.datetime.fromtimestamp(
float(kw[ts])).strftime(self.timeformat) # format properly
        if kw.get('direction') == 'cs':
            kw['dir_arrow'] = '->'
        elif kw.get('direction') == 'sc':
kw['dir_arrow'] = '<-'
else:
kw['dir_arrow'] = '--'
if 'format' in kw:
self.setformat(kw['format']) # change the format string?
del kw['format']
# create the record initialized to the _NULL value
rec = dict((f, self._NULL) for f in self.fieldnames)
# populate record from datadict if datadict key is a field
if self._FILTER_EXTRA:
rec.update(
dict((f, kw[f]) for f in self.fieldnames if (f in kw and kw[f] != None)))
# place extra datadict keys into the extra field (and exclude the
# addr tuple)
if self.extra:
rec['extra'] = self.delim.join(['%s=%s' % (f, kw[f]) for f in sorted(
kw.keys()) if f not in self.fieldnames and f != 'addr'])
else: # not filtering extra, just lump them in as fields
rec.update(kw)
# populate the data field
if args:
rec['data'] = self.delim.join(map(str, args))
return rec
def dump(self, pkt=None, **kw): # pass packets to pcap
'''dump raw packet data to an output
override this if you want a format other than pcap'''
pktdata = str(pkt) # might be string, might be a dpkt object
pktlen = kw.get('len', len(pktdata))
if self.pcapwriter:
self.pcapwriter.write(pktlen, pktdata, kw['ts'])
else:
self.log(util.hexPlusAscii(str(pkt)), level=logging.DEBUG)
# close the PCAP output
def close(self):
if self.pcapwriter:
self.pcapwriter.close()
def dispatch(self, m, *args, **kwargs):
'''dispatch from Q pop'''
if m == 'write':
self.write(*args, **kwargs)
if m == 'alert':
self.alert(*args, **kwargs)
if m == 'dump':
self.dump(*args, **kwargs)
class FileOutput(Output):
def __init__(self, *args, **kw):
'''configuration for fileoutput:
fh=<existing open file handle>
file=filename to write to
mode=mode to open file as, default 'w'
'''
# do base init first
Output.__init__(self, *args, **kw)
# get the output filehandle or file
f = None
if 'fh' in kw:
self.fh = kw['fh']
return
elif 'file' in kw:
f = kw['file']
elif args:
f = args[0]
if f:
if 'mode' in kw:
mode = kw['mode']
else:
mode = 'w'
if mode == 'noclobber':
mode = 'w'
try:
while os.stat(f):
p = f.split('-')
try:
p, n = p[:-1], int(p[-1])
except ValueError:
n = 0
f = '-'.join(p + ['%04d' % (int(n) + 1)])
except OSError:
pass # file not found
self.fh = open(f, mode)
else:
self.fh = sys.stdout
def write(self, obj, **kw):
'''write session data to the session output or stdout'''
if self.sessionwriter:
self.sessionwriter.write(obj, **kw)
elif self.fh:
self.fh.write(str(obj))
def close(self):
'''close output if not stdout'''
if self.fh != sys.stdout:
self.fh.close()
Output.close(self)
class TextOutput(FileOutput):
'''formatted text output to file or stdout'''
_DEFAULT_FORMAT = "%(decoder)s %(datetime)s %(sip)16s:%(sport)-5s %(dir_arrow)s %(dip)16s:%(dport)-5s ** %(data)s **"
_NULL = ''
_FILTER_EXTRA = True
def __init__(self, *args, **kw):
if 'extra' in kw:
self._DEFAULT_FORMAT += " [ %(extra)s ]"
FileOutput.__init__(self, *args, **kw)
def alert(self, *args, **kw):
'''write an alert record
we pass in the decoder object and args/dict'''
rec = self.parse(*args, **kw)
if rec:
self.fh.write(self.format % rec)
class DBOutput(Output):
'''format strings as used by the DBOutput module to create tables and map fields
these follow the usual %(name)type and in most cases a custom format string will work
    default type maps are:
s,r = VARCHAR (if field len given) /TEXT (if no len)
c = CHAR(1)
x,X,o = VARCHAR
d,i,u = INTEGER
e,E,f,F,g,G = DECIMAL
with the following extra: (using these breaks text format string compatibility)
b = boolean
t = timestamp
D = datetime
T = this field selects table
(following are postgres-only)
A = inet
H = host
N = cidr
M = macaddr
format string can also map field to column with %(field:column)type
or specify an explicit column type with %(field:column)pytype:DBTYPE
(note this also breaks compatibility with text format strings)
'''
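    # For illustration (comment added, not in the original source): with the default
    # format below, %(decoder)T selects the table from the 'decoder' field and
    # createtable() for a hypothetical decoder named 'dns' would emit roughly
    #   CREATE TABLE "dns" (timestamp TIMESTAMP, sip TEXT, sport TEXT,
    #                       dip TEXT, dport TEXT, alert TEXT)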
_DEFAULT_FORMAT = "%(decoder)T %(ts:timestamp)t %(sip)s %(sport)s %(dip)s %(dport)s %(data:alert)s"
_NULL = None
# format type to (type,coltype) map
_TYPEMAP = {'s': 'VARCHAR', 'r': 'VARCHAR', 'c': 'CHAR(1)',
'x': 'VARCHAR', 'X': 'VARCHAR', 'o': 'VARCHAR',
'd': 'INTEGER', 'i': 'INTEGER', 'u': 'INTEGER',
'e': 'DECIMAL', 'E': 'DECIMAL',
'f': 'DECIMAL', 'F': 'DECIMAL',
'g': 'DECIMAL', 'G': 'DECIMAL',
# 'b' isn't a python type, so (ftype,DBTYPE) tuple for value formats input as ftype
'b': ('d', 'BOOLEAN'),
# not standard across database types!
't': ('f', 'TIMESTAMP'), 'D': ('s', 'DATETIME'),
'A': ('s', 'INET'), 'H': ('s', 'HOST'), 'N': ('s', 'CIDR'), 'M': ('s', 'MACADDR')} # these are postgres specific
# acceptable params to pass to db module connect method
_DBCONNPARAMS = ['host', 'user', 'passwd',
'password', 'db', 'database', 'port', 'charset']
# map of db type to insert placeholder. '%s' is the default, but sqlite3 doesn't like it
# you can override this with the 'placeholder' config keyword
_DBTYPE_PLACEHOLDER_MAP = {'sqlite3': '?'}
def __init__(self, *args, **kw):
'''configuration:
config=db config .ini file name to parse
config keywords:
dbtype=database type, selects DB API module to load
in conf file use [dbtype] section name instead
host,user,passwd,password,db,database,port will be passed to db module if present
table=db table to use if not specified by a field
insert_param=character to use as parameter placeholder for INSERT
(sqlite3=?, default=%%s)
format_types=types to format before insert (default=x)
('s' to pad strings, 'x' to convert to hex, 'f' to format floats, 'fx' for hex and floats...)
'''
self.dbconfig = kw.copy()
# if we were passed a config.ini file, parse it and add the k/v pairs
# to the config
if 'config' in self.dbconfig:
import ConfigParser
config = ConfigParser.ConfigParser()
config.read(self.dbconfig['config'])
sections = config.sections()
if len(sections) > 0:
self.dbconfig['dbtype'] = sections[0]
for k, v in config.items(sections[0], raw=True):
self.dbconfig[k] = v
# import the db module
self.db = __import__(self.dbconfig['dbtype'])
# create a connection, using a dict filtered to db conn params
self.dbconn = self.db.connect(
*args, **dict((k, self.dbconfig[k]) for k in self._DBCONNPARAMS if k in self.dbconfig))
# do the base init last to catch the format string, etc.. (as it may
# have come from the config file)
Output.__init__(self, *args, **self.dbconfig)
def createtable(self, table=None):
'''creates a table based on the format string'''
if not table and 'table' in self.dbconfig:
table = self.dbconfig['table']
try:
cursor = self.dbconn.cursor()
sqlfields = []
for fname, ftype, fspec in [f for f in self.fields if f[1] != 'T']:
ctype = self.fieldmap[fname][1]
# if no width spec, use TEXT instead of VARCHAR and hope the db
# likes it
if ctype == 'VARCHAR' and not fspec:
ctype = 'TEXT'
fdef = self.fieldmap[fname][0] + ' ' + ctype
if fspec:
                    # try to convert python format spec to something SQL will
# take
fdef += '(' + \
fspec.strip('+-# lLh').replace('.', ',') + ')'
sqlfields.append(fdef)
sql = 'CREATE TABLE "' + table + '" (' + ','.join(sqlfields) + ')'
self.log(sql, logging.DEBUG)
return cursor.execute(sql)
except:
raise
def close(self):
'''closes database connection'''
self.dbconn.close()
Output.close(self)
def alert(self, *args, **kw):
'''write an output record
we pass in the decoder object and args/dict'''
rec = self.parse(self, *args, **kw)
if rec:
self.insert(rec)
def setformat(self, formatstr=None):
'''calls main setformat and then builds the insert SQL'''
# what is the insert param?? some databases use %s, some use ?
# try to map it or take the placeholder keyword from config
ph = self.dbconfig.get('insert_param',
self._DBTYPE_PLACEHOLDER_MAP.get(
self.dbconfig['dbtype'], '%%s')
)
# these are the types we need to format before passing to the db
self.format_types = self.dbconfig.get('format_types', 'x')
Output.setformat(self, formatstr, typemap=self._TYPEMAP)
# build all fields we map (except for [T]able select)
self.tablefield = 'decoder' # default to decodername
for fname, ftype, fspec in self.fields:
if ftype == 'T':
self.tablefield = fname
sqlfields = [self.fieldmap[fname][0]
for (fname, ftype, fspec) in self.fields if fname in self.fieldmap]
self.insertsql = 'INSERT INTO "%%s" (%s) VALUES (%s)' % (
','.join(sqlfields), ','.join([ph] * len(sqlfields)))
def insert(self, rec, table=None):
''' inserts rec dict using self.format into table (if given, else default or specified by field)
if insert fails, tries to create table and insert again before raising exception '''
if not table:
if 'table' in self.dbconfig:
table = self.dbconfig['table']
elif rec[self.tablefield]:
table = rec[self.tablefield]
try:
sqlvalues = []
cursor = self.dbconn.cursor()
for fname, ftype, fspec in self.fields:
if fname in self.fieldmap:
# do we preformat this data?
if ftype in self.format_types:
sqlvalues.append(('%' + fspec + ftype) % rec[fname])
else:
sqlvalues.append(rec[fname])
# create a INSERT INTO table (fields) VALUES (?,?,?) for execute
sql = self.insertsql % table
self.log(sql + ' %s' % sqlvalues, logging.DEBUG)
except:
raise
# try once, if it fails, try to create table and retry
# throws on second failure or create table failure
fail = False
while True:
try:
cursor.execute(sql, sqlvalues)
self.dbconn.commit()
break # success
except Exception, e:
self.log(e, level=logging.WARNING)
if fail:
raise
else:
fail = True
try:
self.createtable(table)
except:
raise
class PCAPWriter(FileOutput):
'''writes a pcap file'''
def __init__(self, *args, **kw):
FileOutput.__init__(self, *args, **kw)
if self.fh:
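            # pcap global header: magic number, version 2.4, thiszone=0, sigfigs=0,
            # snaplen=65535, linktype=1 (Ethernet)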
self.fh.write(
struct.pack('IHHIIII', 0xa1b2c3d4, 2, 4, 0, 0, 65535, 1))
# overrides Output.write to write session as PCAP
# data flow is Output.dump->pcapwriter.write
def write(self, pktlen, pktdata, ts):
if self.fh:
self.fh.write(
struct.pack('II', int(ts), int((ts - int(ts)) * 1000000)))
# captured length, original length
self.fh.write(struct.pack('II', len(pktdata), pktlen))
self.fh.write(pktdata)
class SessionWriter(Output):
'''writes the session to one or more files'''
def __init__(self, session=None, **kw):
self.file = kw.get('session', session)
self.dir = kw.get('direction', 'both')
self.mode = kw.get('mode', 'a')
self.timeformat = (kw.get('timeformat', self._DEFAULT_TIMEFORMAT))
self.fieldnames = []
def write(self, obj, **kwargs):
out = None
kw = dict(**kwargs)
# if a session object with info() and data() methods (conn or blob, but
# not packet)
try:
kw.update(**obj.info()) # get object info
kw = self.parse(**kw)
if self.dir == 'both':
ds = [None]
elif self.dir == 'split':
ds = ['cs', 'sc']
else:
ds = [self.dir]
for d in ds:
kw.update(direction=d if d else 'both') # set direction
# format filename and open
out = FileOutput(self.file % kw, mode=self.mode)
# write obj data for direction
out.fh.write(obj.data(direction=d))
out.close()
except: # if not a session object
# build filename from kw
out = FileOutput(self.file % kw, mode=self.mode)
out.fh.write(str(obj))
out.close()
class QueueOutput(Output):
'''pipes pickled packets to parent process'''
def __init__(self, q, **kwargs):
self.queue = q
Output.__init__(self, **kwargs)
def write(self, *args, **kw): self.dispatch('write', *args, **kw)
def alert(self, *args, **kw): self.dispatch('alert', *args, **kw)
def dump(self, *args, **kw): self.dispatch('dump', *args, **kw)
def dispatch(self, m, *args, **kw): # takes (method,...) to Q
self.queue.put((m, args, kw))
def close(self):
self.queue.close()
Output.close(self)
# default output module
obj = TextOutput
| 38.886861 | 133 | 0.521445 | [
"BSD-2-Clause"
] | NTgitdude23/Dshell | lib/output/output.py | 21,310 | Python |
"""Auto-generated file, do not edit by hand. MQ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_MQ = PhoneMetadata(id='MQ', country_code=596, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[56]\\d{8}', possible_number_pattern='\\d{9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='596(?:0[2-5]|[12]0|3[05-9]|4[024-8]|[5-7]\\d|89|9[4-8])\\d{4}', possible_number_pattern='\\d{9}', example_number='596301234'),
mobile=PhoneNumberDesc(national_number_pattern='696(?:[0-479]\\d|5[01]|8[0-689])\\d{4}', possible_number_pattern='\\d{9}', example_number='696201234'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})(\\d{2})', format=u'\\1 \\2 \\3 \\4', national_prefix_formatting_rule=u'0\\1')])
| 85.3 | 182 | 0.759672 | [
"Apache-2.0"
] | Eyepea/python-phonenumbers | python/phonenumbers/data/region_MQ.py | 1,706 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VpnSiteArgs', 'VpnSite']
@pulumi.input_type
class VpnSiteArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
address_space: Optional[pulumi.Input['AddressSpaceArgs']] = None,
bgp_properties: Optional[pulumi.Input['BgpSettingsArgs']] = None,
device_properties: Optional[pulumi.Input['DevicePropertiesArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input['SubResourceArgs']] = None,
vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VpnSite resource.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties.
:param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs.
:param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links.
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if address_space is not None:
pulumi.set(__self__, "address_space", address_space)
if bgp_properties is not None:
pulumi.set(__self__, "bgp_properties", bgp_properties)
if device_properties is not None:
pulumi.set(__self__, "device_properties", device_properties)
if id is not None:
pulumi.set(__self__, "id", id)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if is_security_site is not None:
pulumi.set(__self__, "is_security_site", is_security_site)
if location is not None:
pulumi.set(__self__, "location", location)
if site_key is not None:
pulumi.set(__self__, "site_key", site_key)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_wan is not None:
pulumi.set(__self__, "virtual_wan", virtual_wan)
if vpn_site_links is not None:
pulumi.set(__self__, "vpn_site_links", vpn_site_links)
if vpn_site_name is not None:
pulumi.set(__self__, "vpn_site_name", vpn_site_name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name of the VpnSite.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="addressSpace")
def address_space(self) -> Optional[pulumi.Input['AddressSpaceArgs']]:
"""
The AddressSpace that contains an array of IP address ranges.
"""
return pulumi.get(self, "address_space")
@address_space.setter
def address_space(self, value: Optional[pulumi.Input['AddressSpaceArgs']]):
pulumi.set(self, "address_space", value)
@property
@pulumi.getter(name="bgpProperties")
def bgp_properties(self) -> Optional[pulumi.Input['BgpSettingsArgs']]:
"""
The set of bgp properties.
"""
return pulumi.get(self, "bgp_properties")
@bgp_properties.setter
def bgp_properties(self, value: Optional[pulumi.Input['BgpSettingsArgs']]):
pulumi.set(self, "bgp_properties", value)
@property
@pulumi.getter(name="deviceProperties")
def device_properties(self) -> Optional[pulumi.Input['DevicePropertiesArgs']]:
"""
The device properties.
"""
return pulumi.get(self, "device_properties")
@device_properties.setter
def device_properties(self, value: Optional[pulumi.Input['DevicePropertiesArgs']]):
pulumi.set(self, "device_properties", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[pulumi.Input[str]]:
"""
The ip-address for the vpn-site.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_address", value)
@property
@pulumi.getter(name="isSecuritySite")
def is_security_site(self) -> Optional[pulumi.Input[bool]]:
"""
IsSecuritySite flag.
"""
return pulumi.get(self, "is_security_site")
@is_security_site.setter
def is_security_site(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_security_site", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="siteKey")
def site_key(self) -> Optional[pulumi.Input[str]]:
"""
The key for vpn-site that can be used for connections.
"""
return pulumi.get(self, "site_key")
@site_key.setter
def site_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "site_key", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="virtualWan")
def virtual_wan(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
The VirtualWAN to which the vpnSite belongs.
"""
return pulumi.get(self, "virtual_wan")
@virtual_wan.setter
def virtual_wan(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "virtual_wan", value)
@property
@pulumi.getter(name="vpnSiteLinks")
def vpn_site_links(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]:
"""
List of all vpn site links.
"""
return pulumi.get(self, "vpn_site_links")
@vpn_site_links.setter
def vpn_site_links(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]):
pulumi.set(self, "vpn_site_links", value)
@property
@pulumi.getter(name="vpnSiteName")
def vpn_site_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the VpnSite being created or updated.
"""
return pulumi.get(self, "vpn_site_name")
@vpn_site_name.setter
def vpn_site_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpn_site_name", value)
class VpnSite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
VpnSite Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.
:param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links.
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VpnSiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
VpnSite Resource.
:param str resource_name: The name of the resource.
:param VpnSiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VpnSiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VpnSiteArgs.__new__(VpnSiteArgs)
__props__.__dict__["address_space"] = address_space
__props__.__dict__["bgp_properties"] = bgp_properties
__props__.__dict__["device_properties"] = device_properties
__props__.__dict__["id"] = id
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["is_security_site"] = is_security_site
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["site_key"] = site_key
__props__.__dict__["tags"] = tags
__props__.__dict__["virtual_wan"] = virtual_wan
__props__.__dict__["vpn_site_links"] = vpn_site_links
__props__.__dict__["vpn_site_name"] = vpn_site_name
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200301:VpnSite"), pulumi.Alias(type_="azure-native:network:VpnSite"), pulumi.Alias(type_="azure-nextgen:network:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20201101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VpnSite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VpnSite, __self__).__init__(
'azure-native:network/v20200301:VpnSite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnSite':
"""
Get an existing VpnSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VpnSiteArgs.__new__(VpnSiteArgs)
__props__.__dict__["address_space"] = None
__props__.__dict__["bgp_properties"] = None
__props__.__dict__["device_properties"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["ip_address"] = None
__props__.__dict__["is_security_site"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["site_key"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_wan"] = None
__props__.__dict__["vpn_site_links"] = None
return VpnSite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressSpace")
def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:
"""
The AddressSpace that contains an array of IP address ranges.
"""
return pulumi.get(self, "address_space")
@property
@pulumi.getter(name="bgpProperties")
def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:
"""
The set of bgp properties.
"""
return pulumi.get(self, "bgp_properties")
@property
@pulumi.getter(name="deviceProperties")
def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]:
"""
The device properties.
"""
return pulumi.get(self, "device_properties")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[Optional[str]]:
"""
The ip-address for the vpn-site.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="isSecuritySite")
def is_security_site(self) -> pulumi.Output[Optional[bool]]:
"""
IsSecuritySite flag.
"""
return pulumi.get(self, "is_security_site")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the VPN site resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="siteKey")
def site_key(self) -> pulumi.Output[Optional[str]]:
"""
The key for vpn-site that can be used for connections.
"""
return pulumi.get(self, "site_key")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualWan")
def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The VirtualWAN to which the vpnSite belongs.
"""
return pulumi.get(self, "virtual_wan")
@property
@pulumi.getter(name="vpnSiteLinks")
def vpn_site_links(self) -> pulumi.Output[Optional[Sequence['outputs.VpnSiteLinkResponse']]]:
"""
List of all vpn site links.
"""
return pulumi.get(self, "vpn_site_links")
| 47.235656 | 2,846 | 0.658106 | [
"Apache-2.0"
] | sebtelko/pulumi-azure-native | sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py | 23,051 | Python |
# -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import click
from rnacentral_pipeline.rnacentral import attempted, r2dt
@click.group("r2dt")
def cli():
"""
A group of commands for parsing data from secondary structures into an
importable format.
"""
pass
@cli.command("process-svgs")
@click.option("--allow-missing", is_flag=True, default=False)
@click.argument("model_info", type=click.File("r"))
@click.argument("directory", type=click.Path())
@click.argument("output", type=click.File("w"))
def process_svgs(model_info, directory, output, allow_missing=False):
"""
Process all SVG secondary structures in the given directory and produce a
single data file that can be imported into the database.
"""
r2dt.write(model_info, directory, output, allow_missing=allow_missing)
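# Illustrative invocation only (the console-script name is an assumption);
# the arguments are, in order, the model_info file, the directory of SVGs,
# and the output file:
#
#   rnacentral r2dt process-svgs --allow-missing model_info.tsv svgs/ r2dt-data.csv
#
# The file names above are placeholders.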
@cli.group("should-show")
def should_show():
"""
Some commands relating to building a model for should show as well as
running it.
"""
@should_show.command("convert-sheet")
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def convert_sheet(filename, output):
"""
    This command converts a downloaded Google Sheets CSV into a CSV that can
    be used as training data. Often we will build a spreadsheet of example URS
    and then use that to build a training set. This works well because an SVG
    can be embedded in Google Sheets, which makes it fast to compare several
    diagrams. To move that data back into the training set, download the sheet
    as a CSV and run this command on it to build the CSV that is used in
    training. The sheet must contain a 'urs' and a 'Labeled Should show'
    column. The values in 'Labeled Should show' must be true/false (ignoring
    case).
"""
r2dt.write_converted_sheet(filename, output)
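# Illustrative only: the downloaded sheet CSV is expected to contain at least
# the two columns named in the docstring above, for example:
#
#   urs,Labeled Should show
#   URS0000000001,TRUE
#   URS0000000002,false
#
# The URS accessions shown are placeholders.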
@should_show.command("fetch-data")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def fetch_training_data(filename, output, db_url=None):
"""
    This builds a CSV file of training data to use for model building. It is
    kept separate so a training CSV can be built and explored interactively
    before the final model-building logic is committed to the pipeline.
"""
r2dt.write_training_data(filename, db_url, output)
@should_show.command("inspect-data")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def fetch_inspect_data(filename, output, db_url=None):
"""
This is the command to use when trying to fetch more examples to add to the
training set. This will fetch some information that is useful for a person
to evaluate a diagram and decide if it should be true/false in the training
set.
"""
r2dt.write_training_data(filename, db_url, output)
@should_show.command("build-model")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("training-info", type=click.File("r"))
@click.argument("model", type=click.Path())
def build_model(training_info, model, db_url=None):
"""
    This builds a model given the training information. The training
    information should be a CSV file of:

        URS,flag

    The flag must be 1 or 0 to indicate whether the URS should be shown. This
    will fetch the data like the fetch-data command, but will then build a
    model and write it out to the output file directly.
"""
r2dt.build_model(training_info, db_url, Path(model))
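# Illustrative only: a training CSV in the format described above might look
# like
#
#   URS0000000001,1
#   URS0000000002,0
#
# The URS accessions are placeholders; the second CLI argument is the path
# where the fitted model will be written.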
@should_show.command("compute")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("model", type=click.Path())
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def write_should_show(model, filename, output, db_url=None):
"""
    This computes the should-show values for the URS ids listed in the given
    file, using the given model. The data needed for each URS will be fetched
    from the database. This is meant to operate on large batches, like
    relabeling the entire database.
"""
r2dt.write_should_show(model, filename, db_url, output)
@cli.group("model-info")
def model_info():
"""
Commands for parsing and generating data files we can import into the
database as model info files.
"""
pass
@model_info.command("crw")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def crw_model_info(filename, output):
"""
    Parse the CRW metadata file and produce something we can put in our
    database.
"""
r2dt.write_crw(filename, output)
@model_info.command("ribovision")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def ribovision_model_info(filename, output):
"""
Parse the metadata.tsv file from R2DT for Ribovision models to
produce something we can put in our database.
"""
r2dt.write_ribovision(filename, output)
@model_info.command("gtrnadb")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def gtrnadb_model_info(filename, output):
"""
Parse the metadata.tsv file from R2DT for gtrnadb models to
produce something we can put in our database.
"""
r2dt.write_gtrnadb(filename, output)
@model_info.command("rnase-p")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def rnase_p_model_info(filename, output):
"""
    Parse the metadata.tsv file from R2DT for RNAse P models to
produce something we can put in our database.
"""
r2dt.write_rnase_p(filename, output)
@cli.command("create-attempted")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def r2dt_create_attempted(filename, output):
attempted.r2dt(filename, output)
@cli.command("publish")
@click.option("--suffix", default="")
@click.option("--allow-missing", is_flag=True, default=False)
@click.argument("model_info", type=click.File("r"))
@click.argument(
"directory",
type=click.Path(
writable=False,
dir_okay=True,
file_okay=False,
),
)
@click.argument(
"output",
type=click.Path(
writable=True,
dir_okay=True,
file_okay=False,
),
)
def r2dt_publish(model_info, directory, output, allow_missing, suffix=""):
r2dt.publish(
model_info, directory, output, allow_missing=allow_missing, suffix=suffix
)
@cli.command("prepare-s3")
@click.option("--allow-missing", is_flag=True, default=False)
@click.argument("model_info", type=click.File("r"))
@click.argument(
"directory",
type=click.Path(
writable=False,
dir_okay=True,
file_okay=False,
),
)
@click.argument(
"output",
type=click.Path(
writable=True,
dir_okay=True,
file_okay=False,
),
)
@click.argument("file_list", type=click.Path())
def r2dt_prepare_s3(model_info, directory, output, file_list, allow_missing):
file_list = Path(file_list)
output = Path(output)
r2dt.prepare_s3(
model_info, directory, output, file_list, allow_missing=allow_missing
)
| 32.842324 | 81 | 0.707391 | [
"Apache-2.0"
] | RNAcentral/rnacentral-import-pipeline | rnacentral_pipeline/cli/r2dt.py | 7,915 | Python |
import json
from django.http.response import Http404, HttpResponse, HttpResponseBadRequest
from hknweb.utils import login_and_permission
from hknweb.academics.models import Instructor
from hknweb.course_surveys.constants import Attr, COURSE_SURVEYS_EDIT_PERMISSION
@login_and_permission(COURSE_SURVEYS_EDIT_PERMISSION)
def merge_instructors(request):
if request.method != "POST":
        raise Http404()
instructor_ids = request.GET.get(Attr.INSTRUCTOR_IDS, None)
instructor_ids = json.loads(instructor_ids)
instructor_ids = list(map(int, instructor_ids))
if len(instructor_ids) < 2:
return HttpResponseBadRequest()
base_instructor = Instructor.objects.get(pk=instructor_ids[0])
for id in instructor_ids[1:]:
instructor = Instructor.objects.get(pk=id)
for icsr in instructor.icsr_instructor.all():
icsr.icsr_instructor = base_instructor
icsr.save()
instructor.delete()
return HttpResponse()
| 30.90625 | 80 | 0.744186 | [
"MIT"
] | Boomaa23/hknweb | hknweb/course_surveys/views/merge_instructors.py | 989 | Python |
import os
import pyudev
import psutil
import logging
import time
from arm.ripper import music_brainz
from arm.ui import db
from arm.config.config import cfg
from flask_login import LoginManager, current_user, login_user, UserMixin # noqa: F401
from prettytable import PrettyTable
hidden_attribs = ("OMDB_API_KEY", "EMBY_USERID", "EMBY_PASSWORD", "EMBY_API_KEY", "PB_KEY", "IFTTT_KEY", "PO_KEY",
"PO_USER_KEY", "PO_APP_KEY", "ARM_API_KEY", "TMDB_API_KEY")
HIDDEN_VALUE = "<hidden>"
class Job(db.Model):
job_id = db.Column(db.Integer, primary_key=True)
arm_version = db.Column(db.String(20))
crc_id = db.Column(db.String(63))
logfile = db.Column(db.String(256))
start_time = db.Column(db.DateTime)
stop_time = db.Column(db.DateTime)
job_length = db.Column(db.String(12))
status = db.Column(db.String(32))
stage = db.Column(db.String(63))
no_of_titles = db.Column(db.Integer)
title = db.Column(db.String(256))
title_auto = db.Column(db.String(256))
title_manual = db.Column(db.String(256))
year = db.Column(db.String(4))
year_auto = db.Column(db.String(4))
year_manual = db.Column(db.String(4))
video_type = db.Column(db.String(20))
video_type_auto = db.Column(db.String(20))
video_type_manual = db.Column(db.String(20))
imdb_id = db.Column(db.String(15))
imdb_id_auto = db.Column(db.String(15))
imdb_id_manual = db.Column(db.String(15))
poster_url = db.Column(db.String(256))
poster_url_auto = db.Column(db.String(256))
poster_url_manual = db.Column(db.String(256))
devpath = db.Column(db.String(15))
mountpoint = db.Column(db.String(20))
hasnicetitle = db.Column(db.Boolean)
errors = db.Column(db.Text)
disctype = db.Column(db.String(20)) # dvd/bluray/data/music/unknown
label = db.Column(db.String(256))
path = db.Column(db.String(256))
ejected = db.Column(db.Boolean)
updated = db.Column(db.Boolean)
pid = db.Column(db.Integer)
pid_hash = db.Column(db.Integer)
tracks = db.relationship('Track', backref='job', lazy='dynamic')
config = db.relationship('Config', uselist=False, backref="job")
def __init__(self, devpath):
"""Return a disc object"""
self.devpath = devpath
self.mountpoint = "/mnt" + devpath
self.hasnicetitle = False
self.video_type = "unknown"
self.ejected = False
self.updated = False
if cfg['VIDEOTYPE'] != "auto":
self.video_type = cfg['VIDEOTYPE']
self.parse_udev()
self.get_pid()
def parse_udev(self):
"""Parse udev for properties of current disc"""
context = pyudev.Context()
device = pyudev.Devices.from_device_file(context, self.devpath)
self.disctype = "unknown"
for key, value in device.items():
if key == "ID_FS_LABEL":
self.label = value
if value == "iso9660":
self.disctype = "data"
elif key == "ID_CDROM_MEDIA_BD":
self.disctype = "bluray"
elif key == "ID_CDROM_MEDIA_DVD":
self.disctype = "dvd"
elif key == "ID_CDROM_MEDIA_TRACK_COUNT_AUDIO":
self.disctype = "music"
else:
pass
def get_pid(self):
pid = os.getpid()
p = psutil.Process(pid)
self.pid = pid
self.pid_hash = hash(p)
def get_disc_type(self, found_hvdvd_ts):
if self.disctype == "music":
logging.debug("Disc is music.")
self.label = music_brainz.main(self)
elif os.path.isdir(self.mountpoint + "/VIDEO_TS"):
logging.debug(f"Found: {self.mountpoint}/VIDEO_TS")
self.disctype = "dvd"
elif os.path.isdir(self.mountpoint + "/video_ts"):
logging.debug(f"Found: {self.mountpoint}/video_ts")
self.disctype = "dvd"
elif os.path.isdir(self.mountpoint + "/BDMV"):
logging.debug(f"Found: {self.mountpoint}/BDMV")
self.disctype = "bluray"
elif os.path.isdir(self.mountpoint + "/HVDVD_TS"):
logging.debug(f"Found: {self.mountpoint}/HVDVD_TS")
# do something here
elif found_hvdvd_ts:
logging.debug("Found file: HVDVD_TS")
# do something here too
else:
logging.debug("Did not find valid dvd/bd files. Changing disctype to 'data'")
self.disctype = "data"
def identify_audio_cd(self):
"""
Get the title for audio cds to use for the logfile name.
Needs the job class passed into it so it can be forwarded to mb
return - only the logfile - setup_logging() adds the full path
"""
# Use the music label if we can find it - defaults to music_cd.log
disc_id = music_brainz.get_disc_id(self)
mb_title = music_brainz.get_title(disc_id, self)
if mb_title == "not identified":
self.label = self.title = "not identified"
logfile = "music_cd.log"
new_log_file = f"music_cd_{round(time.time() * 100)}.log"
else:
logfile = f"{mb_title}.log"
new_log_file = f"{mb_title}_{round(time.time() * 100)}.log"
temp_log_full = os.path.join(cfg['LOGPATH'], logfile)
logfile = new_log_file if os.path.isfile(temp_log_full) else logfile
return logfile
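    # Illustrative outcome of the naming logic above: an unidentified disc
    # yields "music_cd.log" (or a timestamped "music_cd_<time>.log" when that
    # file already exists), while a disc identified as e.g. "Abbey Road"
    # yields "Abbey Road.log", or "Abbey Road_<time>.log" on a name clash.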
def __str__(self):
"""Returns a string of the object"""
s = self.__class__.__name__ + ": "
for attr, value in self.__dict__.items():
s = s + "(" + str(attr) + "=" + str(value) + ") "
return s
def pretty_table(self):
"""Returns a string of the prettytable"""
x = PrettyTable()
x.field_names = ["Config", "Value"]
x._max_width = {"Config": 50, "Value": 60}
for attr, value in self.__dict__.items():
if attr == "config":
x.add_row([str(attr), str(value.pretty_table())])
else:
x.add_row([str(attr), str(value)])
return str(x.get_string())
def get_d(self):
r = {}
for key, value in self.__dict__.items():
if '_sa_instance_state' not in key:
r[str(key)] = str(value)
return r
def __repr__(self):
return '<Job {}>'.format(self.label)
def eject(self):
"""Eject disc if it hasn't previously been ejected"""
if not self.ejected:
self.ejected = True
try:
if os.system("umount " + self.devpath):
logging.debug("we unmounted disc" + self.devpath)
if os.system("eject " + self.devpath):
logging.debug("we ejected disc" + self.devpath)
self.ejected = True
else:
logging.debug("failed to eject" + self.devpath)
except Exception as e:
logging.debug(self.devpath + " couldn't be ejected " + str(e))
class Track(db.Model):
track_id = db.Column(db.Integer, primary_key=True)
job_id = db.Column(db.Integer, db.ForeignKey('job.job_id'))
track_number = db.Column(db.String(4))
length = db.Column(db.Integer)
aspect_ratio = db.Column(db.String(20))
fps = db.Column(db.Float)
main_feature = db.Column(db.Boolean)
basename = db.Column(db.String(256))
filename = db.Column(db.String(256))
orig_filename = db.Column(db.String(256))
new_filename = db.Column(db.String(256))
ripped = db.Column(db.Boolean)
status = db.Column(db.String(32))
error = db.Column(db.Text)
source = db.Column(db.String(32))
def __init__(self, job_id, track_number, length, aspect_ratio, fps, main_feature, source, basename, filename):
"""Return a track object"""
self.job_id = job_id
self.track_number = track_number
self.length = length
self.aspect_ratio = aspect_ratio
self.fps = fps
self.main_feature = main_feature
self.source = source
self.basename = basename
self.filename = filename
self.ripped = False
def __repr__(self):
return '<Post {}>'.format(self.track_number)
class Config(db.Model):
CONFIG_ID = db.Column(db.Integer, primary_key=True)
job_id = db.Column(db.Integer, db.ForeignKey('job.job_id'))
ARM_CHECK_UDF = db.Column(db.Boolean)
GET_VIDEO_TITLE = db.Column(db.Boolean)
SKIP_TRANSCODE = db.Column(db.Boolean)
VIDEOTYPE = db.Column(db.String(25))
MINLENGTH = db.Column(db.String(6))
MAXLENGTH = db.Column(db.String(6))
MANUAL_WAIT = db.Column(db.Boolean)
MANUAL_WAIT_TIME = db.Column(db.Integer)
RAW_PATH = db.Column(db.String(255))
TRANSCODE_PATH = db.Column(db.String(255))
COMPLETED_PATH = db.Column(db.String(255))
EXTRAS_SUB = db.Column(db.String(255))
INSTALLPATH = db.Column(db.String(255))
LOGPATH = db.Column(db.String(255))
LOGLEVEL = db.Column(db.String(255))
LOGLIFE = db.Column(db.Integer)
DBFILE = db.Column(db.String(255))
WEBSERVER_IP = db.Column(db.String(25))
WEBSERVER_PORT = db.Column(db.Integer)
SET_MEDIA_PERMISSIONS = db.Column(db.Boolean)
CHMOD_VALUE = db.Column(db.Integer)
SET_MEDIA_OWNER = db.Column(db.Boolean)
CHOWN_USER = db.Column(db.String(50))
CHOWN_GROUP = db.Column(db.String(50))
RIPMETHOD = db.Column(db.String(25))
MKV_ARGS = db.Column(db.String(25))
DELRAWFILES = db.Column(db.Boolean)
HASHEDKEYS = db.Column(db.Boolean)
HB_PRESET_DVD = db.Column(db.String(256))
HB_PRESET_BD = db.Column(db.String(256))
DEST_EXT = db.Column(db.String(10))
HANDBRAKE_CLI = db.Column(db.String(25))
MAINFEATURE = db.Column(db.Boolean)
HB_ARGS_DVD = db.Column(db.String(256))
HB_ARGS_BD = db.Column(db.String(256))
EMBY_REFRESH = db.Column(db.Boolean)
EMBY_SERVER = db.Column(db.String(25))
EMBY_PORT = db.Column(db.String(6))
EMBY_CLIENT = db.Column(db.String(25))
EMBY_DEVICE = db.Column(db.String(50))
EMBY_DEVICEID = db.Column(db.String(128))
EMBY_USERNAME = db.Column(db.String(50))
EMBY_USERID = db.Column(db.String(128))
EMBY_PASSWORD = db.Column(db.String(128))
EMBY_API_KEY = db.Column(db.String(64))
NOTIFY_RIP = db.Column(db.Boolean)
NOTIFY_TRANSCODE = db.Column(db.Boolean)
PB_KEY = db.Column(db.String(64))
IFTTT_KEY = db.Column(db.String(64))
IFTTT_EVENT = db.Column(db.String(25))
PO_USER_KEY = db.Column(db.String(64))
PO_APP_KEY = db.Column(db.String(64))
OMDB_API_KEY = db.Column(db.String(64))
def __init__(self, c, job_id):
self.__dict__.update(c)
self.job_id = job_id
def list_params(self):
"""Returns a string of the object"""
s = self.__class__.__name__ + ": "
for attr, value in self.__dict__.items():
if s:
s = s + "\n"
if str(attr) in hidden_attribs and value:
value = HIDDEN_VALUE
s = s + str(attr) + ":" + str(value)
return s
def __str__(self):
"""Returns a string of the object"""
s = self.__class__.__name__ + ": "
for attr, value in self.__dict__.items():
if str(attr) in hidden_attribs and value:
value = HIDDEN_VALUE
s = s + "(" + str(attr) + "=" + str(value) + ") "
return s
def pretty_table(self):
"""Returns a string of the prettytable"""
x = PrettyTable()
x.field_names = ["Config", "Value"]
x._max_width = {"Config": 20, "Value": 30}
for attr, value in self.__dict__.items():
if str(attr) in hidden_attribs and value:
value = HIDDEN_VALUE
x.add_row([str(attr), str(value)])
return str(x.get_string())
def get_d(self):
r = {}
for key, value in self.__dict__.items():
if str(key) not in hidden_attribs:
r[str(key)] = str(value)
return r
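    # Note (illustrative): any attribute named in hidden_attribs (for example
    # OMDB_API_KEY) is rendered as "<hidden>" by list_params(), __str__() and
    # pretty_table() above, while get_d() omits those keys from the returned
    # dict entirely.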
class User(db.Model, UserMixin):
user_id = db.Column(db.Integer, index=True, primary_key=True)
email = db.Column(db.String(64))
password = db.Column(db.String(128))
hash = db.Column(db.String(256))
def __init__(self, email=None, password=None, hashed=None):
self.email = email
self.password = password
self.hash = hashed
def __repr__(self):
return '<User %r>' % (self.email)
def get_id(self):
return self.user_id
class AlembicVersion(db.Model):
version_num = db.Column(db.String(36), autoincrement=False, primary_key=True)
def __init__(self, version=None):
self.version_num = version
class UISettings(db.Model):
id = db.Column(db.Integer, autoincrement=True, primary_key=True)
use_icons = db.Column(db.Boolean)
save_remote_images = db.Column(db.Boolean)
bootstrap_skin = db.Column(db.String(64))
language = db.Column(db.String(4))
index_refresh = db.Column(db.Integer)
database_limit = db.Column(db.Integer)
def __init__(self, use_icons=None, save_remote_images=None, bootstrap_skin=None, language=None, index_refresh=None,
database_limit=None):
self.use_icons = use_icons
self.save_remote_images = save_remote_images
self.bootstrap_skin = bootstrap_skin
self.language = language
self.index_refresh = index_refresh
self.database_limit = database_limit
def __repr__(self):
return '<UISettings %r>' % self.id
def __str__(self):
"""Returns a string of the object"""
s = self.__class__.__name__ + ": "
for attr, value in self.__dict__.items():
s = s + "(" + str(attr) + "=" + str(value) + ") "
return s
def get_d(self):
r = {}
for key, value in self.__dict__.items():
if '_sa_instance_state' not in key:
r[str(key)] = str(value)
return r
| 36.21447 | 119 | 0.610346 | [
"MIT"
] | charmarkk/automatic-ripping-machine | arm/models/models.py | 14,015 | Python |
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# https://stackoverflow.com/questions/16910955/programmatically-configure-logback-appender?noredirect=1
#
import ch.qos.logback.core.Appender as LogAppender
import ch.qos.logback.core.util.COWArrayList as COWArrayList
import ch.qos.logback.classic.encoder.PatternLayoutEncoder as PatternLayoutEncoder
import ch.qos.logback.core.FileAppender as FileAppender
import org.slf4j.LoggerFactory as LoggerFactory
import ch.qos.logback.classic.Level as logLevels
import json
def getLogAppenders( loggerName="console" ):
loggerMap = []
myLogger = LoggerFactory.getLogger("logmanager")
loggerContext = LoggerFactory.getILoggerFactory()
myLogger.error("===================")
appenderMap = {}
for logger in loggerContext.getLoggerList():
appenderList = logger.iteratorForAppenders()
while appenderList.hasNext():
appender = appenderList.next()
logger.error("Logger %s" % appender.getName())
if appender.getName() not in appenderMap.keys():
loggerMap.append({"name": appender.getName(), "appender": "NA"})
myLogger.error("Appender %s: %s" % (appender.getName(), "NA"))
myLogger.error("===================")
return loggerMap
def createLogAppender( name, file ):
lc = LoggerFactory.getILoggerFactory()
ple = PatternLayoutEncoder()
ple.setPattern("%date %level [%thread] %logger{10} [%file:%line] %msg%n")
ple.setContext(lc)
ple.start()
fileAppender = FileAppender()
fileAppender.setFile(file)
fileAppender.setEncoder(ple)
fileAppender.setContext(lc)
fileAppender.start()
    logger = LoggerFactory.getLogger(name)
logger.addAppender(fileAppender)
#logger.setLevel(logLevels.DEBUG)
# set to true if root should log too
logger.setAdditive(True)
return logger
myLogger = LoggerFactory.getLogger("logmanager")
verb = "GET"
if (request):
if (request.query):
if (request.query['verb']):
verb = request.query['verb']
if( verb == "create"):
string = request.query['string']
file = request.query['file']
myLogger.info("Setting %s to %s" % (string, file))
createLogAppender(string, file)
loggerMap = getLogAppenders()
myLogger.error("%s" % json.dumps(loggerMap, indent=4, sort_keys=True))
response.entity = {"status": "OK", "data":loggerMap }
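# Illustrative usage (the endpoint path is an assumption, not defined here):
# calling the script with no query parameters lists the known appenders,
# while a file appender can be created with a request along the lines of
#
#   GET .../getLogAppenders?verb=create&string=my.logger&file=/tmp/my.log
#
# The parameter names match what the script reads above; the logger name and
# file path are placeholders.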
| 44.842105 | 462 | 0.720657 | [
"MIT"
] | xebialabs-community/xlr-logreport-plugin | src/main/resources/restapi/logger/getLogAppenders.py | 3,408 | Python |
import threading
from time import sleep
from .intcode import Intcode
class Amplifier(object):
def __init__(self, mem_str: str):
self._amps = [Intcode(mem_str, name=f'Amp {n + 1}') for n in range(5)]
def run(self, inputs: str or list, trace=False, quiet=True):
out = 0
p = self._amps[0]
if isinstance(inputs, str):
inputs = [int(v) for v in inputs.split(',')]
for inp in inputs:
p.reset_core()
p.simulate([inp, out], trace=trace)
out = p.output[0]
self._print_log(quiet)
return out
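    # Illustrative only: `run` feeds each phase setting in `inputs`, together
    # with the previous amplifier's output (starting at 0), into a freshly
    # reset Intcode core, so e.g. Amplifier(program_text).run("4,3,2,1,0")
    # returns the final amplifier's output for that phase ordering
    # (program_text is a placeholder Intcode program string).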
def _print_log(self, quiet):
if not quiet:
for p in self._amps:
msg = f'{p.name} log:'
top_n_tail = "*" * (len(msg) + 4)
print(top_n_tail)
print(f'* {msg} *')
print(top_n_tail)
print('\n'.join(p.get_log()))
def run_regeneration(self, inputs: str or list, trace=False, quiet=True):
if isinstance(inputs, str):
inputs = [int(v) for v in inputs.split(',')]
p = self._amps[0]
p.reset_core()
for n in self._amps[1:]:
p.connect(n.receiver)
p = n
p.reset_core()
self._amps[-1].connect(self._amps[0].receiver)
threads = []
for a, n in zip(self._amps, inputs):
a.receiver(n)
t = threading.Thread(target=a.simulate, kwargs={'trace': trace})
threads.append(t)
t.start()
self._amps[0].receiver(0)
while any(t.is_alive() for t in threads):
sleep(0.0001)
self._print_log(quiet)
return self._amps[0]._input.pop() | 31.796296 | 78 | 0.522423 | [
"Unlicense"
] | GeoffRiley/AdventOfCode | intcode/amplifier.py | 1,717 | Python |
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from DistributionGroupAppsDeleteRequest.clsDistributionGroupAppsDeleteRequest import DistributionGroupAppsDeleteRequest # noqa: E501
from appcenter_sdk.rest import ApiException
class TestDistributionGroupAppsDeleteRequest(unittest.TestCase):
"""DistributionGroupAppsDeleteRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDistributionGroupAppsDeleteRequest(self):
"""Test DistributionGroupAppsDeleteRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsDistributionGroupAppsDeleteRequest.DistributionGroupAppsDeleteRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 27.1 | 133 | 0.760148 | [
"MIT"
] | Brantone/appcenter-sdks | sdks/python/test/test_DistributionGroupAppsDeleteRequest.py | 1,084 | Python |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tags import TagPatterns
class Criticality(object):
def __init__(self, critical_tags=None, non_critical_tags=None):
self.critical_tags = self._get_tag_patterns(critical_tags)
self.non_critical_tags = self._get_tag_patterns(non_critical_tags)
def _get_tag_patterns(self, tags):
return TagPatterns(tags) if not isinstance(tags, TagPatterns) else tags
def tag_is_critical(self, tag):
return self.critical_tags.match(tag)
def tag_is_non_critical(self, tag):
return self.non_critical_tags.match(tag)
def test_is_critical(self, test):
if self.critical_tags and not self.critical_tags.match(test.tags):
return False
return not self.non_critical_tags.match(test.tags)
def __bool__(self):
return bool(self.critical_tags or self.non_critical_tags)
    # Python 2 compatibility alias for __bool__
def __nonzero__(self):
return self.__bool__()
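# Illustrative usage (not part of the module): with
#
#   crit = Criticality(critical_tags=['smoke'], non_critical_tags=['wip'])
#
# a test tagged only 'smoke' is critical, while any test carrying 'wip' is
# non-critical, per the tag-pattern checks above.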
| 34.704545 | 79 | 0.73019 | [
"Apache-2.0"
] | userzimmermann/robotframework | src/robot/model/criticality.py | 1,527 | Python |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Competition.url_redirect'
db.alter_column(u'web_competition', 'url_redirect', self.gf('django.db.models.fields.URLField')(max_length=200, null=True))
def backwards(self, orm):
# Changing field 'Competition.url_redirect'
db.alter_column(u'web_competition', 'url_redirect', self.gf('django.db.models.fields.TextField')(null=True))
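    # Illustrative only: with South, this schema change is applied via
    # something like `python manage.py migrate web`; migrating back to the
    # previous migration number triggers backwards() and restores the
    # TextField definition.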
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'authenz.cluser': {
'Meta': {'object_name': 'ClUser'},
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'contact_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_on_submission_finished_successfully': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'organizer_direct_message_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizer_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'participation_status_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'rabbitmq_password': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'rabbitmq_queue_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5', 'blank': 'True'}),
'rabbitmq_username': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'team_members': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'queues.queue': {
'Meta': {'object_name': 'Queue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organizers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'organizers'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}),
'vhost': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
u'teams.team': {
'Meta': {'unique_together': "(('name', 'competition'),)", 'object_name': 'Team'},
'allow_requests': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_creator'", 'to': u"orm['authenz.ClUser']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_url_base': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'to': u"orm['authenz.ClUser']", 'through': u"orm['teams.TeamMembership']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.TeamStatus']", 'null': 'True'})
},
u'teams.teammembership': {
'Meta': {'object_name': 'TeamMembership'},
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_invitation': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.TeamMembershipStatus']", 'null': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"})
},
u'teams.teammembershipstatus': {
'Meta': {'object_name': 'TeamMembershipStatus'},
'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'teams.teamstatus': {
'Meta': {'object_name': 'TeamStatus'},
'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'web.competition': {
'Meta': {'ordering': "['end_date']", 'object_name': 'Competition'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'competition_admins'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['authenz.ClUser']"}),
'allow_organizer_teams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_public_submissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_teams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'anonymous_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'chahub_data_hash': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'chahub_needs_retry': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'chahub_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'competition_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitioninfo_creator'", 'to': u"orm['authenz.ClUser']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disallow_leaderboard_modifying': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_detailed_results': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_forum': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_medical_image_viewer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_per_submission_metadata': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_teams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'force_submission_to_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_registration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hide_chart': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hide_top_three': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'image_url_base': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_migrating': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_migrating_delayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_phase_migration': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitioninfo_modified_by'", 'to': u"orm['authenz.ClUser']"}),
'original_yaml_file': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'queue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'competitions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['queues.Queue']"}),
'require_team_approval': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'reward': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'show_datasets_from_yaml': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'competition_teams'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['teams.Team']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'url_redirect': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'web.competitiondefbundle': {
'Meta': {'object_name': 'CompetitionDefBundle'},
'config_bundle': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['authenz.ClUser']"}),
's3_config_bundle': ('s3direct.fields.S3DirectField', [], {'null': 'True', 'blank': 'True'})
},
u'web.competitiondump': {
'Meta': {'object_name': 'CompetitionDump'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dumps'", 'to': u"orm['web.Competition']"}),
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Starting'", 'max_length': '64'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'web.competitionparticipant': {
'Meta': {'unique_together': "(('user', 'competition'),)", 'object_name': 'CompetitionParticipant'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participants'", 'to': u"orm['web.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ParticipantStatus']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'participation'", 'to': u"orm['authenz.ClUser']"})
},
u'web.competitionphase': {
'Meta': {'ordering': "['phasenumber']", 'object_name': 'CompetitionPhase'},
'auto_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': u"orm['web.Competition']"}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'phase'", 'blank': 'True', 'to': u"orm['web.Dataset']"}),
'default_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'disable_custom_docker_image': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'execution_time_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '300'}),
'force_best_submission_to_leaderboard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ingestion_program': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'ingestion_program_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'ingestion_program_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ingestion_program_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'input_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'input_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'input_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'is_migrated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_scoring_only': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'leaderboard_management_mode': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '50'}),
'max_submissions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'max_submissions_per_day': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999'}),
'phase_never_ends': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'phasenumber': ('django.db.models.fields.PositiveIntegerField', [], {}),
'public_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'public_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'reference_data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'reference_data_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reference_data_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'scoring_program': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'scoring_program_docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'scoring_program_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'scoring_program_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"}),
'start_date': ('django.db.models.fields.DateTimeField', [], {}),
'starting_kit': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'starting_kit_organizer_dataset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'starting_kit_organizer_dataset'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['web.OrganizerDataSet']"})
},
u'web.competitionsubmission': {
'Meta': {'unique_together': "(('submission_number', 'phase', 'participant'),)", 'object_name': 'CompetitionSubmission'},
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'chahub_data_hash': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'chahub_needs_retry': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'chahub_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'completed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'coopetition_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'detailed_results_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'dislike_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'docker_image': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'download_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'exception_details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'execution_key': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'file_url_base': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'history_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ingestion_program_stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'ingestion_program_stdout_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'inputfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_migrated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'like_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'method_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'method_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'organization_or_affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'participant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['web.CompetitionParticipant']"}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'submissions'", 'to': u"orm['web.CompetitionPhase']"}),
'prediction_output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'prediction_runfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'prediction_stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'prediction_stdout_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'private_output_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'publication_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'queue_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'readable_filename': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'runfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
's3_file': ('s3direct.fields.S3DirectField', [], {'null': 'True', 'blank': 'True'}),
'scores_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.CompetitionSubmissionStatus']"}),
'status_details': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'stderr_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'stdout_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'submission_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team'", 'null': 'True', 'to': u"orm['teams.Team']"}),
'team_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'when_made_public': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'when_unmade_public': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'web.competitionsubmissionmetadata': {
'Meta': {'object_name': 'CompetitionSubmissionMetadata'},
'beginning_cpu_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'beginning_swap_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'beginning_virtual_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_cpu_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_swap_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_virtual_memory_usage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ingestion_program_duration': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'is_predict': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_scoring': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processes_running_in_temp_dir': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'submission': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'metadatas'", 'to': u"orm['web.CompetitionSubmission']"})
},
u'web.competitionsubmissionstatus': {
'Meta': {'object_name': 'CompetitionSubmissionStatus'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'web.contentcategory': {
'Meta': {'object_name': 'ContentCategory'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'content_limit': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_menu': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['web.ContentCategory']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'visibility': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ContentVisibility']"})
},
u'web.contentvisibility': {
'Meta': {'object_name': 'ContentVisibility'},
'classname': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'web.dataset': {
'Meta': {'ordering': "['number']", 'object_name': 'Dataset'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datasets'", 'to': u"orm['authenz.ClUser']"}),
'datafile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ExternalFile']"}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'web.defaultcontentitem': {
'Meta': {'object_name': 'DefaultContentItem'},
'category': ('mptt.fields.TreeForeignKey', [], {'to': u"orm['web.ContentCategory']"}),
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initial_visibility': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ContentVisibility']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'web.externalfile': {
'Meta': {'object_name': 'ExternalFile'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'source_address_info': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.ExternalFileType']"})
},
u'web.externalfilesource': {
'Meta': {'object_name': 'ExternalFileSource'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'service_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'web.externalfiletype': {
'Meta': {'object_name': 'ExternalFileType'},
'codename': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'web.organizerdataset': {
'Meta': {'object_name': 'OrganizerDataSet'},
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.TextField', [], {'default': "''"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sub_data_files': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['web.OrganizerDataSet']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'None'", 'max_length': '64'}),
'uploaded_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authenz.ClUser']"})
},
u'web.page': {
'Meta': {'ordering': "['category', 'rank']", 'unique_together': "(('label', 'category', 'container'),)", 'object_name': 'Page'},
'category': ('mptt.fields.TreeForeignKey', [], {'to': u"orm['web.ContentCategory']"}),
'codename': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'null': 'True', 'to': u"orm['web.Competition']"}),
'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': u"orm['web.PageContainer']"}),
'defaults': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.DefaultContentItem']", 'null': 'True', 'blank': 'True'}),
'html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'markup': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'web.pagecontainer': {
'Meta': {'unique_together': "(('object_id', 'content_type'),)", 'object_name': 'PageContainer'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'web.participantstatus': {
'Meta': {'object_name': 'ParticipantStatus'},
'codename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'web.phaseleaderboard': {
'Meta': {'object_name': 'PhaseLeaderBoard'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phase': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'board'", 'unique': 'True', 'to': u"orm['web.CompetitionPhase']"})
},
u'web.phaseleaderboardentry': {
'Meta': {'unique_together': "(('board', 'result'),)", 'object_name': 'PhaseLeaderBoardEntry'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entries'", 'to': u"orm['web.PhaseLeaderBoard']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'leaderboard_entry_result'", 'to': u"orm['web.CompetitionSubmission']"})
},
u'web.submissioncomputedscore': {
'Meta': {'object_name': 'SubmissionComputedScore'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'operation': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'scoredef': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'computed_score'", 'unique': 'True', 'to': u"orm['web.SubmissionScoreDef']"})
},
u'web.submissioncomputedscorefield': {
'Meta': {'object_name': 'SubmissionComputedScoreField'},
'computed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fields'", 'to': u"orm['web.SubmissionComputedScore']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"})
},
u'web.submissionresultgroup': {
'Meta': {'ordering': "['ordering']", 'object_name': 'SubmissionResultGroup'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'phases': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.CompetitionPhase']", 'through': u"orm['web.SubmissionResultGroupPhase']", 'symmetrical': 'False'})
},
u'web.submissionresultgroupphase': {
'Meta': {'unique_together': "(('group', 'phase'),)", 'object_name': 'SubmissionResultGroupPhase'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionResultGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.CompetitionPhase']"})
},
u'web.submissionscore': {
'Meta': {'unique_together': "(('result', 'scoredef'),)", 'object_name': 'SubmissionScore'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scores'", 'to': u"orm['web.CompetitionSubmission']"}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '10'})
},
u'web.submissionscoredef': {
'Meta': {'unique_together': "(('key', 'competition'),)", 'object_name': 'SubmissionScoreDef'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
'computed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['web.SubmissionResultGroup']", 'through': u"orm['web.SubmissionScoreDefGroup']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'numeric_format': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'selection_default': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_rank': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sorting': ('django.db.models.fields.SlugField', [], {'default': "'asc'", 'max_length': '20'})
},
u'web.submissionscoredefgroup': {
'Meta': {'unique_together': "(('scoredef', 'group'),)", 'object_name': 'SubmissionScoreDefGroup'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionResultGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']"})
},
u'web.submissionscoreset': {
'Meta': {'unique_together': "(('key', 'competition'),)", 'object_name': 'SubmissionScoreSet'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.Competition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'ordering': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['web.SubmissionScoreSet']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'scoredef': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['web.SubmissionScoreDef']", 'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['web'] | 94.325866 | 264 | 0.578119 | [
"Apache-2.0"
] | AIMultimediaLab/AI4Media-EaaS-prototype-Py2-public | codalab/apps/web/migrations/0082_auto__chg_field_competition_url_redirect.py | 46,314 | Python |
# Python program for an implementation of the QuickSort algorithm
# This function takes last element as pivot, places
# the pivot element at its correct position in sorted
# array, and places all smaller (smaller than pivot)
# to left of pivot and all greater elements to right
# of pivot
def partition(arr, low, high):
i = (low - 1) # index of smaller element
pivot = arr[high] # pivot
for j in range(low, high):
# If current element is smaller than or
# equal to pivot
if arr[j] <= pivot:
# increment index of smaller element
i = i + 1
arr[i], arr[j] = arr[j], arr[i]
arr[i + 1], arr[high] = arr[high], arr[i + 1]
return (i + 1)
# The main function that implements QuickSort
# arr[] --> Array to be sorted,
# low --> Starting index,
# high --> Ending index
# Function to do Quick sort
def quickSort(arr, low, high):
if len(arr) == 1:
return arr
if low < high:
# pi is partitioning index, arr[p] is now
# at right place
pi = partition(arr, low, high)
# Separately sort elements before
# partition and after partition
quickSort(arr, low, pi - 1)
quickSort(arr, pi + 1, high)
if __name__ == "__main__":
# Driver code to test above
arr = [10, 7, 8, 9, 1, 5]
n = len(arr)
quickSort(arr, 0, n - 1)
print("Sorted array is:")
for i in range(n):
print("%d" % arr[i]) | 25.875 | 53 | 0.591442 | [
"Apache-2.0"
] | goldy1992/algorithms | quicksort/quicksort.py | 1,449 | Python |
'''
Inter-coder agreement statistic Fleiss' Pi.
.. moduleauthor:: Chris Fournier <chris.m.fournier@gmail.com>
'''
from __future__ import absolute_import, division
from decimal import Decimal
from segeval.agreement import __fnc_metric__, __actual_agreement_linear__
def __fleiss_pi_linear__(dataset, **kwargs):
'''
Calculates Fleiss' :math:`\pi` (or multi-:math:`\pi`), originally proposed in
[Fleiss1971]_, and is equivalent to Siegel and Castellan's :math:`K`
[SiegelCastellan1988]_. For 2 coders, this is equivalent to Scott's :math:`\pi`
[Scott1955]_.
'''
metric_kwargs = dict(kwargs)
metric_kwargs['return_parts'] = True
# Arguments
return_parts = kwargs['return_parts']
# Check that there are an equal number of items for each coder
if len(set([len(coder_segs.values()) for coder_segs in dataset.values()])) != 1:
raise Exception('Unequal number of items contained.')
# Initialize totals
all_numerators, all_denominators, _, coders_boundaries = \
__actual_agreement_linear__(dataset, **metric_kwargs)
# Calculate Aa
A_a = Decimal(sum(all_numerators)) / sum(all_denominators)
# Calculate Ae
p_e_segs = list()
for boundaries_info in coders_boundaries.values():
for item in boundaries_info:
boundaries, total_boundaries = item
p_e_seg = Decimal(boundaries) / total_boundaries
p_e_segs.append(p_e_seg)
# Calculate P_e_seg
P_e_seg = Decimal(sum(p_e_segs)) / len(p_e_segs)
A_e = (P_e_seg ** 2)
# Calculate pi
pi = (A_a - A_e) / (Decimal('1') - A_e)
# Return
if return_parts:
return A_a, A_e
else:
return pi
def fleiss_pi_linear(dataset, **kwargs):
'''
Calculates Fleiss' :math:`\pi` (or multi-:math:`\pi`), originally proposed in
[Fleiss1971]_, and is equivalent to Siegel and Castellan's :math:`K`
[SiegelCastellan1988]_. For 2 coders, this is equivalent to Scott's :math:`\pi`
[Scott1955]_.
'''
return __fnc_metric__(__fleiss_pi_linear__, dataset, **kwargs)
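# Illustrative arithmetic only (added; the values below are hypothetical and not
# produced by segeval): with actual agreement A_a = 0.85 and expected agreement
# A_e = 0.5, the statistic computed above is
# pi = (A_a - A_e) / (1 - A_e) = 0.35 / 0.5 = 0.7.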
| 36.45614 | 84 | 0.681906 | [
"BSD-3-Clause"
] | cfournie/segmentation.evaluation | segeval/agreement/pi.py | 2,078 | Python |
class Solution:
    def knightProbability(self, N: int, K: int, r: int, c: int) -> float:
        # memo caches sub-searches keyed by (square, move index) so branches that
        # reach the same square at the same depth are not recomputed.
        memo = {}
        def dfs(i, j, p, k):
            # While the knight is on the board and moves remain, spread the current
            # probability mass p evenly over the eight possible knight moves.
            if 0 <= i < N and 0 <= j < N and k < K:
                sm = 0
                for x, y in ((-1, -2), (-2, -1), (-2, 1), (-1, 2), (1, 2), (2, 1), (2, -1), (1, -2)):
                    if (i + x, j + y, k) not in memo:
                        memo[(i + x, j + y, k)] = dfs(i + x, j + y, p / 8, k + 1)
                    sm += memo[(i + x, j + y, k)]
                return sm
            else:
                # Base case: contribute p if the knight ended on the board, else 0
                # (the and/or chain acts as a conditional expression).
                return 0 <= i < N and 0 <= j < N and p or 0
        return dfs(r, c, 1, 0)
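# Illustrative usage (added; not part of the original solution). The expected value
# matches the public example for this problem: a knight starting at (0, 0) on a 3x3
# board making 2 random moves stays on the board with probability 0.0625.
if __name__ == "__main__":
    print(Solution().knightProbability(3, 2, 0, 0))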
| 42.4 | 101 | 0.327044 | [
"MIT"
] | nilax97/leetcode-solutions | solutions/Knight Probability in Chessboard/solution.py | 636 | Python |
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models
import torch.nn.functional as F
import math
import torch.utils.model_zoo as model_zoo
nonlinearity = nn.ReLU
class EncoderBlock(nn.Module):
def __init__(self, inchannel, outchannel, stride):
super().__init__()
self.c1=nn.Conv2d(inchannel, outchannel, 3, stride, 1, bias=False)
self.bn1=nn.BatchNorm2d(outchannel)
self.re1=nn.ReLU(inplace=True)
self.c2=nn.Conv2d(outchannel, outchannel, 3, 1, 1, bias=False)
self.bn2=nn.BatchNorm2d(outchannel)
self.re2=nn.ReLU(inplace=True)
def forward(self, x):
x = self.c1(x)
x = self.bn1(x)
x = self.re1(x)
x = self.c2(x)
x = self.bn2(x)
x = self.re2(x)
return x
class EncoderBlock0(nn.Module):
def __init__(self, in_channels, n_filters):
super().__init__()
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
self.norm1 = nn.BatchNorm2d(in_channels // 4)
self.relu1 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C/4, H, W
self.pool = nn.MaxPool2d(2, 2)#
self.norm2 = nn.BatchNorm2d(in_channels // 4)
self.relu2 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C, H, W
self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
self.norm3 = nn.BatchNorm2d(n_filters)
self.relu3 = nonlinearity(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.pool(x)
x = self.norm2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.relu3(x)
return x
class DecoderBlock(nn.Module):
def __init__(self, in_channels, n_filters):
super().__init__()
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
self.norm1 = nn.BatchNorm2d(in_channels // 4)
self.relu1 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C/4, H, W
self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, 3,
stride=2, padding=1, output_padding=1)
self.norm2 = nn.BatchNorm2d(in_channels // 4)
self.relu2 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C, H, W
self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
self.norm3 = nn.BatchNorm2d(n_filters)
self.relu3 = nonlinearity(inplace=True)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.deconv2(x)
x = self.norm2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.relu3(x)
return x
# Channel squeeze-and-excitation: global average pool -> two FC layers -> per-channel sigmoid gate
class ChannelSE(nn.Module):
def __init__(self,inchannel):
super().__init__()
self.lin1=torch.nn.Linear(inchannel, inchannel//2)
self.lin2=torch.nn.Linear(inchannel//2, inchannel)
self.c=inchannel
def forward(self,x):
#_,c,h,w=x.size
#print(c)
#print(h)
#print(w)
m=torch.mean(torch.mean(x,dim=2,keepdim=True),dim=3,keepdim=True)
m = m.view(m.size(0), -1)
m=self.lin1(m)
m=nn.ReLU()(m)
m=self.lin2(m)
m=nn.Sigmoid()(m)
m = m.view(m.size(0), self.c,1,1)
x=m*x#torch.matmul(m,x)
return x
# Spatial squeeze-and-excitation: 1x1 conv -> per-pixel sigmoid gate
class SpatialSE(nn.Module):
def __init__(self,inchannel):
super().__init__()
self.conv=torch.nn.Conv2d(inchannel,1,kernel_size=1,stride=1)
def forward(self,x):
#_,c,h,w=x.size
#print(c)
#print(h)
#print(w)
m = self.conv(x)
m=nn.Sigmoid()(m)
x=m*x#torch.matmul(m,x)
return x
class DecoderBlockv(nn.Module):
def __init__(self, in_channels, n_filters):
super().__init__()
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
self.norm1 = nn.BatchNorm2d(in_channels // 4)
self.relu1 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C/4, H, W
self.deconv2 = nn.ConvTranspose2d(in_channels // 4, in_channels // 4, 3,
stride=2, padding=1, output_padding=1)
self.norm2 = nn.BatchNorm2d(in_channels // 4)
self.relu2 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C, H, W
self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
self.norm3 = nn.BatchNorm2d(n_filters)
self.relu3 = nonlinearity(inplace=True)
self.cSE = ChannelSE(n_filters)
self.sSE = SpatialSE(n_filters)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.deconv2(x)
x = self.norm2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.relu3(x)
x = self.cSE(x) + self.sSE(x)
return x
class ConvUp(nn.Module):
def __init__(self, in_channels, n_filters):
super().__init__()
self.upsample = nn.Upsample(scale_factor=2,mode='bilinear')
self.conv1 = nn.Conv2d(in_channels, n_filters, 3, padding = 1)
self.norm1 = nn.BatchNorm2d(n_filters)
self.relu1 = nonlinearity(inplace=True)
def forward(self, x):
x = self.upsample(x)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
return x
class ConscSE(nn.Module):
def __init__(self, n_filters):
super().__init__()
self.cSE = ChannelSE(n_filters)
self.sSE = SpatialSE(n_filters)
def forward(self, x):
x = self.cSE(x) + self.sSE(x)
return x
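# NOTE (added): SCSEBlock is referenced later in this file (Decoder3, DenseNet34) but
# no definition was present. The definition below is an assumed reconstruction of the
# concurrent spatial & channel squeeze-and-excitation block, built from the ChannelSE
# and SpatialSE modules above (the same idea as ConscSE).
class SCSEBlock(nn.Module):
    def __init__(self, n_filters):
        super().__init__()
        self.cSE = ChannelSE(n_filters)
        self.sSE = SpatialSE(n_filters)
    def forward(self, x):
        # sum of the channel-gated and spatially-gated feature maps
        return self.cSE(x) + self.sSE(x)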
class DecoderBlockup(nn.Module):
def __init__(self, in_channels, n_filters):
super().__init__()
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, in_channels // 4, 1)
self.norm1 = nn.BatchNorm2d(in_channels // 4)
self.relu1 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C/4, H, W
self.deconv2 = ConvUp(in_channels // 4, in_channels // 4)
self.norm2 = nn.BatchNorm2d(in_channels // 4)
self.relu2 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C, H, W
self.conv3 = nn.Conv2d(in_channels // 4, n_filters, 1)
self.norm3 = nn.BatchNorm2d(n_filters)
self.relu3 = nonlinearity(inplace=True)
self.cSE = ChannelSE(n_filters)
self.sSE = SpatialSE(n_filters)
def forward(self, x):
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.deconv2(x)
x = self.norm2(x)
x = self.relu2(x)
x = self.conv3(x)
x = self.norm3(x)
x = self.relu3(x)
x = self.cSE(x) + self.sSE(x)
return x
class DecoderBlock23(nn.Module):
def __init__(self, in_channels, n_filters, scal=4):
super().__init__()
self.up = nn.Upsample(scale_factor=2,mode='bilinear')
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, in_channels // scal, 1)
self.norm1 = nn.BatchNorm2d(in_channels // scal)
self.relu1 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C, H, W
self.conv2 = nn.Conv2d(in_channels // scal, n_filters, 1)
self.norm2 = nn.BatchNorm2d(n_filters)
self.relu2 = nonlinearity(inplace=True)
self.cSE = ChannelSE(n_filters)
self.sSE = SpatialSE(n_filters)
def forward(self, x):
x = self.up(x)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.relu2(x)
#x = self.cSE(x) + self.sSE(x)
return x
class Upscale:
transposed_conv = 0
upsample_bilinear = 1
pixel_shuffle = 2
class BasicDecoderBlock(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, conv_size=3, upscale=Upscale.transposed_conv):
super().__init__()
padding = 0
if conv_size == 3:
padding = 1
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels, middle_channels, conv_size, padding=padding),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True)
)
last_conv_channels = middle_channels
if upscale == Upscale.transposed_conv:
self.layer2 = nn.Sequential(
nn.ConvTranspose2d(middle_channels, middle_channels, 3, stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(middle_channels),
nn.ReLU(inplace=True)
)
elif upscale == Upscale.upsample_bilinear:
self.layer2 = nn.Upsample(scale_factor=2)
else:
self.layer2 = nn.PixelShuffle(upscale_factor=2)
last_conv_channels = middle_channels // 4
self.layer3 = nn.Sequential(
nn.Conv2d(last_conv_channels, out_channels, conv_size, padding=padding),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
return x
class UnetBNDecoderBlock(nn.Module):
def __init__(self, in_channels, middle_channels, out_channels, upscale=Upscale.upsample_bilinear):
super().__init__()
self.layer = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(in_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.layer(x)
class LinkNet34a(nn.Module):
def __init__(self, num_classes, num_channels=3):
super().__init__()
        assert num_channels == 3, "num_channels is not used for now; to use it, change the first conv layer to support channel counts other than 3"
filters = [64, 128, 256, 512]
resnet = models.resnet34(pretrained=True)
self.firstconv = resnet.conv1
self.firstbn = resnet.bn1
self.firstrelu = resnet.relu
self.firstmaxpool = resnet.maxpool
self.encoder1 = resnet.layer1
self.encoder2 = resnet.layer2
self.encoder3 = resnet.layer3
self.encoder4 = resnet.layer4
# Center
self.center = nn.Sequential(
nn.MaxPool2d(2, 2),
nn.Conv2d(filters[3], filters[1], 3, padding=1),
nn.BatchNorm2d(filters[1]),
nn.ReLU(inplace=True)
)
# Decoder
self.decoder5 = UnetBNDecoderBlock(filters[1],filters[2]//4, filters[2])#
self.conv5=nn.Conv2d(256+512,256,1)
self.decoder4 = UnetBNDecoderBlock(filters[2],filters[2]//4, filters[1])#DecoderBlock(filters[3], filters[2])
self.conv4=nn.Conv2d(128+256,256,1)
self.decoder3 = UnetBNDecoderBlock(filters[2],filters[2]//4, filters[0])#DecoderBlock(filters[2], filters[1])
self.conv3=nn.Conv2d(64+128,128,1)
self.decoder2 = UnetBNDecoderBlock(filters[1],filters[1]//4, filters[0])#DecoderBlock(filters[1], filters[0])
self.conv2=nn.Conv2d(128,64,1)
#self.decoder1 = UnetBNDecoderBlock(filters[0],filters[0]//4, filters[0])#DecoderBlock(filters[0], filters[0])
# Final Classifier
self.finaldeconv1 = UnetBNDecoderBlock(filters[0],filters[0]//4, filters[0])#ConvUp(filters[0], filters[0])
# Final Classifier
self.logit = nn.Sequential(
nn.Conv2d(64, 64, 3, padding=1),
nn.BatchNorm2d(64),
nonlinearity(inplace=True),
nn.Conv2d(64, 1, 1),
)
# noinspection PyCallingNonCallable
def forward(self, x):
# Encoder
x = x.float()
x = self.firstconv(x)
x = self.firstbn(x)
x = self.firstrelu(x)
#x = self.firstmaxpool(x)
e1 = self.encoder1(x)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
############################
e5 = self.center(e4)
d5 = torch.cat([self.decoder5(e5) , e4], 1)#concat([self.decoder5(e5) , e4])
d5 = self.conv5(d5)
#########################
d4 = torch.cat([self.decoder4(d5) , e3], 1)#concat([self.decoder5(e5) , e4])
d4 = self.conv4(d4)
# d4 = e3
#d3 = self.decoder3(d4) + e2
#print(e2.shape)
d3 = torch.cat([self.decoder3(d4) , e2], 1)#concat([self.decoder5(e5) , e4])
#print(d3.shape)
d3 = self.conv3(d3)
#d2 = self.decoder2(d3) + e1
d2 = torch.cat([self.decoder2(d3) , e1], 1)#concat([self.decoder5(e5) , e4])
d2 = self.conv2(d2)
#d1 = self.decoder1(d2)
# Final Classification
f = self.finaldeconv1(d2)
#f = self.finalrelu1(f)
f = self.logit(f)
return f
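# Illustrative shape check for LinkNet34a (added; assumes a 128x128 RGB input, for which
# the skip connections line up and the logits match the input resolution):
#   net = LinkNet34a(num_classes=1)
#   out = net(torch.zeros(1, 3, 128, 128))  # -> torch.Size([1, 1, 128, 128])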
class DecoderBlockH(nn.Module):
def __init__(self, in_channels,channels, n_filters):
super().__init__()
self.up = nn.Upsample(scale_factor=2,mode='bilinear')
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels, channels, 3, padding=1)
self.norm1 = nn.BatchNorm2d(channels)
self.relu1 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C, H, W
self.conv2 = nn.Conv2d(channels, n_filters, 3, padding=1)
self.norm2 = nn.BatchNorm2d(n_filters)
self.relu2 = nonlinearity(inplace=True)
#self.cSE = ChannelSE(n_filters)
#self.sSE = SpatialSE(n_filters)
def forward(self, x, e=None):
x = self.up(x)
if e is not None:
x = torch.cat([x, e], 1)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.relu2(x)
#x = self.cSE(x) + self.sSE(x)
return x
class ConvBn2d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, padding=1):
        super().__init__()
        self.layer = nn.Sequential(
            #nn.Upsample(scale_factor=2, mode='bilinear'),
            # Note: `padding` is passed positionally, so nn.Conv2d receives it in the
            # `stride` slot and keeps padding=0. With the defaults (kernel_size=3,
            # padding=1) this is a 3x3, stride-1, unpadded conv that shrinks the map
            # by 2 px per call. For the 128x128 inputs DenseNet34 appears to assume
            # (see its scale_factor=128 upsample), the center then maps 8x8 -> 6x6 -> 4x4,
            # which is what lets the first decoder upsample match e5; writing
            # `padding=padding` here would change those shapes.
            nn.Conv2d(in_channels, out_channels, kernel_size, padding),
            nn.BatchNorm2d(out_channels),
            #nn.ReLU(inplace=True)
        )
    def forward(self, x):
        return self.layer(x)
class Decoder3(nn.Module):
def __init__(self, in_channels,res_channels, channels, n_filters):
super().__init__()
self.up = nn.Upsample(scale_factor=2, mode='bilinear')
# B, C, H, W -> B, C/4, H, W
self.conv1 = nn.Conv2d(in_channels+res_channels, channels, 3, padding=1)
self.norm1 = nn.BatchNorm2d(channels)
self.relu1 = nonlinearity(inplace=True)
# B, C/4, H, W -> B, C, H, W
self.conv2 = nn.Conv2d(channels, n_filters, 3, padding=1)
self.norm2 = nn.BatchNorm2d(n_filters)
self.relu2 = nonlinearity(inplace=True)
self.SCSE = SCSEBlock(n_filters)#ChannelSE(n_filters)
#self.sSE = SpatialSE(n_filters)
def forward(self, x, e=None):
x = self.up(x)
if e is not None:
x = torch.cat([x, e], 1)
x = self.conv1(x)
x = self.norm1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.norm2(x)
x = self.relu2(x)
x = self.SCSE(x)# + self.sSE(x)
return x
class DenseNet34(nn.Module):
def __init__(self ):
super().__init__()
#super(Net,self).__init__()
filters = [64, 128, 256, 512]
self.resnet = models.resnet34(pretrained=True)#ResNet(BasicBlock, [3, 4, 6, 3], num_classes=1 )
self.encoder1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.encoder2 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
self.resnet.layer1,
)
self.encoder3 = self.resnet.layer2
self.encoder4 = self.resnet.layer3
self.encoder5 = self.resnet.layer4
self.center = nn.Sequential(
ConvBn2d( 512, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
ConvBn2d( 256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
)
######################################################################
#self.decoder5 = Decoder3(256, 512, 512, 64)
#self.decoder4 = Decoder3( 64, 256, 256, 64)
#self.decoder3 = Decoder3( 64, 128, 128, 64)
#self.decoder2 = Decoder3( 64, 64, 64, 64)
#self.decoder1 = Decoder3( 64, 64, 32, 64)
self.decoder5 = DecoderBlockH(filters[3]+filters[2],filters[2], 64)
#self.conv5=nn.Conv2d(64+512,64,1)#before or after SE?
self.se5=SCSEBlock(64)
self.decoder4 = DecoderBlockH(filters[2]+64, filters[1], 64)
#self.conv4=nn.Conv2d(64+256,64,1)
self.se4=SCSEBlock(64)
self.decoder3 = DecoderBlockH(filters[1]+64, filters[1], 64)
#self.conv3=nn.Conv2d(64+128,64,1)
self.se3=SCSEBlock(64)
self.decoder2 = DecoderBlockH(filters[0]+64, filters[0], 64)
#self.conv2=nn.Conv2d(64+64,64,1)
self.se2=SCSEBlock(64)
self.decoder1 = DecoderBlockH(filters[0], filters[0]//2, 64)
self.se1=SCSEBlock(64)
##############################################################################
self.fuse_pixel = nn.Sequential(
nn.Conv2d(320, 64, kernel_size=3, padding=1),
)
self.logit_pixel = nn.Sequential(
#nn.Conv2d(320, 64, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d( 64, 1, kernel_size=1, padding=0),
)
self.logit_image = nn.Sequential(
#nn.Linear(512, 128),
nn.ReLU(inplace=True),
#nn.Linear(128, 1),
nn.Linear(64, 1),
)
self.fuse_image = nn.Sequential(
nn.Linear(512, 64),
#nn.ReLU(inplace=True),
#nn.Linear(128, 1),
)
self.fuse = nn.Sequential(
#nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
#nn.Conv2d(128, 64, kernel_size=1, padding=0),
#nn.BatchNorm2d(64),
#nn.ReLU(inplace=True),
)
self.logit = nn.Sequential(
nn.Conv2d(128, 1, kernel_size=1, padding=0),
#nn.ReLU(inplace=True),
#nn.Conv2d( 64, 1, kernel_size=1, padding=0),
)
def forward(self, x):
batch_size,C,H,W = x.shape
"""
mean=[0.485, 0.456, 0.406]
std =[0.229, 0.224, 0.225]
x = torch.cat([
(x-mean[2])/std[2],
(x-mean[1])/std[1],
(x-mean[0])/std[0],
],1)
"""
x = x.float()
e1 = self.encoder1(x ) #; print('e1',e1.size())
e2 = self.encoder2(e1) #; print('e2',e2.size())
e3 = self.encoder3(e2) #; print('e3',e3.size())
e4 = self.encoder4(e3) #; print('e4',e4.size())
e5 = self.encoder5(e4) #; print('e5',e5.size())
f = self.center(e5) #; print('f',f.size())
#print(f.shape)
#print(e5.shape)
#e1 = self.encoder1(x)#
#e2 = self.encoder2(e1)#
#e3 = self.encoder3(e2)#
#e4 = self.encoder4(e3)#
#e5 = self.center(e4)#512
####################################################################################
#d5 = self.decoder5( f,e5) #; print('d5',f.size())
#d4 = self.decoder4(d5,e4) #; print('d4',f.size())
#d3 = self.decoder3(d4,e3) #; print('d3',f.size())
#d2 = self.decoder2(d3,e2) #; print('d2',f.size())
#d1 = self.decoder1(d2,e1) #; print('d1',f.size())
d5 = self.decoder5(f,e5)
d5 = self.se5(d5)
# Decoder with Skip Connections
#d4 = self.decoder4(d5) + e3
#d4 = torch.cat([self.decoder4(d5) , e3], 1)#concat([self.decoder5(e5) , e4])
#print(d5.shape)
#print(e3.shape)
d4 = self.decoder4(d5,e4)
d4 = self.se4(d4)
# d4 = e3
#d3 = self.decoder3(d4) + e2
#print(e2.shape)
#d3 = torch.cat([self.decoder3(d4) , e2], 1)#concat([self.decoder5(e5) , e4])
#print(d3.shape)
d3 = self.decoder3(d4,e3)
d3 = self.se3(d3)
#d2 = self.decoder2(d3) + e1
#d2 = torch.cat([self.decoder2(d3) , e1], 1)#concat([self.decoder5(e5) , e4])
d2 = self.decoder2(d3,e2)
d2 = self.se2(d2)
d1 = self.decoder1(d2)
d1 = self.se1(d1)
########################################################################################
d = torch.cat((
d1,
F.upsample(d2,scale_factor= 2, mode='bilinear',align_corners=False),
F.upsample(d3,scale_factor= 4, mode='bilinear',align_corners=False),
F.upsample(d4,scale_factor= 8, mode='bilinear',align_corners=False),
F.upsample(d5,scale_factor=16, mode='bilinear',align_corners=False),
),1)
#######################################################################
"""
d = F.dropout(d, p=0.50, training=self.training)
logit_pixel = self.logit_pixel(d)
f = F.adaptive_avg_pool2d(e5, output_size=1).view(batch_size,-1)
f = F.dropout(f, p=0.50, training=self.training)
logit_image = self.logit_image(f).view(-1)
"""
###########################################################################
#d = torch.cat([d1,d2,d3,d4,d5],1) #hyper-columns
d = F.dropout(d, p=0.50, training=self.training)
fuse_pixel = self.fuse_pixel(d)#64-128-128
logit_pixel = self.logit_pixel(fuse_pixel)#1-128-128
e = F.adaptive_avg_pool2d(e5, output_size=1).view(batch_size,-1) #image pool#-512-1-1
e = F.dropout(e, p=0.50, training=self.training)#
fuse_image = self.fuse_image(e)#-64-1-1
logit_image = self.logit_image(fuse_image).view(-1)#-1-1-1
#fuse = self.fuse(torch.mul(fuse_pixel, F.upsample(fuse_image.view(batch_size,-1,1,1,),scale_factor=128, mode='nearest')))
#fuse = self.fuse(fuse_pixel+ F.upsample(fuse_image.view(batch_size,-1,1,1,),scale_factor=128, mode='nearest'))
fuse = self.fuse(torch.cat([ #fuse
fuse_pixel,
F.upsample(fuse_image.view(batch_size,-1,1,1,),scale_factor=128, mode='nearest')
],1))
logit = self.logit(fuse)#1-128-128
return logit, logit_pixel, logit_image
#return logit_pixel, logit_image
##-----------------------------------------------------------------
#def criterion(self, logit_pixel, logit_image, truth_pixel, truth_image, is_average=True):
"""
d3 = F.upsample(d3,scale_factor= 4, mode='bilinear',align_corners=False)
d4 = F.upsample(d4,scale_factor= 8, mode='bilinear',align_corners=False)
d5 = F.upsample(d5,scale_factor=16, mode='bilinear',align_corners=False)
d = torch.cat([d1,d2,d3,d4,d5],1) #hyper-columns
d = F.dropout(d, p=0.50, training=self.training)
fuse_pixel = self.fuse_pixel(d)
logit_pixel = self.logit_pixel(fuse_pixel)
e = F.adaptive_avg_pool2d(e5, output_size=1).view(batch_size,-1) #image pool
e = F.dropout(e, p=0.50, training=self.training)
fuse_image = self.fuse_image(e)
logit_image = self.logit_image(fuse_image).view(-1)
fuse = self.fuse(torch.cat([ #fuse
fuse_pixel,
F.upsample(fuse_image.view(batch_size,-1,1,1,),scale_factor=128, mode='nearest')
],1))
logit = self.logit(fuse)
return logit, logit_pixel, logit_image
"""
| 35.429619 | 131 | 0.543931 | [
"MIT"
] | ZWZseven/Kaggle_TGS2018_solution | model/model.py | 24,163 | Python |
# Generated by Django 2.2 on 2021-12-05 14:55
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_activate', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.235294 | 266 | 0.639344 | [
"MIT"
] | harshavardhan-bhumi/profiles-rest-api | profiles_api/migrations/0001_initial.py | 1,708 | Python |
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
'''
MapReduce Job Metrics
---------------------
mapreduce.job.elapsed_time                The elapsed time since the application started (in ms)
mapreduce.job.maps_total The total number of maps
mapreduce.job.maps_completed The number of completed maps
mapreduce.job.reduces_total The total number of reduces
mapreduce.job.reduces_completed The number of completed reduces
mapreduce.job.maps_pending The number of maps still to be run
mapreduce.job.maps_running The number of running maps
mapreduce.job.reduces_pending The number of reduces still to be run
mapreduce.job.reduces_running The number of running reduces
mapreduce.job.new_reduce_attempts The number of new reduce attempts
mapreduce.job.running_reduce_attempts The number of running reduce attempts
mapreduce.job.failed_reduce_attempts The number of failed reduce attempts
mapreduce.job.killed_reduce_attempts The number of killed reduce attempts
mapreduce.job.successful_reduce_attempts The number of successful reduce attempts
mapreduce.job.new_map_attempts The number of new map attempts
mapreduce.job.running_map_attempts The number of running map attempts
mapreduce.job.failed_map_attempts The number of failed map attempts
mapreduce.job.killed_map_attempts The number of killed map attempts
mapreduce.job.successful_map_attempts The number of successful map attempts
MapReduce Job Counter Metrics
-----------------------------
mapreduce.job.counter.reduce_counter_value The counter value of reduce tasks
mapreduce.job.counter.map_counter_value The counter value of map tasks
mapreduce.job.counter.total_counter_value The counter value of all tasks
MapReduce Map Task Metrics
--------------------------
mapreduce.job.map.task.progress The distribution of all map task progresses
MapReduce Reduce Task Metrics
-----------------------------
mapreduce.job.reduce.task.progress The distribution of all reduce task progresses
'''
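# Example instance configuration (illustrative only; these keys mirror what
# MapReduceCheck.check() below reads from each `instance`):
#
#   instances:
#     - resourcemanager_uri: http://localhost:8088
#       cluster_name: dev_cluster
#       collect_task_metrics: false
#       tags:
#         - 'optional:tag1'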
# stdlib
from urlparse import urljoin
from urlparse import urlsplit
from urlparse import urlunsplit
# 3rd party
import requests
from requests.exceptions import Timeout, HTTPError, InvalidURL, ConnectionError
from simplejson import JSONDecodeError
# Project
from checks import AgentCheck
from config import _is_affirmative
# Default Settings
DEFAULT_CUSTER_NAME = 'default_cluster'
# Service Check Names
YARN_SERVICE_CHECK = 'mapreduce.resource_manager.can_connect'
MAPREDUCE_SERVICE_CHECK = 'mapreduce.application_master.can_connect'
# URL Paths
YARN_APPS_PATH = 'ws/v1/cluster/apps'
MAPREDUCE_JOBS_PATH = 'ws/v1/mapreduce/jobs'
# Application type and states to collect
YARN_APPLICATION_TYPES = 'MAPREDUCE'
YARN_APPLICATION_STATES = 'RUNNING'
# Metric types
HISTOGRAM = 'histogram'
INCREMENT = 'increment'
# Metrics to collect
MAPREDUCE_JOB_METRICS = {
'elapsedTime': ('mapreduce.job.elapsed_time', HISTOGRAM),
'mapsTotal': ('mapreduce.job.maps_total', INCREMENT),
'mapsCompleted': ('mapreduce.job.maps_completed', INCREMENT),
'reducesTotal': ('mapreduce.job.reduces_total', INCREMENT),
'reducesCompleted': ('mapreduce.job.reduces_completed', INCREMENT),
'mapsPending': ('mapreduce.job.maps_pending', INCREMENT),
'mapsRunning': ('mapreduce.job.maps_running', INCREMENT),
'reducesPending': ('mapreduce.job.reduces_pending', INCREMENT),
'reducesRunning': ('mapreduce.job.reduces_running', INCREMENT),
'newReduceAttempts': ('mapreduce.job.new_reduce_attempts', INCREMENT),
'runningReduceAttempts': ('mapreduce.job.running_reduce_attempts', INCREMENT),
'failedReduceAttempts': ('mapreduce.job.failed_reduce_attempts', INCREMENT),
'killedReduceAttempts': ('mapreduce.job.killed_reduce_attempts', INCREMENT),
'successfulReduceAttempts': ('mapreduce.job.successful_reduce_attempts', INCREMENT),
'newMapAttempts': ('mapreduce.job.new_map_attempts', INCREMENT),
'runningMapAttempts': ('mapreduce.job.running_map_attempts', INCREMENT),
'failedMapAttempts': ('mapreduce.job.failed_map_attempts', INCREMENT),
'killedMapAttempts': ('mapreduce.job.killed_map_attempts', INCREMENT),
'successfulMapAttempts': ('mapreduce.job.successful_map_attempts', INCREMENT),
}
MAPREDUCE_JOB_COUNTER_METRICS = {
'reduceCounterValue': ('mapreduce.job.counter.reduce_counter_value', INCREMENT),
'mapCounterValue': ('mapreduce.job.counter.map_counter_value', INCREMENT),
'totalCounterValue': ('mapreduce.job.counter.total_counter_value', INCREMENT),
}
MAPREDUCE_MAP_TASK_METRICS = {
'elapsedTime': ('mapreduce.job.map.task.elapsed_time', HISTOGRAM)
}
MAPREDUCE_REDUCE_TASK_METRICS = {
'elapsedTime': ('mapreduce.job.reduce.task.elapsed_time', HISTOGRAM)
}
class MapReduceCheck(AgentCheck):
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Parse job specific counters
self.general_counters = self._parse_general_counters(init_config)
# Parse job specific counters
self.job_specific_counters = self._parse_job_specific_counters(init_config)
def check(self, instance):
# Get properties from conf file
rm_address = instance.get('resourcemanager_uri')
if rm_address is None:
raise Exception('The ResourceManager URL must be specified in the instance configuration')
collect_task_metrics = _is_affirmative(instance.get('collect_task_metrics', False))
# Get additional tags from the conf file
tags = instance.get('tags', [])
if tags is None:
tags = []
else:
tags = list(set(tags))
# Get the cluster name from the conf file
cluster_name = instance.get('cluster_name')
if cluster_name is None:
self.warning("The cluster_name must be specified in the instance configuration, defaulting to '%s'" % (DEFAULT_CUSTER_NAME))
cluster_name = DEFAULT_CUSTER_NAME
tags.append('cluster_name:%s' % cluster_name)
# Get the running MR applications from YARN
running_apps = self._get_running_app_ids(rm_address)
        # Report success after gathering all metrics from the ResourceManager
self.service_check(YARN_SERVICE_CHECK,
AgentCheck.OK,
tags=['url:%s' % rm_address],
message='Connection to ResourceManager "%s" was successful' % rm_address)
# Get the applications from the application master
running_jobs = self._mapreduce_job_metrics(running_apps, tags)
        # Get job counter metrics
self._mapreduce_job_counters_metrics(running_jobs, tags)
# Get task metrics
if collect_task_metrics:
self._mapreduce_task_metrics(running_jobs, tags)
# Report success after gathering all metrics from Application Master
if running_jobs:
job_id, metrics = running_jobs.items()[0]
am_address = self._get_url_base(metrics['tracking_url'])
self.service_check(MAPREDUCE_SERVICE_CHECK,
AgentCheck.OK,
tags=['url:%s' % am_address],
                               message='Connection to ApplicationMaster "%s" was successful' % am_address)
def _parse_general_counters(self, init_config):
'''
Return a dictionary for each job counter
{
counter_group_name: [
counter_name
]
}
}
'''
job_counter = {}
if init_config.get('general_counters'):
# Parse the custom metrics
for counter_group in init_config['general_counters']:
counter_group_name = counter_group.get('counter_group_name')
counters = counter_group.get('counters')
if not counter_group_name:
raise Exception('"general_counters" must contain a valid "counter_group_name"')
if not counters:
raise Exception('"general_counters" must contain a list of "counters"')
# Add the counter_group to the job_counters if it doesn't already exist
if counter_group_name not in job_counter:
job_counter[counter_group_name] = []
for counter in counters:
counter_name = counter.get('counter_name')
if not counter_name:
raise Exception('At least one "counter_name" should be specified in the list of "counters"')
job_counter[counter_group_name].append(counter_name)
return job_counter
def _parse_job_specific_counters(self, init_config):
'''
Return a dictionary for each job counter
{
job_name: {
counter_group_name: [
counter_name
]
}
}
}
'''
job_counter = {}
if init_config.get('job_specific_counters'):
# Parse the custom metrics
for job in init_config['job_specific_counters']:
job_name = job.get('job_name')
metrics = job.get('metrics')
if not job_name:
raise Exception('Counter metrics must have a "job_name"')
if not metrics:
raise Exception('Jobs specified in counter metrics must contain at least one metric')
# Add the job to the custom metrics if it doesn't already exist
if job_name not in job_counter:
job_counter[job_name] = {}
for metric in metrics:
counter_group_name = metric.get('counter_group_name')
counters = metric.get('counters')
if not counter_group_name:
raise Exception('Each counter metric must contain a valid "counter_group_name"')
if not counters:
raise Exception('Each counter metric must contain a list of "counters"')
# Add the counter group name if it doesn't exist for the current job
if counter_group_name not in job_counter[job_name]:
job_counter[job_name][counter_group_name] = []
for counter in counters:
counter_name = counter.get('counter_name')
if not counter_name:
raise Exception('At least one "counter_name" should be specified in the list of "counters"')
job_counter[job_name][counter_group_name].append(counter_name)
return job_counter
def _get_running_app_ids(self, rm_address, **kwargs):
'''
Return a dictionary of {app_id: (app_name, tracking_url)} for the running MapReduce applications
'''
metrics_json = self._rest_request_to_json(rm_address,
YARN_APPS_PATH,
YARN_SERVICE_CHECK,
states=YARN_APPLICATION_STATES,
applicationTypes=YARN_APPLICATION_TYPES)
running_apps = {}
if metrics_json.get('apps'):
if metrics_json['apps'].get('app') is not None:
for app_json in metrics_json['apps']['app']:
app_id = app_json.get('id')
tracking_url = app_json.get('trackingUrl')
app_name = app_json.get('name')
if app_id and tracking_url and app_name:
running_apps[app_id] = (app_name, tracking_url)
return running_apps
def _mapreduce_job_metrics(self, running_apps, addl_tags):
'''
Get metrics for each MapReduce job.
Return a dictionary for each MapReduce job
{
job_id: {
'job_name': job_name,
'app_name': app_name,
'user_name': user_name,
'tracking_url': tracking_url
}
'''
running_jobs = {}
for app_id, (app_name, tracking_url) in running_apps.iteritems():
metrics_json = self._rest_request_to_json(tracking_url,
MAPREDUCE_JOBS_PATH,
MAPREDUCE_SERVICE_CHECK)
if metrics_json.get('jobs'):
if metrics_json['jobs'].get('job'):
for job_json in metrics_json['jobs']['job']:
job_id = job_json.get('id')
job_name = job_json.get('name')
user_name = job_json.get('user')
if job_id and job_name and user_name:
# Build the structure to hold the information for each job ID
running_jobs[str(job_id)] = {'job_name': str(job_name),
'app_name': str(app_name),
'user_name': str(user_name),
'tracking_url': self._join_url_dir(tracking_url, MAPREDUCE_JOBS_PATH, job_id)}
tags = ['app_name:' + str(app_name),
'user_name:' + str(user_name),
'job_name:' + str(job_name)]
tags.extend(addl_tags)
self._set_metrics_from_json(tags, job_json, MAPREDUCE_JOB_METRICS)
return running_jobs
def _mapreduce_job_counters_metrics(self, running_jobs, addl_tags):
'''
Get custom metrics specified for each counter
'''
for job_id, job_metrics in running_jobs.iteritems():
job_name = job_metrics['job_name']
# Check if the job_name exist in the custom metrics
if self.general_counters or (job_name in self.job_specific_counters):
job_specific_metrics = self.job_specific_counters.get(job_name)
metrics_json = self._rest_request_to_json(job_metrics['tracking_url'],
'counters',
MAPREDUCE_SERVICE_CHECK)
if metrics_json.get('jobCounters'):
if metrics_json['jobCounters'].get('counterGroup'):
# Cycle through all the counter groups for this job
for counter_group in metrics_json['jobCounters']['counterGroup']:
group_name = counter_group.get('counterGroupName')
if group_name:
counter_metrics = set([])
# Add any counters in the job specific metrics
if job_specific_metrics and group_name in job_specific_metrics:
counter_metrics = counter_metrics.union(job_specific_metrics[group_name])
# Add any counters in the general metrics
if group_name in self.general_counters:
counter_metrics = counter_metrics.union(self.general_counters[group_name])
if counter_metrics:
# Cycle through all the counters in this counter group
if counter_group.get('counter'):
for counter in counter_group['counter']:
counter_name = counter.get('name')
# Check if the counter name is in the custom metrics for this group name
if counter_name and counter_name in counter_metrics:
tags = ['app_name:' + job_metrics.get('app_name'),
'user_name:' + job_metrics.get('user_name'),
'job_name:' + job_name,
'counter_name:' + str(counter_name).lower()]
tags.extend(addl_tags)
self._set_metrics_from_json(tags,
counter,
MAPREDUCE_JOB_COUNTER_METRICS)
def _mapreduce_task_metrics(self, running_jobs, addl_tags):
'''
Get metrics for each MapReduce task
Return a dictionary of {task_id: 'tracking_url'} for each MapReduce task
'''
for job_id, job_stats in running_jobs.iteritems():
metrics_json = self._rest_request_to_json(job_stats['tracking_url'],
'tasks',
MAPREDUCE_SERVICE_CHECK)
if metrics_json.get('tasks'):
if metrics_json['tasks'].get('task'):
for task in metrics_json['tasks']['task']:
task_type = task.get('type')
if task_type:
tags = ['app_name:' + job_stats['app_name'],
'user_name:' + job_stats['user_name'],
'job_name:' + job_stats['job_name'],
'task_type:' + str(task_type).lower()]
tags.extend(addl_tags)
if task_type == 'MAP':
self._set_metrics_from_json(tags, task, MAPREDUCE_MAP_TASK_METRICS)
elif task_type == 'REDUCE':
self._set_metrics_from_json(tags, task, MAPREDUCE_REDUCE_TASK_METRICS)
def _set_metrics_from_json(self, tags, metrics_json, metrics):
'''
Parse the JSON response and set the metrics
'''
for status, (metric_name, metric_type) in metrics.iteritems():
metric_status = metrics_json.get(status)
if metric_status is not None:
self._set_metric(metric_name,
metric_type,
metric_status,
tags)
def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None):
'''
Set a metric
'''
if metric_type == HISTOGRAM:
self.histogram(metric_name, value, tags=tags, device_name=device_name)
elif metric_type == INCREMENT:
self.increment(metric_name, value, tags=tags, device_name=device_name)
else:
self.log.error('Metric type "%s" unknown' % (metric_type))
def _rest_request_to_json(self, address, object_path, service_name, *args, **kwargs):
'''
Query the given URL and return the JSON response
'''
response_json = None
service_check_tags = ['url:%s' % self._get_url_base(address)]
url = address
if object_path:
url = self._join_url_dir(url, object_path)
# Add args to the url
if args:
for directory in args:
url = self._join_url_dir(url, directory)
self.log.debug('Attempting to connect to "%s"' % url)
# Add kwargs as arguments
if kwargs:
query = '&'.join(['{0}={1}'.format(key, value) for key, value in kwargs.iteritems()])
url = urljoin(url, '?' + query)
try:
response = requests.get(url, timeout=self.default_integration_http_timeout)
response.raise_for_status()
response_json = response.json()
except Timeout as e:
self.service_check(service_name,
AgentCheck.CRITICAL,
tags=service_check_tags,
message="Request timeout: {0}, {1}".format(url, e))
raise
except (HTTPError,
InvalidURL,
ConnectionError) as e:
self.service_check(service_name,
AgentCheck.CRITICAL,
tags=service_check_tags,
message="Request failed: {0}, {1}".format(url, e))
raise
except JSONDecodeError as e:
self.service_check(service_name,
AgentCheck.CRITICAL,
tags=service_check_tags,
message='JSON Parse failed: {0}, {1}'.format(url, e))
raise
except ValueError as e:
self.service_check(service_name,
AgentCheck.CRITICAL,
tags=service_check_tags,
message=str(e))
raise
return response_json
def _join_url_dir(self, url, *args):
'''
Join a URL with multiple directories
'''
for path in args:
url = url.rstrip('/') + '/'
url = urljoin(url, path.lstrip('/'))
return url
def _get_url_base(self, url):
'''
Return the base of a URL
'''
s = urlsplit(url)
return urlunsplit([s.scheme, s.netloc, '', '', ''])
| 40.253788 | 136 | 0.59043 | [
"BSD-3-Clause"
] | WPMedia/dd-agent | checks.d/mapreduce.py | 21,254 | Python |
"""
Create Sine function without using third-party plugins or expressions.
@Guilherme Trevisan - github.com/TrevisanGMW - 2021-01-25
1.0 - 2021-01-25
Initial Release
"""
try:
from shiboken2 import wrapInstance
except ImportError:
from shiboken import wrapInstance
try:
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QWidget
except ImportError:
from PySide.QtGui import QIcon, QWidget
from maya import OpenMayaUI as omui
import maya.cmds as cmds
import maya.mel as mel
import random
import sys
# Script Name
script_name = "GT - Add Sine Attributes"
# Version:
script_version = "1.0"
# Main Form ============================================================================
def build_gui_add_sine_attr():
window_name = "build_gui_add_sine_attr"
if cmds.window(window_name, exists =True):
cmds.deleteUI(window_name)
# Main GUI Start Here =================================================================================
# Build UI
build_gui_add_sine_attr = cmds.window(window_name, title=script_name + ' (v' + script_version + ')',\
titleBar=True, mnb=False, mxb=False, sizeable =True)
cmds.window(window_name, e=True, s=True, wh=[1,1])
content_main = cmds.columnLayout(adj = True)
# Title Text
title_bgc_color = (.4, .4, .4)
cmds.separator(h=10, style='none') # Empty Space
cmds.rowColumnLayout(nc=1, cw=[(1, 270)], cs=[(1, 10)], p=content_main) # Window Size Adjustment
cmds.rowColumnLayout(nc=3, cw=[(1, 10), (2, 200), (3, 50)], cs=[(1, 10), (2, 0), (3, 0)], p=content_main) # Title Column
cmds.text(" ", bgc=title_bgc_color) # Tiny Empty Green Space
cmds.text(script_name, bgc=title_bgc_color, fn="boldLabelFont", align="left")
cmds.button( l ="Help", bgc=title_bgc_color, c=lambda x:build_gui_help_add_sine_attr())
cmds.separator(h=5, style='none') # Empty Space
# Body ====================
body_column = cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)], p=content_main)
cmds.text(l='Select attribute holder first, then run script.', align="center")
cmds.separator(h=10, style='none') # Empty Space
cmds.text('Sine Attributes Prefix:')
stretchy_system_prefix = cmds.textField(text='', pht='Sine Attributes Prefix (Optional)')
cmds.separator(h=5, style='none') # Empty Space
cmds.rowColumnLayout(nc=2, cw=[(1, 115),(2, 150)], cs=[(1,10)], p=content_main)
add_abs_output_chkbox = cmds.checkBox(label='Add Abs Output')
add_prefix_nn_chkbox = cmds.checkBox(label='Add Prefix to Nice Name', value=True)
cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)], p=content_main)
cmds.separator(h=5, style='none') # Empty Space
cmds.separator(h=5)
cmds.separator(h=7, style='none') # Empty Space
cmds.button(l ="Add Sine Attributes", bgc=(.6, .6, .6), c=lambda x:validate_operation())
cmds.separator(h=10, style='none') # Empty Space
# Show and Lock Window
cmds.showWindow(build_gui_add_sine_attr)
cmds.window(window_name, e=True, s=False)
# Set Window Icon
qw = omui.MQtUtil.findWindow(window_name)
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(':/sineCurveProfile.png')
widget.setWindowIcon(icon)
# Remove the focus from the textfield and give it to the window
cmds.setFocus(window_name)
# Main GUI Ends Here =================================================================================
def validate_operation():
''' Checks elements one last time before running the script '''
is_valid = False
stretchy_name = None
add_abs_output_value = cmds.checkBox(add_abs_output_chkbox, q=True, value=True)
add_prefix_nn_value = cmds.checkBox(add_prefix_nn_chkbox, q=True, value=True)
stretchy_prefix = cmds.textField(stretchy_system_prefix, q=True, text=True).replace(' ','')
selection = cmds.ls(selection=True) or []
if len(selection) > 0:
target = selection[0]
is_valid = True
else:
cmds.warning('Please select a target object to be the attribute holder.')
is_valid = False
# Name
if stretchy_prefix != '':
stretchy_name = stretchy_prefix
else:
stretchy_name = 'sine'
if is_valid:
current_attributes = cmds.listAttr(target, r=True, s=True , userDefined=True) or []
possible_conflicts = []
possible_conflicts.append(stretchy_name + 'Time')
possible_conflicts.append(stretchy_name + 'Amplitude')
possible_conflicts.append(stretchy_name + 'Frequency')
possible_conflicts.append(stretchy_name + 'Offset')
possible_conflicts.append(stretchy_name + 'Output')
possible_conflicts.append(stretchy_name + 'Tick')
possible_conflicts.append(stretchy_name + 'AbsOutput')
for conflict in possible_conflicts:
for attr in current_attributes:
if attr == conflict:
is_valid = False
if not is_valid:
cmds.warning('The object selected has conflicting attributes. Please change the prefix or select another object.')
# Run Script
        if is_valid:
            add_sine_attributes(target, sine_prefix=stretchy_name, tick_source_attr='time1.outTime', hide_unkeyable=False, add_absolute_output=add_abs_output_value, nice_name_prefix=add_prefix_nn_value)
            cmds.select(target, r=True)
# Creates Help GUI
def build_gui_help_add_sine_attr():
''' Creates GUI for Make Stretchy IK '''
window_name = "build_gui_help_add_sine_attr"
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
cmds.window(window_name, title= script_name + " Help", mnb=False, mxb=False, s=True)
cmds.window(window_name, e=True, s=True, wh=[1,1])
cmds.columnLayout("main_column", p= window_name)
# Title Text
cmds.separator(h=12, style='none') # Empty Space
cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p="main_column") # Window Size Adjustment
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p="main_column") # Title Column
cmds.text(script_name + " Help", bgc=[.4,.4,.4], fn="boldLabelFont", align="center")
cmds.separator(h=10, style='none', p="main_column") # Empty Space
# Body ====================
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p="main_column")
cmds.text(l='Create Sine attributes without using\nthird-party plugins or expressions.', align="center")
cmds.separator(h=5, style='none') # Empty Space
    cmds.text(l='Select an object, then click on "Add Sine Attributes"', align="center")
cmds.separator(h=10, style='none') # Empty Space
cmds.text(l='Sine Attributes:', align='center', font='boldLabelFont')
cmds.text(l='Time: Multiplier for the time input (tick)', align="center")
cmds.text(l='Amplitude: Wave amplitude (how high it gets)', align="center")
cmds.text(l='Frequency: Wave frequency (how often it happens)', align="center")
    cmds.text(l='Offset: Value added after the calculation (offset).', align="center")
cmds.text(l='Tick: Time as seen by the sine system.', align="center")
cmds.text(l='Output: Result of the sine operation.', align="center")
    cmds.text(l='Abs Output: Absolute output (no negative values).', align="center")
cmds.separator(h=10, style='none') # Empty Space
cmds.separator(h=15, style='none') # Empty Space
cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p="main_column")
cmds.text('Guilherme Trevisan ')
cmds.text(l='<a href="mailto:trevisangmw@gmail.com">TrevisanGMW@gmail.com</a>', hl=True, highlightColor=[1,1,1])
cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p="main_column")
cmds.separator(h=15, style='none') # Empty Space
cmds.text(l='<a href="https://github.com/TrevisanGMW">Github</a>', hl=True, highlightColor=[1,1,1])
cmds.separator(h=7, style='none') # Empty Space
# Close Button
cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p="main_column")
cmds.separator(h=10, style='none')
cmds.button(l='OK', h=30, c=lambda args: close_help_gui())
cmds.separator(h=8, style='none')
# Show and Lock Window
cmds.showWindow(window_name)
cmds.window(window_name, e=True, s=False)
# Set Window Icon
qw = omui.MQtUtil.findWindow(window_name)
widget = wrapInstance(long(qw), QWidget)
icon = QIcon(':/question.png')
widget.setWindowIcon(icon)
def close_help_gui():
''' Closes Help Window '''
if cmds.window(window_name, exists=True):
cmds.deleteUI(window_name, window=True)
def add_sine_attributes(obj, sine_prefix='sine', tick_source_attr='time1.outTime', hide_unkeyable=True, add_absolute_output=False, nice_name_prefix=True):
'''
Create Sine function without using third-party plugins or expressions
Parameters:
obj (string): Name of the object
                sine_prefix (string): Prefix given to the name of the attributes (default is "sine")
tick_source_attr (string): Name of the attribute used as the source for time. It uses the default "time1" node if nothing else is specified
hide_unkeyable (bool): Hides the tick and output attributes
                add_absolute_output (bool): Also creates an output version that gives only positive numbers much like the abs() expression
                nice_name_prefix (bool): Whether the prefix is also used in the attribute nice names
            Returns:
                sine_output_attrs (list): A list of strings combining the object name and the sine output attribute. E.g. ["pSphere1.sineOutput"]
                In case an absolute output is added, it will be the second element in the list. E.g. ["pSphere1.sineOutput", "pSphere1.sineAbsOutput"]
                If add_absolute_output is False the second element is None
'''
# Load Required Plugins
required_plugin = 'quatNodes'
if not cmds.pluginInfo(required_plugin, q=True, loaded=True):
cmds.loadPlugin(required_plugin, qt=False)
# Set Variables
influence_suffix = 'Time'
amplitude_suffix = 'Amplitude'
frequency_suffix = 'Frequency'
offset_suffix = 'Offset'
output_suffix = 'Output'
tick_suffix = 'Tick'
abs_suffix = 'AbsOutput'
influence_attr = sine_prefix + influence_suffix
amplitude_attr = sine_prefix + amplitude_suffix
frequency_attr = sine_prefix + frequency_suffix
offset_attr = sine_prefix + offset_suffix
output_attr = sine_prefix + output_suffix
tick_attr = sine_prefix + tick_suffix
abs_attr = sine_prefix + abs_suffix
# Create Nodes
    mdl_node = cmds.createNode('multDoubleLinear', name=obj + '_multDoubleLinear')
quat_node = cmds.createNode('eulerToQuat', name=obj + '_eulerToQuat')
multiply_node = cmds.createNode('multiplyDivide', name=obj + '_amplitude_multiply')
sum_node = cmds.createNode('plusMinusAverage', name=obj + '_offset_sum')
influence_multiply_node = cmds.createNode('multiplyDivide', name=obj + '_influence_multiply')
# Add Attributes
if nice_name_prefix:
cmds.addAttr(obj, ln=influence_attr, at='double', k=True, maxValue=1, minValue=0)
cmds.addAttr(obj, ln=amplitude_attr, at='double', k=True)
cmds.addAttr(obj, ln=frequency_attr, at='double', k=True)
cmds.addAttr(obj, ln=offset_attr, at='double', k=True)
cmds.addAttr(obj, ln=tick_attr, at='double', k=True)
cmds.addAttr(obj, ln=output_attr, at='double', k=True)
if add_absolute_output:
cmds.addAttr(obj, ln=abs_attr, at='double', k=True)
else:
cmds.addAttr(obj, ln=influence_attr, at='double', k=True, maxValue=1, minValue=0, nn=influence_suffix)
cmds.addAttr(obj, ln=amplitude_attr, at='double', k=True, nn=amplitude_suffix)
cmds.addAttr(obj, ln=frequency_attr, at='double', k=True, nn=frequency_suffix)
cmds.addAttr(obj, ln=offset_attr, at='double', k=True, nn=offset_suffix)
cmds.addAttr(obj, ln=tick_attr, at='double', k=True, nn=tick_suffix)
cmds.addAttr(obj, ln=output_attr, at='double', k=True, nn=output_suffix)
if add_absolute_output:
cmds.addAttr(obj, ln=abs_attr, at='double', k=True, nn=re.sub(r'(\w)([A-Z])', r'\1 \2', abs_suffix))
cmds.setAttr(obj + '.' + influence_attr, 1)
cmds.setAttr(obj + '.' + amplitude_attr, 1)
cmds.setAttr(obj + '.' + frequency_attr, 10)
if hide_unkeyable:
cmds.setAttr(obj + '.' + tick_attr, k=False)
cmds.setAttr(obj + '.' + output_attr, k=False)
if add_absolute_output and hide_unkeyable:
cmds.setAttr(obj + '.' + abs_attr, k=False)
cmds.connectAttr(tick_source_attr, influence_multiply_node + '.input1X')
cmds.connectAttr(influence_multiply_node + '.outputX', obj + '.' + tick_attr)
cmds.connectAttr(obj + '.' + influence_attr, influence_multiply_node + '.input2X')
cmds.connectAttr(obj + '.' + amplitude_attr, multiply_node + '.input2X')
cmds.connectAttr(obj + '.' + frequency_attr, mdl_node + '.input1')
cmds.connectAttr(obj + '.' + tick_attr, mdl_node + '.input2')
cmds.connectAttr(obj + '.' + offset_attr, sum_node + '.input1D[0]')
cmds.connectAttr(mdl_node + '.output', quat_node + '.inputRotateX')
cmds.connectAttr(quat_node + '.outputQuatX', multiply_node + '.input1X')
cmds.connectAttr(multiply_node + '.outputX', sum_node + '.input1D[1]')
cmds.connectAttr(sum_node + '.output1D', obj + '.' + output_attr)
if add_absolute_output: # abs()
squared_node = cmds.createNode('multiplyDivide', name=obj + '_abs_squared')
reverse_squared_node = cmds.createNode('multiplyDivide', name=obj + '_reverseAbs_multiply')
cmds.setAttr(squared_node + '.operation', 3) # Power
cmds.setAttr(reverse_squared_node + '.operation', 3) # Power
cmds.setAttr(squared_node + '.input2X', 2)
cmds.setAttr(reverse_squared_node + '.input2X', .5)
cmds.connectAttr(obj + '.' + output_attr, squared_node + '.input1X')
cmds.connectAttr(squared_node + '.outputX', reverse_squared_node + '.input1X')
cmds.connectAttr(reverse_squared_node + '.outputX', obj + '.' + abs_attr)
return [(obj + '.' + output_attr), (obj + '.' + abs_attr)]
else:
return [(obj + '.' + output_attr), None]
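# Example usage (a minimal sketch; assumes a scene object named "pSphere1" to act as the attribute holder):
# output_attrs = add_sine_attributes('pSphere1', sine_prefix='sine', tick_source_attr='time1.outTime',
#                                    hide_unkeyable=False, add_absolute_output=True, nice_name_prefix=True)
# # output_attrs -> ['pSphere1.sineOutput', 'pSphere1.sineAbsOutput']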
#Build UI
if __name__ == '__main__':
build_gui_add_sine_attr() | 46.27003 | 207 | 0.621946 | [
"MIT"
] | freemanpro/gt-tools | python-scripts/gt_add_sine_attributes.py | 15,593 | Python |
#!/usr/bin/python
# -*- coding: UTF-8, tab-width: 4 -*-
from sys import argv, stdin, stdout, stderr
from codecs import open as cfopen
import json
def main(invocation, *cli_args):
json_src = stdin
if len(cli_args) > 0:
json_src = cfopen(cli_args[0], 'r', 'utf-8')
data = json.load(json_src, 'utf-8')
json_enc = dict(
indent=2,
sort_keys=True, ### ### <-- some magic here ### ###
)
json_enc['separators'] = (',', ': ',)
# ^-- because the default had space after comma even at end of line.
rules = data.get('entries')
if rules is not None:
del data['entries']
json_enc = json.JSONEncoder(**json_enc)
json_iter = json_enc.iterencode(data)
for chunk in json_iter:
chunk = chunk.lstrip()
if chunk == '': continue
if chunk.startswith('"'):
stdout.write(' ')
if rules is not None:
stdout.write('"entries": {\n')
verbsep = ' '
for verb in sorted(rules.keys()):
stdout.write(verbsep + json.dumps(verb) + ': [')
write_rule_subjs(stdout, rules[verb])
stdout.write(']')
verbsep = ',\n '
stdout.write('\n}, ')
stdout.write(chunk)
break
stdout.write(chunk)
for chunk in json_iter:
if rules is not None:
if chunk.startswith(','):
stdout.write(',')
chunk = chunk[1:]
if chunk.startswith('\n'):
chunk = ' ' + chunk.lstrip()
stdout.write(chunk)
stdout.write('\n')
def gen_rule_subj_hrname(subj):
hrname = [ subj.get(role, u'\uFFFF') for role in ('o', 'd',) ]
hrname = [ gen_rule_host_hrname(part) for part in hrname ]
return hrname
def gen_rule_host_hrname(host):
try:
host = host['h']
except: pass
host = split_subdomains(host)
return host
def split_subdomains(host):
parts = host.split('.')
major = [ parts.pop() ]
while len(parts) > 0:
part = parts.pop()
major.insert(0, part)
if len(part) > 3: break
return '.'.join(major) + ':' + '.'.join(parts)
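# For example (derived from the loop above): split_subdomains('www.mail.example.com') returns
# 'example.com:www.mail' -- labels are consumed right-to-left until one longer than 3 characters
# has been added to the major part; everything left of it becomes the subdomain part.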
def write_rule_subjs(dest, subjs):
if len(subjs) < 1: return
for subj in subjs:
subj['hrname'] = gen_rule_subj_hrname(subj)
subjs.sort(key=lambda s: s['hrname'])
props = None
stdout.write('\n')
for subj in subjs:
if props is not None:
dest.write(',\n')
dest.write(' {')
propsep = ' '
del subj['hrname']
props = [ 'o', 'd' ]
props += [ prop for prop in sorted(subj.keys()) if prop not in props ]
for prop in props:
if subj.has_key(prop):
dest.write(propsep + json.dumps(prop) + ': '
+ json.dumps(subj[prop]))
propsep = ', '
dest.write(' }')
stdout.write('\n ')
if __name__ == '__main__':
main(*argv)
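    # Typical invocation (illustrative; the file name is only an example):
    #   python rqpol-sort.py requestpolicy-export.json > sorted.json
    # With no file argument the JSON document is read from stdin instead.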
| 23.423077 | 78 | 0.514943 | [
"MIT"
] | mk-pmb/firefox-requestpolicy-util | rqpol-sort.py | 3,045 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
from typing import Optional
from fastapi import APIRouter, Depends, Request
from epicteller.core.controller import campaign as campaign_ctl
from epicteller.core.controller import room as room_ctl
from epicteller.core.error.base import NotFoundError
from epicteller.web.controller.paging import generate_paging_info
from epicteller.web.fetcher import room as room_fetcher
from epicteller.web.model import PagingResponse
from epicteller.web.model.campaign import Campaign
from epicteller.web.model.room import Room
router = APIRouter()
async def prepare(url_token: str):
room = await room_ctl.get_room(url_token=url_token)
if not room or room.is_removed:
raise NotFoundError()
return room
@router.get('/rooms/{url_token}', response_model=Room, response_model_exclude_none=True)
async def get_room(room: Room = Depends(prepare)):
web_room = await room_fetcher.fetch_room(room)
return web_room
@router.get('/rooms/{url_token}/campaigns', response_model=PagingResponse[Campaign], response_model_exclude_none=True)
async def get_room_campaigns(r: Request, room: Room = Depends(prepare), after: Optional[str] = None,
offset: Optional[int] = 0, limit: Optional[int] = 20):
after_id = 0
if after_campaign := await campaign_ctl.get_campaign(url_token=after):
after_id = after_campaign.id
total, campaigns = await asyncio.gather(
campaign_ctl.get_campaign_count_by_room(room),
campaign_ctl.get_campaigns_by_room(room, after_id, limit),
)
paging_info = await generate_paging_info(r,
total=total,
after=campaigns[-1].id if len(campaigns) else None,
offset=offset,
limit=limit)
return PagingResponse[Campaign](data=campaigns, paging=paging_info)
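# Example request (illustrative): GET /rooms/<url_token>/campaigns?after=<campaign_url_token>&limit=20
# The paging info of the response uses the last campaign of the returned page as the next cursor.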
| 39.76 | 118 | 0.69165 | [
"MIT"
] | KawashiroNitori/epicteller | epicteller/web/handler/room.py | 1,988 | Python |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('parsing')
PARSER_TYPES = ['movie', 'series']
# Mapping of parser type to (mapping of parser name to plugin instance)
parsers = {}
# Mapping from parser type to the name of the default/selected parser for that type
default_parsers = {}
selected_parsers = {}
# We need to wait until manager startup to access other plugin instances, to make sure they have all been loaded
@event('manager.startup')
def init_parsers(manager):
"""Prepare our list of parsing plugins and default parsers."""
for parser_type in PARSER_TYPES:
parsers[parser_type] = {}
for p in plugin.get_plugins(group=parser_type + '_parser'):
parsers[parser_type][p.name.replace('parser_', '')] = p.instance
# Select default parsers based on priority
func_name = 'parse_' + parser_type
default_parsers[parser_type] = max(iter(parsers[parser_type].items()),
key=lambda p: getattr(getattr(p[1], func_name), 'priority', 0))[0]
log.debug('setting default %s parser to %s. (options: %s)' %
(parser_type, default_parsers[parser_type], parsers[parser_type]))
class PluginParsing(object):
"""Provides parsing framework"""
@property
def schema(self):
# Create a schema allowing only our registered parsers to be used under the key of each parser type
properties = {}
for parser_type in PARSER_TYPES:
parser_names = [p.name.replace('parser_', '') for p in plugin.get_plugins(group=parser_type + '_parser')]
properties[parser_type] = {'type': 'string', 'enum': parser_names}
s = {
'type': 'object',
'properties': properties,
'additionalProperties': False
}
return s
def on_task_start(self, task, config):
# Set up user selected parsers from config for this task run
if config:
selected_parsers.update(config)
def on_task_exit(self, task, config):
# Restore default parsers for next task run
selected_parsers.clear()
on_task_abort = on_task_exit
def parse_series(self, data, name=None, **kwargs):
"""
Use the selected series parser to parse series information from `data`
:param data: The raw string to parse information from.
:param name: The series name to parse data for. If not supplied, parser will attempt to guess series name
automatically from `data`.
:returns: An object containing the parsed information. The `valid` attribute will be set depending on success.
"""
parser = parsers['series'][selected_parsers.get('series', default_parsers.get('series'))]
return parser.parse_series(data, name=name, **kwargs)
def parse_movie(self, data, **kwargs):
"""
Use the selected movie parser to parse movie information from `data`
:param data: The raw string to parse information from
:returns: An object containing the parsed information. The `valid` attribute will be set depending on success.
"""
parser = parsers['movie'][selected_parsers.get('movie') or default_parsers['movie']]
return parser.parse_movie(data, **kwargs)
@event('plugin.register')
def register_plugin():
plugin.register(PluginParsing, 'parsing', api_ver=2)
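# Example task configuration (a sketch; valid parser names depend on which *_parser plugins are
# installed, commonly "guessit" or "internal"):
#
#   parsing:
#     series: guessit
#     movie: guessit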
| 39.43956 | 118 | 0.667317 | [
"MIT"
] | jbones89/Flexget | flexget/plugins/parsers/plugin_parsing.py | 3,589 | Python |
# Generated by Django 3.2.5 on 2021-07-20 12:31
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0003_article'),
]
operations = [
migrations.AlterField(
model_name='article',
name='detail',
field=ckeditor.fields.RichTextField(),
),
]
| 19.4 | 50 | 0.597938 | [
"MIT"
] | merveealpay/django-blog-project | blog/migrations/0004_alter_article_detail.py | 388 | Python |
import flask
from flask import request, jsonify
import sqlite3
app = flask.Flask(__name__)
app.config["DEBUG"] = True
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
@app.route('/', methods=['GET'])
def home():
return '''<h1>Distant Reading Archive</h1>
<p>A prototype API for distant reading of science fiction novels.</p>'''
@app.route('/api/v1/resources/books/all', methods=['GET'])
def api_all():
conn = sqlite3.connect('books.db')
conn.row_factory = dict_factory
cur = conn.cursor()
all_books = cur.execute('SELECT * FROM books;').fetchall()
return jsonify(all_books)
@app.errorhandler(404)
def page_not_found(e):
return "<h1>404</h1><p>The resource could not be found.</p>", 404
@app.route('/api/v1/resources/books', methods=['GET'])
def api_filter():
query_parameters = request.args
# print("columns" in query_parameters)
id = query_parameters.get('id')
published = query_parameters.get('published')
author = query_parameters.get('author')
print(query_parameters.get('keyword'))
print(query_parameters.get('columns'))
# query = "SELECT * FROM books WHERE"
# to_filter = []
# if id:
# query += ' id=? AND'
# to_filter.append(id)
# if published:
# query += ' published=? AND'
# to_filter.append(published)
# if author:
# query += ' author=? AND'
# to_filter.append(author)
# if not (id or published or author):
# return page_not_found(404)
# query = query[:-4] + ';'
# conn = sqlite3.connect('books.db')
# conn.row_factory = dict_factory
# cur = conn.cursor()
# results = cur.execute(query, to_filter).fetchall()
return jsonify({"test":1})
app.run()
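# Example requests (illustrative, against the default development server on 127.0.0.1:5000):
#   GET /api/v1/resources/books/all    -> every row of books.db as JSON
#   GET /api/v1/resources/books?id=1   -> currently returns the {"test": 1} stub while the
#                                         filtering query above is commented out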
| 24.621622 | 72 | 0.630626 | [
"BSD-3-Clause"
] | tuilagio/wordNotify-rev1 | tools/test_flask.py | 1,822 | Python |
#!/usr/bin/python3
# apt install libnetfilter-queue-dev
import os
import random
import string
import time
from multiprocessing import Pool
from netfilterqueue import NetfilterQueue
from scapy.all import *
SINGLE_QUEUE = False
if SINGLE_QUEUE:
nfqueue_number = 1
else:
nfqueue_number = 4
def setup():
k_module = "modprobe br_netfilter"
os.system(k_module)
if SINGLE_QUEUE:
iptables_rule = "iptables -A FORWARD -j NFQUEUE --queue-num %d -m physdev --physdev-in ens38" % (nfqueue_number - 1)
else:
iptables_rule = "iptables -A FORWARD -j NFQUEUE --queue-balance 0:%d -m physdev --physdev-in ens38" % (nfqueue_number - 1)
print("Adding iptable rules : ")
print(iptables_rule)
os.system(iptables_rule)
print("Setting ipv4 forward settings : ")
os.system("sysctl net.ipv4.ip_forward=1")
def change_payload(packet, load):
packet[Raw].load = load
del packet[IP].len
del packet[IP].chksum
del packet[TCP].chksum
#python2
#return packet.__class__(packet)
#python3
return packet.__bytes__()
def slack_chars(payload, source, target, finalize=False):
if source in payload["data"]:
payload["diff"] += len(source) - len(target)
payload["data"] = payload["data"].replace(source, target)
if finalize:
slacks = [b"\r\nAccept-Encoding: gzip, deflate", b"\r\nConnection: Keep-Alive"]
payload["diff"] += len(slacks[0])
payload["data"] = payload["data"].replace(slacks[0], b"")
for slack in slacks[1:]:
if payload["diff"] < 0:
payload["diff"] += len(slack)
payload["data"] = payload["data"].replace(slack, b"")
if payload["diff"] > 7:
header = b"\r\nID: "
stuff = b"".join(bytes(random.choice(string.ascii_uppercase + string.digits), "ascii") for _ in range(payload["diff"] - len(header)))
payload["data"] = payload["data"][:-4:] + header + stuff
else:
payload["data"] = payload["data"][:-4:] + b" ".join(b"" for _ in range(payload["diff"]))
payload["data"] = payload["data"] + b"\r\n\r\n"
payload["diff"] = 0
return payload
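# Length bookkeeping (illustrative): each substitution above adds len(source) - len(target) to
# payload["diff"], so a negative value means the request grew. On finalize the Accept-Encoding
# header (and, while the request is still too long, the Connection header) is stripped, and any
# remaining shortfall is padded with an "ID: ..." header or spaces, keeping the rewritten request
# at its original byte length (presumably so the forwarded TCP sequence numbers stay consistent).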
def callback(payload):
print(payload)
try:
data = payload.get_payload()
pkt = IP(data)
if isinstance(pkt.payload, TCP):
if isinstance(pkt[TCP].payload, Raw):
raw_payload = pkt[TCP].load
if raw_payload.startswith(b"GET ") or raw_payload.startswith(b"POST "):
if b"Windows NT 6.1" in raw_payload:
wrap_payload = {"diff": 0, "data": raw_payload}
if b"; WOW64; Trident/" not in raw_payload:
wrap_payload = slack_chars(wrap_payload, b"; Trident/", b"; WOW64; Trident/")
wrap_payload = slack_chars(wrap_payload, b"Accept-Language: ja-JP\r\n", b"Accept-Language: ko-KR\r\n")
wrap_payload = slack_chars(wrap_payload, b"Accept-Language: en-US\r\n", b"Accept-Language: ko-KR\r\n", finalize=True)
raw_payload = wrap_payload["data"]
new_pkt = change_payload(pkt, raw_payload)
payload.set_payload(new_pkt)
except Exception as e:
print(e)
finally:
payload.accept()
def main():
setup()
if SINGLE_QUEUE:
start(0)
else:
p = Pool(nfqueue_number)
try:
p.map_async(start, [x for x in range(nfqueue_number)]).get(999999999)
p.close()
except KeyboardInterrupt:
p.terminate()
print("Flushing iptables.")
os.system('iptables -F')
os.system('iptables -X')
def start(queue_num):
nfqueue = NetfilterQueue()
nfqueue.bind(queue_num, callback)
try:
nfqueue.run(block=True)
finally:
nfqueue.unbind()
if __name__ == "__main__":
main()
| 30.6 | 145 | 0.586978 | [
"MIT"
] | moonlightelite/Traffic-Mod | traffic_modifier.py | 3,978 | Python |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Large tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class StreamingPrecisionRecallAtEqualThresholdsLargeTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testLargeCase(self):
shape = [32, 512, 256, 1]
predictions = random_ops.random_uniform(
shape, 0.0, 1.0, dtype=dtypes_lib.float32)
labels = math_ops.greater(random_ops.random_uniform(shape, 0.0, 1.0), 0.5)
result, update_op = metric_ops.precision_recall_at_equal_thresholds(
labels=labels, predictions=predictions, num_thresholds=201)
# Run many updates, enough to cause highly inaccurate values if the
# code used float32 for accumulation.
num_updates = 71
with self.cached_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_updates):
sess.run(update_op)
prdata = sess.run(result)
# Since we use random values, we won't know the tp/fp/tn/fn values, but
# tp and fp at threshold 0 should be the total number of positive and
# negative labels, hence their sum should be total number of pixels.
expected_value = 1.0 * np.product(shape) * num_updates
got_value = prdata.tp[0] + prdata.fp[0]
# They should be at least within 1.
self.assertNear(got_value, expected_value, 1.0)
if __name__ == '__main__':
test.main()
| 39.597015 | 81 | 0.707501 | [
"Apache-2.0"
] | uve/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops_large_test.py | 2,653 | Python |
# -*- coding: utf-8 -*-
import types
import copy
import inspect
import pprint
import re
import sys
import os
import pdb
import warnings
import logging
try:
import cProfile
import pstats
has_debug = True
except ImportError:
has_debug = False
import urlparse
import cgi
from wsgiref.simple_server import make_server
from wsgiref.handlers import SimpleHandler
SIMPLEAPI_DEBUG = bool(int(os.environ.get('SIMPLEAPI_DEBUG', 0)))
SIMPLEAPI_DEBUG_FILENAME = os.environ.get('SIMPLEAPI_DEBUG_FILENAME',
'simpleapi.profile')
SIMPLEAPI_DEBUG_LEVEL = os.environ.get('SIMPLEAPI_DEBUG_LEVEL', 'all')
assert SIMPLEAPI_DEBUG_LEVEL in ['all', 'call'], \
u'SIMPLEAPI_DEBUG_LEVEL must be one of these: all, call'
if SIMPLEAPI_DEBUG and not has_debug:
SIMPLEAPI_DEBUG = False
warnings.warn("Debugging disabled since packages pstats/cProfile not found (maybe you have to install it).")
TRIGGERED_METHODS = ['get', 'post', 'put', 'delete']
FRAMEWORKS = ['flask', 'django', 'appengine', 'dummy', 'standalone', 'wsgi']
MAX_CONTENT_LENGTH = 1024 * 1024 * 16 # 16 megabytes
restricted_functions = [
'before_request',
'after_request'
]
try:
from google.appengine.ext.webapp import RequestHandler as AE_RequestHandler
has_appengine = True
except ImportError:
has_appengine = False
from simpleapi.message.common import SAException
from sapirequest import SAPIRequest
from request import Request, RequestException
from response import Response, ResponseMerger, ResponseException
from namespace import NamespaceException
from feature import __features__, Feature, FeatureException
from simpleapi.message import formatters, wrappers
from utils import glob_list
__all__ = ('Route', )
class Route(object):
def __new__(cls, *args, **kwargs):
if kwargs.get('framework') == 'appengine':
assert has_appengine
class AppEngineRouter(AE_RequestHandler):
def __getattribute__(self, name):
if name in TRIGGERED_METHODS:
self.request.method = name
return self
else:
return AE_RequestHandler.__getattribute__(self, name)
def __call__(self):
result = self.router(self.request)
self.response.out.write(result['result'])
AppEngineRouter.router = Router(*args, **kwargs)
return AppEngineRouter
elif kwargs.get('framework') == 'flask':
obj = Router(*args, **kwargs)
obj.__name__ = 'Route'
return obj
elif kwargs.get('framework') == 'wsgi':
router = Router(*args, **kwargs)
class WSGIHandler(object):
def __call__(self, *args, **kwargs):
return self.router.handle_request(*args, **kwargs)
handler = WSGIHandler()
handler.router = router
return handler
else:
return Router(*args, **kwargs)
class StandaloneRequest(object): pass
class RouterException(SAException): pass
class Router(object):
def __init__(self, *namespaces, **kwargs):
"""Takes at least one namespace.
"""
self.name = kwargs.pop('name', str(id(self)))
self.logger = logging.getLogger("simpleapi.%s" % self.name)
self.nmap = {}
self.debug = kwargs.pop('debug', False)
self.ignore_unused_args = kwargs.pop('ignore_unused_args', False)
if self.debug and not has_debug:
self.debug = False
warnings.warn("Debugging disabled since packages pstats/cProfile not found (maybe you have to install it).")
self.restful = kwargs.pop('restful', False)
self.framework = kwargs.pop('framework', 'django')
self.path = re.compile(kwargs.pop('path', r'^/'))
assert len(kwargs) == 0, u'Unknown Route configuration(s) (%s)' % \
", ".join(kwargs.keys())
# make shortcut
self._caller = self.__call__
assert self.framework in FRAMEWORKS
assert (self.debug ^ SIMPLEAPI_DEBUG) or \
not (self.debug and SIMPLEAPI_DEBUG), \
u'You can either activate Route-debug or simpleapi-debug, not both.'
if self.debug or SIMPLEAPI_DEBUG:
self.logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler.setFormatter(formatter)
self.logger.addHandler(handler)
else:
self.logger.setLevel(logging.WARNING)
if SIMPLEAPI_DEBUG and SIMPLEAPI_DEBUG_LEVEL == 'all':
self.profile_start()
for namespace in namespaces:
self.add_namespace(namespace)
def handle_request(self, environ, start_response):
if not self.path.match(environ.get('PATH_INFO')):
status = '404 Not found'
start_response(status, [])
return ["Entry point not found"]
else:
content_type = environ.get('CONTENT_TYPE')
try:
content_length = int(environ['CONTENT_LENGTH'])
except (KeyError, ValueError):
content_length = 0
# make sure we ignore too large requests for security and stability
# reasons
if content_length > MAX_CONTENT_LENGTH:
status = '413 Request entity too large'
start_response(status, [])
return ["Request entity too large"]
request_method = environ.get('REQUEST_METHOD', '').lower()
# make sure we only support methods we care
if not request_method in TRIGGERED_METHODS:
status = '501 Not Implemented'
start_response(status, [])
return ["Not Implemented"]
query_get = urlparse.parse_qs(environ.get('QUERY_STRING'))
for key, value in query_get.iteritems():
query_get[key] = value[0] # respect the first value only
query_post = {}
if content_type in ['application/x-www-form-urlencoded',
'application/x-url-encoded']:
post_env = environ.copy()
post_env['QUERY_STRING'] = ''
fs = cgi.FieldStorage(
fp=environ['wsgi.input'],
environ=post_env,
keep_blank_values=True
)
query_post = {}
for key in fs:
query_post[key] = fs.getvalue(key)
elif content_type == 'multipart/form-data':
# XXX TODO
raise NotImplementedError, u'Currently not supported.'
# GET + POST
query_data = query_get
query_data.update(query_post)
# Make request
request = StandaloneRequest()
request.method = request_method
request.data = query_data
request.remote_addr = environ.get('REMOTE_ADDR', '')
# Make call
result = self._caller(request)
status = '200 OK'
headers = [('Content-type', result['mimetype'])]
start_response(status, headers)
return [result['result'],]
def serve(self, host='', port=5050):
httpd = make_server(host, port, self.handle_request)
self.logger.info(u"Started serving on port %d..." % port)
try:
httpd.serve_forever()
except KeyboardInterrupt:
self.logger.info(u"Server stopped.")
def profile_start(self):
assert has_debug
self.profile = cProfile.Profile()
self.profile.enable()
def profile_stop(self):
assert has_debug
self.profile.disable()
self.profile.dump_stats(SIMPLEAPI_DEBUG_FILENAME)
def profile_stats(self):
assert has_debug
self.logger.debug(u"Loading stats...")
stats = pstats.Stats(SIMPLEAPI_DEBUG_FILENAME)
stats.strip_dirs().sort_stats('time', 'calls') \
.print_stats()
def __del__(self):
if SIMPLEAPI_DEBUG and SIMPLEAPI_DEBUG_LEVEL == 'all':
self.profile_stop()
self.profile_stats()
def is_standalone(self):
return self.framework in ['standalone', 'wsgi']
def is_dummy(self):
return self.framework == 'dummy'
def is_appengine(self):
return self.framework == 'appengine'
def is_flask(self):
return self.framework == 'flask'
def is_django(self):
return self.framework == 'django'
def _redefine_default_namespace(self):
# - recalculate default namespace version -
# if map has no default version, determine namespace with the
# highest version
if self.nmap.has_key('default'):
del self.nmap['default']
self.nmap['default'] = self.nmap[max(self.nmap.keys())]
def remove_namespace(self, version):
if self.nmap.has_key(version):
del self.nmap[version]
self._redefine_default_namespace()
return True
else:
return False
def add_namespace(self, namespace):
version = getattr(namespace, '__version__', 1)
assert isinstance(version, int), \
u'version must be either an integer or not set'
# make sure no version is assigned twice
assert not self.nmap.has_key(version), u'version is assigned twice'
allowed_functions = []
# check for introspection allowed
if getattr(namespace, '__introspection__', False):
allowed_functions.append('introspect')
# determine public and published functions
functions = filter(lambda item: '__' not in item[0] and item[0] not in
restricted_functions and ((getattr(item[1], 'published', False) ==
True) or item[0] in allowed_functions),
inspect.getmembers(namespace))
# determine arguments of each function
functions = dict(functions)
for function_name, function_method in functions.iteritems():
# check for reserved function names
assert function_name not in ['error', '__init__', 'get_name'],\
u'Name %s is reserved.' % function_name
# ArgSpec(args=['self', 'a', 'b'], varargs=None, keywords=None, defaults=None)
raw_args = inspect.getargspec(function_method)
# does the function allows kwargs?
kwargs_allowed = raw_args[2] is not None
# get all arguments
            all_args = raw_args[0][1:] # exclude `self`
# build a dict of optional arguments
if raw_args[3] is not None:
default_args = zip(
raw_args[0][-len(raw_args[3]):],
raw_args[3]
)
default_args = dict(default_args)
else:
default_args = {}
# build a list of obligatory arguments
obligatory_args = list(set(all_args) - set(default_args.keys()))
# determine constraints for function
if hasattr(function_method, 'constraints'):
constraints = function_method.constraints
assert isinstance(constraints, dict) or callable(constraints)
if isinstance(constraints, dict):
def check_constraint(constraints):
def check(namespace, key, value):
constraint = constraints.get(key)
if not constraint:
return value
if hasattr(constraint, 'match'):
if constraint.match(value):
return value
else:
raise ValueError(u'%s does not match constraint')
else:
if isinstance(constraint, bool):
return bool(int(value))
else:
return constraint(value)
return check
constraint_function = check_constraint(constraints)
elif callable(constraints):
constraint_function = constraints
else:
constraints = None
constraint_function = lambda namespace, key, value: value
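            # Constraint examples (illustrative): a published method may declare
            #   my_func.constraints = {'age': int, 'name': re.compile('^[a-z]+$'), 'active': True}
            # Values with a .match method must match the input, a boolean value casts the input via
            # bool(int(value)), any other callable acts as a converter, and `constraints` may instead
            # be a single callable taking (namespace, key, value).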
# determine allowed methods
if hasattr(function_method, 'methods'):
allowed_methods = function_method.methods
assert isinstance(allowed_methods, (list, tuple))
method_function = lambda method, methods: method in methods
else:
allowed_methods = None
method_function = lambda method, methods: True
# determine format
format = getattr(function_method, 'format', lambda val: val)
functions[function_name] = {
'method': function_method,
'name': function_name,
'args': {
'raw': raw_args,
'all': all_args,
'obligatory': obligatory_args,
'defaults': default_args,
'kwargs_allowed': kwargs_allowed
},
'constraints': {
'function': constraint_function,
'raw': constraints,
},
'format': format,
'methods': {
'function': method_function,
'allowed_methods': allowed_methods,
}
}
# configure authentication
if hasattr(namespace, '__authentication__'):
authentication = namespace.__authentication__
if isinstance(authentication, basestring):
if hasattr(namespace, authentication):
authentication = getattr(namespace, authentication)
else:
authentication = lambda namespace, access_key: \
namespace.__authentication__ == access_key
else:
# grant allow everyone access
authentication = lambda namespace, access_key: True
# configure ip address based access rights
if hasattr(namespace, '__ip_restriction__'):
ip_restriction = namespace.__ip_restriction__
assert isinstance(ip_restriction, list) or callable(ip_restriction)
if isinstance(ip_restriction, list):
# make the ip address list wildcard searchable
namespace.__ip_restriction__ = \
glob_list(namespace.__ip_restriction__)
# restrict access to the given ip address list
ip_restriction = lambda namespace, ip: ip in \
namespace.__ip_restriction__
else:
# accept every ip address
ip_restriction = lambda namespace, ip: True
# configure input formatters
input_formatters = formatters.copy()
allowed_formatters = getattr(namespace, '__input__',
formatters.get_defaults())
input_formatters = filter(lambda i: i[0] in allowed_formatters,
input_formatters.items())
input_formatters = dict(input_formatters)
# configure output formatters
output_formatters = formatters.copy()
allowed_formatters = getattr(namespace, '__output__',
formatters.get_defaults())
output_formatters = filter(lambda i: i[0] in allowed_formatters,
output_formatters.items())
output_formatters = dict(output_formatters)
# configure wrappers
useable_wrappers = wrappers.copy()
if hasattr(namespace, '__wrapper__'):
allowed_wrapper = namespace.__wrapper__
useable_wrappers = filter(lambda i: i[0] in allowed_wrapper,
useable_wrappers.items())
useable_wrappers = dict(useable_wrappers)
self.nmap[version] = {
'class': namespace,
'functions': functions,
'ip_restriction': ip_restriction,
'authentication': authentication,
'input_formatters': input_formatters,
'output_formatters': output_formatters,
'wrappers': useable_wrappers,
}
# set up all features
features = []
if hasattr(namespace, '__features__'):
raw_features = namespace.__features__
for feature in raw_features:
assert isinstance(feature, basestring) or \
issubclass(feature, Feature)
if isinstance(feature, basestring):
assert feature in __features__.keys(), \
u'%s is not a built-in feature' % feature
features.append(__features__[feature](self.nmap[version]))
elif issubclass(feature, Feature):
features.append(feature(self.nmap[version]))
self.nmap[version]['features'] = features
self._redefine_default_namespace()
return version
def __call__(self, http_request=None, **urlparameters):
sapi_request = SAPIRequest(self, http_request)
request_items = dict(sapi_request.REQUEST.items())
request_items.update(urlparameters)
if SIMPLEAPI_DEBUG and SIMPLEAPI_DEBUG_LEVEL == 'call':
self.logger.info(pprint.pformat(request_items))
self.profile_start()
version = request_items.pop('_version', 'default')
callback = request_items.pop('_callback', None)
output_formatter = request_items.pop('_output', None)
# let's activate JSONP automatically if _callback is given
if callback and not output_formatter:
output_formatter = 'jsonp'
elif not output_formatter:
output_formatter = 'json'
input_formatter = request_items.pop('_input', 'value')
wrapper = request_items.pop('_wrapper', 'default')
mimetype = request_items.pop('_mimetype', None)
input_formatter_instance = None
output_formatter_instance = None
wrapper_instance = None
try:
try:
version = int(version)
except (ValueError, TypeError):
pass
if not self.nmap.has_key(version):
# continue with wrong version to get the formatters/wrappers
# raise the error later!
namespace = self.nmap['default']
else:
namespace = self.nmap[version]
# check input formatter
if input_formatter not in namespace['input_formatters']:
raise RequestException(u'Input formatter not allowed or ' \
'unknown: %s' % input_formatter)
# get input formatter
            input_formatter_instance = namespace['input_formatters'][input_formatter](sapi_request, callback)
# check output formatter
if output_formatter not in namespace['output_formatters']:
raise RequestException(u'Output formatter not allowed or ' \
'unknown: %s' % output_formatter)
# get output formatter
output_formatter_instance = namespace['output_formatters'][output_formatter](sapi_request, callback)
# check wrapper
if wrapper not in namespace['wrappers']:
raise RequestException(u'Wrapper unknown or not allowed: %s' % \
wrapper)
# get wrapper
wrapper_instance = namespace['wrappers'][wrapper]
# check whether version exists or not
if not self.nmap.has_key(version):
raise RouterException(u'Version %s not found (possible: %s)' % \
(version, ", ".join(map(lambda i: str(i), self.nmap.keys()))))
request = Request(
sapi_request=sapi_request,
namespace=namespace,
                input_formatter=input_formatter_instance,
output_formatter=output_formatter_instance,
wrapper=wrapper_instance,
callback=callback,
mimetype=mimetype,
restful=self.restful,
debug=self.debug,
route=self,
ignore_unused_args=self.ignore_unused_args,
)
# map request items to the correct names
wi = wrapper_instance(sapi_request=sapi_request)
request_items = wi._parse(request_items)
if not isinstance(request_items,
(list, tuple, types.GeneratorType)):
request_items = [request_items, ]
responses = []
for request_item in request_items:
# clear session (except _internal)
sapi_request.session.clear()
# process request
try:
responses.append(request.process_request(request_item))
except (NamespaceException, RequestException, \
ResponseException, RouterException, FeatureException),e:
response = Response(
sapi_request,
errors=e.message,
output_formatter=output_formatter_instance,
wrapper=wrapper_instance,
mimetype=mimetype
)
responses.append(response)
rm = ResponseMerger(
sapi_request=sapi_request,
responses=responses,
)
http_response = rm.build()
except Exception, e:
if isinstance(e, (NamespaceException, RequestException, \
ResponseException, RouterException, \
FeatureException)):
err_msg = repr(e)
else:
err_msg = u'An internal error occurred during your request.'
trace = inspect.trace()
msgs = []
msgs.append('')
msgs.append(u"******* Exception raised *******")
msgs.append(u'Exception type: %s' % type(e))
msgs.append(u'Exception msg: %s' % repr(e))
msgs.append('')
msgs.append(u'------- Traceback follows -------')
for idx, item in enumerate(trace):
msgs.append(u"(%s)\t%s:%s (%s)" %
(idx+1, item[3], item[2], item[1]))
if item[4]:
for line in item[4]:
msgs.append(u"\t\t%s" % line.strip())
msgs.append('') # blank line
msgs.append(' -- End of traceback -- ')
msgs.append('')
self.logger.error("\n".join(msgs))
if self.debug:
e, m, tb = sys.exc_info()
pdb.post_mortem(tb)
response = Response(
sapi_request,
errors=err_msg,
output_formatter=output_formatter_instance,
wrapper=wrapper_instance,
mimetype=mimetype
)
http_response = response.build(skip_features=True)
if SIMPLEAPI_DEBUG and SIMPLEAPI_DEBUG_LEVEL == 'call':
self.profile_stop()
self.profile_stats()
return http_response | 38.074841 | 120 | 0.566225 | [
"MIT"
] | ghuntley/simpleapi | simpleapi/server/route.py | 23,912 | Python |
"""
Carbon Scraper Plugin for Userbot. Renders text as a stylish carbon.now.sh code image.
usage: .carbon //as a reply to any text message
Thanks to @AvinashReddy3108 for a Base Plugin.
Go and give a star to his repo: https://github.com/AvinashReddy3108/PaperplaneExtended/
"""
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from telethon import events
from urllib.parse import quote_plus
from urllib.error import HTTPError
from time import sleep
import asyncio
import os
@borg.on(events.NewMessage(pattern=r"\.carbon", outgoing=True))
async def carbon_api(e):
if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
""" A Wrapper for carbon.now.sh """
await e.edit("Processing...")
CARBON = 'https://carbon.now.sh/?l={lang}&code={code}'
CARBONLANG = "en"
textx = await e.get_reply_message()
pcode = e.text
if pcode[8:]:
pcode = str(pcode[8:])
elif textx:
pcode = str(textx.message) # Importing message to module
code = quote_plus(pcode) # Converting to urlencoded
url = CARBON.format(code=code, lang=CARBONLANG)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.binary_location = Config.GOOGLE_CHROME_BIN
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument('--disable-gpu')
prefs = {'download.default_directory' : './'}
chrome_options.add_experimental_option('prefs', prefs)
await e.edit("Processing 30%")
driver = webdriver.Chrome(executable_path=Config.CHROME_DRIVER, options=chrome_options)
driver.get(url)
download_path = './'
driver.command_executor._commands["send_command"] = ("POST", '/session/$sessionId/chromium/send_command')
params = {'cmd': 'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_path}}
command_result = driver.execute("send_command", params)
driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
sleep(5) # this might take a bit.
driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
sleep(5)
await e.edit("Processing 50%")
driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
sleep(5) #Waiting for downloading
await e.edit("Processing 90%")
file = './carbon.png'
await e.edit("Done!!")
await e.client.send_file(
e.chat_id,
file,
caption="Made with Love by [AmazerS](https://t.me/AmazerS_xD)",
force_document=True,
reply_to=e.message.reply_to_msg_id,
)
os.remove('./carbon.png')
# Removing carbon.png after uploading
await e.delete() # Deleting msg
| 37.905405 | 111 | 0.700891 | [
"MPL-2.0"
] | Amazers03/Unitg | stdplugins/carbon.py | 2,805 | Python |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 31 20:29:57 2014
@author: garrett
"""
from user import User
def save_users(users, filename='output.csv'):
'''Save users out to a .csv file
Each row will represent a user UID, following by all the user's students
(if the user has any)
INPUT:
> users: set of User objects
> filename: filename to save .csv to.'''
with open(filename, 'w') as file:
for count, user in enumerate(users):
file.write(str(user.get_uid()))
for student in user.get_students():
file.write(',' + str(student.get_uid()))
file.write('\n')
if count % 100 == 0:
file.flush()
return
def load_users(filename):
'''Load users from a .csv file
Each row will represent a user uid, following by all the user's student
(if the user has any). Note: the uid is not assumed to be an integer,
so it read in as a string, which shouldn't matter anyway.
TODO: we could probably speed this up by loading multiple lines at a time.
INPUT:
> filename: filename to read .csv from
RETURN:
> users: a set of User objects'''
users = dict()
# On first read, we create Users, on the following read, we save student
# connections
with open(filename, 'r') as file:
for line in file:
line = line.split('\n')[0]
split_line = line.split(',')
new_uid = _try_converting_to_int(split_line[0])
new_user = User(new_uid)
users.update({new_user.get_uid(): new_user})
with open(filename, 'r') as file:
for line in file:
line = line.split('\n')[0]
split_line = line.split(',')
current_uid = _try_converting_to_int(split_line[0])
for student_uid in split_line[1:]:
student_uid = _try_converting_to_int(student_uid)
users[current_uid].add_students(users[student_uid])
return set(users.values())
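# Round-trip sketch (assumes `users` is a set of User objects as produced elsewhere in this project):
#   save_users(users, 'output.csv')
#   users_again = load_users('output.csv')   # set of User objects with student links restored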
def _try_converting_to_int(num):
try:
return int(num)
except ValueError:
return num
| 28.986486 | 78 | 0.598601 | [
"MIT"
] | Garrett-R/infections | save_load.py | 2,145 | Python |
def equivalent(left, right):
if left.alphabet != right.alphabet:
raise ValueError("Input alphabets must be equal!")
transitions = []
previous_states = []
alphabet = left.alphabet
states = [(left.initial_state(), right.initial_state())]
while len(states) != 0:
l, r = states.pop()
previous_states.append((l.name, r.name))
for value in alphabet:
next_l, next_r = l.next_state(value), r.next_state(value)
            if (next_l is None and next_r is not None) \
                    or (next_r is None and next_l is not None):
                return False
            # Neither machine has a transition for this symbol: nothing to compare here.
            if next_l is None and next_r is None:
                continue
            if (next_l[0], next_r[0]) not in previous_states:
transitions.append((next_l[1], next_r[1]))
states.append((left[next_l[0]], right[next_r[0]]))
for (left, right) in transitions:
if left != right:
return False
return True
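# Usage sketch (assumed automaton interface, as used above: .alphabet, .initial_state(), indexing by
# state name, and next_state(value) returning a (state_name, transition_output) pair or None):
#   equivalent(machine_a, machine_b)   # True iff the reachable product of both machines never disagrees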
| 32.785714 | 69 | 0.576253 | [
"MIT"
] | SHvatov/AutomataTheory | equivalence/equivalence.py | 918 | Python |
import torch
from torchvision.transforms import functional as TFF
import matplotlib.pyplot as plt
from theseus.base.trainer.supervised_trainer import SupervisedTrainer
from theseus.utilities.loading import load_state_dict
from theseus.classification.utilities.gradcam import CAMWrapper, show_cam_on_image
from theseus.utilities.visualization.visualizer import Visualizer
from theseus.utilities.analysis.analyzer import ClassificationAnalyzer
from theseus.utilities.loggers.observer import LoggerObserver
LOGGER = LoggerObserver.getLogger("main")
class ClassificationTrainer(SupervisedTrainer):
"""Trainer for classification tasks
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def check_best(self, metric_dict):
"""
Hook function, called after metrics are calculated
"""
if metric_dict['bl_acc'] > self.best_value:
if self.iters > 0: # Have been training, else in evaluation-only mode or just sanity check
LOGGER.text(
f"Evaluation improved from {self.best_value} to {metric_dict['bl_acc']}",
level=LoggerObserver.INFO)
self.best_value = metric_dict['bl_acc']
self.save_checkpoint('best')
else:
if self.visualize_when_val:
self.visualize_pred()
def save_checkpoint(self, outname='last'):
"""
Save all information of the current iteration
"""
weights = {
'model': self.model.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'iters': self.iters,
'best_value': self.best_value,
}
if self.scaler is not None:
weights[self.scaler.state_dict_key] = self.scaler.state_dict()
self.checkpoint.save(weights, outname)
def load_checkpoint(self, path:str):
"""
Load all information the current iteration from checkpoint
"""
LOGGER.text("Loading checkpoints...", level=LoggerObserver.INFO)
state_dict = torch.load(path, map_location='cpu')
self.iters = load_state_dict(self.iters, state_dict, 'iters')
self.best_value = load_state_dict(self.best_value, state_dict, 'best_value')
self.scaler = load_state_dict(self.scaler, state_dict, self.scaler.state_dict_key)
def visualize_gt(self):
"""
Visualize dataloader for sanity check
"""
LOGGER.text("Visualizing dataset...", level=LoggerObserver.DEBUG)
visualizer = Visualizer()
# Train batch
batch = next(iter(self.trainloader))
images = batch["inputs"]
batch = []
for idx, inputs in enumerate(images):
img_show = visualizer.denormalize(inputs)
img_cam = TFF.to_tensor(img_show)
batch.append(img_cam)
grid_img = visualizer.make_grid(batch)
fig = plt.figure(figsize=(8,8))
plt.axis('off')
plt.imshow(grid_img)
plt.tight_layout(pad=0)
LOGGER.log([{
'tag': "Sanitycheck/batch/train",
'value': fig,
'type': LoggerObserver.FIGURE,
'kwargs': {
'step': self.iters
}
}])
# Validation batch
batch = next(iter(self.valloader))
images = batch["inputs"]
batch = []
for idx, inputs in enumerate(images):
img_show = visualizer.denormalize(inputs)
img_cam = TFF.to_tensor(img_show)
batch.append(img_cam)
grid_img = visualizer.make_grid(batch)
fig = plt.figure(figsize=(8,8))
plt.axis('off')
plt.imshow(grid_img)
plt.tight_layout(pad=0)
LOGGER.log([{
'tag': "Sanitycheck/batch/val",
'value': fig,
'type': LoggerObserver.FIGURE,
'kwargs': {
'step': self.iters
}
}])
@torch.enable_grad() #enable grad for CAM
def visualize_pred(self):
r"""Visualize model prediction and CAM
"""
        # Visualize Grad Class Activation Mapping and model predictions
LOGGER.text("Visualizing model predictions...", level=LoggerObserver.DEBUG)
visualizer = Visualizer()
batch = next(iter(self.valloader))
images = batch["inputs"]
targets = batch["targets"]
self.model.eval()
model_name = self.model.model.name
grad_cam = CAMWrapper.get_method(
name='gradcam',
model=self.model.model.get_model(),
model_name=model_name, use_cuda=next(self.model.parameters()).is_cuda)
grayscale_cams, label_indices, scores = grad_cam(images, return_probs=True)
gradcam_batch = []
pred_batch = []
for idx in range(len(grayscale_cams)):
image = images[idx]
target = targets[idx].item()
label = label_indices[idx]
grayscale_cam = grayscale_cams[idx, :]
score = scores[idx]
img_show = visualizer.denormalize(image)
visualizer.set_image(img_show)
if self.valloader.dataset.classnames is not None:
label = self.valloader.dataset.classnames[label]
target = self.valloader.dataset.classnames[target]
if label == target:
color = [0,1,0]
else:
color = [1,0,0]
visualizer.draw_label(
f"GT: {target}\nP: {label}\nC: {score:.4f}",
fontColor=color,
fontScale=0.8,
thickness=2,
outline=None,
offset=100
)
img_cam =show_cam_on_image(img_show, grayscale_cam, use_rgb=True)
img_cam = TFF.to_tensor(img_cam)
gradcam_batch.append(img_cam)
pred_img = visualizer.get_image()
pred_img = TFF.to_tensor(pred_img)
pred_batch.append(pred_img)
if idx == 63: # limit number of images
break
# GradCAM images
gradcam_grid_img = visualizer.make_grid(gradcam_batch)
fig = plt.figure(figsize=(8,8))
plt.imshow(gradcam_grid_img)
plt.axis("off")
plt.tight_layout(pad=0)
LOGGER.log([{
'tag': "Validation/gradcam",
'value': fig,
'type': LoggerObserver.FIGURE,
'kwargs': {
'step': self.iters
}
}])
# Prediction images
pred_grid_img = visualizer.make_grid(pred_batch)
fig = plt.figure(figsize=(10,10))
plt.imshow(pred_grid_img)
plt.axis("off")
plt.tight_layout(pad=0)
LOGGER.log([{
'tag': "Validation/prediction",
'value': fig,
'type': LoggerObserver.FIGURE,
'kwargs': {
'step': self.iters
}
}])
# Zeroing gradients in optimizer for safety
self.optimizer.zero_grad()
@torch.no_grad()
def visualize_model(self):
        # Visualize Model Graph
LOGGER.text("Visualizing architecture...", level=LoggerObserver.DEBUG)
batch = next(iter(self.valloader))
images = batch["inputs"].to(self.model.device)
LOGGER.log([{
'tag': "Sanitycheck/analysis/architecture",
'value': self.model.model.get_model(),
'type': LoggerObserver.TORCH_MODULE,
'kwargs': {
'inputs': images
}
}])
def analyze_gt(self):
"""
Perform simple data analysis
"""
LOGGER.text("Analyzing datasets...", level=LoggerObserver.DEBUG)
analyzer = ClassificationAnalyzer()
analyzer.add_dataset(self.trainloader.dataset)
fig = analyzer.analyze(figsize=(10,5))
LOGGER.log([{
'tag': "Sanitycheck/analysis/train",
'value': fig,
'type': LoggerObserver.FIGURE,
'kwargs': {
'step': self.iters
}
}])
analyzer = ClassificationAnalyzer()
analyzer.add_dataset(self.valloader.dataset)
fig = analyzer.analyze(figsize=(10,5))
LOGGER.log([{
'tag': "Sanitycheck/analysis/val",
'value': fig,
'type': LoggerObserver.FIGURE,
'kwargs': {
'step': self.iters
}
}])
def on_evaluate_end(self):
if self.visualize_when_val:
self.visualize_pred()
self.save_checkpoint()
def on_start(self):
if self.resume is not None:
self.load_checkpoint(self.resume)
def sanitycheck(self):
"""Sanity check before training
"""
self.visualize_gt()
self.analyze_gt()
self.visualize_model()
self.evaluate_epoch()
| 32.228571 | 102 | 0.566046 | [
"MIT"
] | lannguyen0910/theseus | theseus/classification/trainer/trainer.py | 9,024 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Beans Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test invalid p2p messages for nodes with bloom filters disabled.
Test that, when bloom filters are not enabled, peers are disconnected if:
1. They send a p2p mempool message
2. They send a p2p filterload message
3. They send a p2p filteradd message
4. They send a p2p filterclear message
"""
from test_framework.messages import msg_mempool, msg_filteradd, msg_filterload, msg_filterclear
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BeansTestFramework
from test_framework.util import assert_equal
class P2PNoBloomFilterMessages(BeansTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-peerbloomfilters=0"]]
def test_message_causes_disconnect(self, message):
"""Add a p2p connection that sends a message and check that it disconnects."""
peer = self.nodes[0].add_p2p_connection(P2PInterface())
peer.send_message(message)
peer.wait_for_disconnect()
assert_equal(self.nodes[0].getconnectioncount(), 0)
def run_test(self):
self.log.info("Test that peer is disconnected if it sends mempool message")
self.test_message_causes_disconnect(msg_mempool())
self.log.info("Test that peer is disconnected if it sends filterload message")
self.test_message_causes_disconnect(msg_filterload())
self.log.info("Test that peer is disconnected if it sends filteradd message")
self.test_message_causes_disconnect(msg_filteradd(data=b'\xcc'))
self.log.info("Test that peer is disconnected if it sends a filterclear message")
self.test_message_causes_disconnect(msg_filterclear())
if __name__ == '__main__':
P2PNoBloomFilterMessages().main()
| 40.673469 | 95 | 0.750627 | [
"MIT"
] | BakedInside/Beans-Core | test/functional/p2p_nobloomfilter_messages.py | 1,993 | Python |
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import six
import sklearn.metrics
import runtime.temp_file as temp_file
import xgboost as xgb
from runtime import db
from runtime.dbapi.paiio import PaiIOConnection
from runtime.feature.compile import compile_ir_feature_columns
from runtime.feature.derivation import get_ordered_field_descs
from runtime.feature.field_desc import DataType
from runtime.model import EstimatorType
from runtime.model.model import Model
from runtime.pai.pai_distributed import define_tf_flags
from runtime.step.xgboost.predict import _calc_predict_result
from runtime.xgboost.dataset import xgb_dataset
# TODO(typhoonzero): remove runtime.xgboost
from runtime.xgboost.feature_column import ComposedColumnTransformer
FLAGS = define_tf_flags()
SKLEARN_METRICS = [
'accuracy_score',
'average_precision_score',
'balanced_accuracy_score',
'brier_score_loss',
'cohen_kappa_score',
'explained_variance_score',
'f1_score',
'fbeta_score',
'hamming_loss',
'hinge_loss',
'log_loss',
'mean_absolute_error',
'mean_squared_error',
'mean_squared_log_error',
'median_absolute_error',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'zero_one_loss',
]
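# Example (illustrative): passing model_params={"validation.metrics": "accuracy_score,f1_score"}
# selects which of the sklearn.metrics functions listed above are computed by evaluate() below.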
def evaluate(datasource,
select,
result_table,
model,
label_name=None,
model_params=None,
result_column_names=[],
pai_table=None):
"""TBD
"""
if model_params is None:
model_params = {}
validation_metrics = model_params.get("validation.metrics",
"accuracy_score")
validation_metrics = [m.strip() for m in validation_metrics.split(",")]
bst = xgb.Booster()
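    # "model" is either a model name to be loaded from the database, or an
    # already loaded Model object whose files are expected in the current
    # working directory.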
if isinstance(model, six.string_types):
with temp_file.TemporaryDirectory(as_cwd=True):
model = Model.load_from_db(datasource, model)
bst.load_model("my_model")
else:
        assert isinstance(model,
                          Model), "unsupported model type %s" % type(model)
bst.load_model("my_model")
model_params = model.get_meta("attributes")
fc_map_ir = model.get_meta("features")
train_label = model.get_meta("label")
train_label_desc = train_label.get_field_desc()[0]
if label_name:
train_label_desc.name = label_name
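    # Rebuild the feature columns and field metadata saved at training time so
    # the evaluation data goes through the same transformations as training.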
feature_columns = compile_ir_feature_columns(fc_map_ir,
EstimatorType.XGBOOST)
field_descs = get_ordered_field_descs(fc_map_ir)
feature_column_names = [fd.name for fd in field_descs]
feature_metas = dict([(fd.name, fd.to_dict(dtype_to_string=True))
for fd in field_descs])
transform_fn = ComposedColumnTransformer(
feature_column_names, *feature_columns["feature_columns"])
    is_pai = bool(pai_table)
if is_pai:
conn = PaiIOConnection.from_table(pai_table)
else:
conn = db.connect_with_data_source(datasource)
with temp_file.TemporaryDirectory() as tmp_dir_name:
pred_fn = os.path.join(tmp_dir_name, "predict.txt")
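        # Dump the rows selected by "select" into a local file and wrap them
        # as xgboost DMatrix batches.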
dpred = xgb_dataset(
datasource=datasource,
fn=pred_fn,
dataset_sql=select,
feature_metas=feature_metas,
feature_column_names=feature_column_names,
label_meta=train_label_desc.to_dict(dtype_to_string=True),
cache=True,
batch_size=10000,
transform_fn=transform_fn,
is_pai=is_pai,
pai_table=pai_table,
pai_single_file=True,
feature_column_code=fc_map_ir)
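        # On PAI the whole dataset is dumped into a single file; otherwise
        # each batch is written to its own numbered file.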
for i, pred_dmatrix in enumerate(dpred):
if is_pai:
feature_file_name = pred_fn
else:
feature_file_name = pred_fn + "_%d" % i
preds = _calc_predict_result(bst, pred_dmatrix, model_params)
_store_evaluate_result(preds, feature_file_name, train_label_desc,
result_table, result_column_names,
validation_metrics, conn)
    conn.close()


def _store_evaluate_result(preds, feature_file_name, label_desc, result_table,
result_column_names, validation_metrics, conn):
"""
Save the evaluation result in the table.
Args:
preds: the prediction result.
feature_file_name (str): the file path where the feature dumps.
label_desc (FieldDesc): the label FieldDesc object.
result_table (str): the result table name.
result_column_names (list[str]): the result column names.
validation_metrics (list[str]): the evaluation metric names.
conn: the database connection object.
Returns:
None.
"""
y_test = []
with open(feature_file_name, 'r') as f:
        for line in f:
            row = line.strip().split("\t")
            # DMatrix stores the label in the first column.
if label_desc.dtype == DataType.INT64:
y_test.append(int(row[0]))
elif label_desc.dtype == DataType.FLOAT32:
y_test.append(float(row[0]))
else:
raise TypeError("unsupported data type {}".format(
label_desc.dtype))
y_test = np.array(y_test)
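    # Look up each requested metric by name in sklearn.metrics and compute it
    # against the ground-truth labels.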
evaluate_results = dict()
for metric_name in validation_metrics:
metric_name = metric_name.strip()
if metric_name not in SKLEARN_METRICS:
raise ValueError("unsupported metrics %s" % metric_name)
metric_func = getattr(sklearn.metrics, metric_name)
metric_value = metric_func(y_test, preds)
evaluate_results[metric_name] = metric_value
# write evaluation result to result table
with db.buffered_db_writer(conn, result_table, result_column_names) as w:
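        # The first column is filled with a constant "0.0" placeholder; the
        # remaining columns hold the requested metric values in order.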
row = ["0.0"]
for mn in validation_metrics:
row.append(str(evaluate_results[mn]))
w.write(row)
| 35.202128 | 78 | 0.658356 | [
"Apache-2.0"
] | awsl-dbq/sqlflow | python/runtime/step/xgboost/evaluate.py | 6,618 | Python |