code | apis | extract_api
---|---|---|
import cv2
import mediapipe as mp
class FaceDetection():
# initialize the face detection class with arguments from https://google.github.io/mediapipe/solutions/face_detection.html
def __init__(self, model_selection = 0, threshold = 0.5):
self.model_selection = model_selection
self.threshold = threshold
self.mp_draw = mp.solutions.drawing_utils
self.face_detection = mp.solutions.face_detection.FaceDetection(model_selection = self.model_selection, min_detection_confidence = self.threshold)
# gets bounding boxes using self.face_detection, returns a list of elements, element = (score, bbox_dict)
def get_bboxs(self, frame):
mp_detections = self.face_detection.process(frame)
score_bboxs = []
if mp_detections.detections:
for detection in mp_detections.detections:
score = detection.score[0]
mp_bbox = detection.location_data.relative_bounding_box
bbox_dict = {
'x_min': mp_bbox.xmin,
'y_min': mp_bbox.ymin,
'w': mp_bbox.width,
'h': mp_bbox.height
}
score_bboxs.append([score, bbox_dict])
return score_bboxs
# draws the bbox onto the frame
def draw_bbox(self, face_probs, bbox_dict, frame, col = (255, 0, 255), gender = None, gender_score = None):
x_min, y_min, w, h = bbox_dict.values()
frame_h, frame_w, _ = frame.shape
bbox = int(x_min * frame_w), int(y_min * frame_h), int(w * frame_w), int(h * frame_h)
# prepare text, depending on what attributes we predict
text = str(round(face_probs, 3))
if gender:
text = gender + ": " + str(round(gender_score, 2))
# draw bbox
cv2.rectangle(frame, bbox, col, 2)
cv2.putText(frame, text, (bbox[0], bbox[1] - 10),
cv2.FONT_HERSHEY_COMPLEX, 0.5, col, 1)
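# --- Usage sketch (editorial addition, not part of the original snippet) ---
# Assumes a local webcam at index 0. MediaPipe expects RGB input, so the BGR
# frame delivered by OpenCV is converted before detection.
if __name__ == '__main__':
    detector = FaceDetection(model_selection=0, threshold=0.5)
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        # detect on an RGB copy, draw on the original BGR frame
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        for score, bbox_dict in detector.get_bboxs(rgb):
            detector.draw_bbox(score, bbox_dict, frame)
        cv2.imshow('faces', frame)
        if (cv2.waitKey(1) & 0xFF) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()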
| [
"cv2.rectangle",
"mediapipe.solutions.face_detection.FaceDetection",
"cv2.putText"
] | [((410, 535), 'mediapipe.solutions.face_detection.FaceDetection', 'mp.solutions.face_detection.FaceDetection', ([], {'model_selection': 'self.model_selection', 'min_detection_confidence': 'self.threshold'}), '(model_selection=self.\n model_selection, min_detection_confidence=self.threshold)\n', (451, 535), True, 'import mediapipe as mp\n'), ((1814, 1848), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'bbox', 'col', '(2)'], {}), '(frame, bbox, col, 2)\n', (1827, 1848), False, 'import cv2\n'), ((1857, 1949), 'cv2.putText', 'cv2.putText', (['frame', 'text', '(bbox[0], bbox[1] - 10)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', 'col', '(1)'], {}), '(frame, text, (bbox[0], bbox[1] - 10), cv2.FONT_HERSHEY_COMPLEX,\n 0.5, col, 1)\n', (1868, 1949), False, 'import cv2\n')] |
#!/usr/bin/env python3.4
from flask import Flask
import requests
from fibonacci import fibonacci as fib
app = Flask(__name__)
@app.route('/count/<key>')
def count(key):
return requests.get('http://127.0.0.1:8080/count/{}'.format(key)).text
@app.route('/fibonacci/<n>')
def fibonacci(n):
return str(fib(int(n)))
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8082, debug=True)
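# Illustrative usage (editorial note, not part of the original app): with this
# service running and a separate counter backend listening on 127.0.0.1:8080,
# the routes behave roughly like this (fib indexing depends on the imported
# fibonacci module, here assumed to start at fib(0) = 0):
#
#   GET http://127.0.0.1:8082/fibonacci/10  ->  "55"
#   GET http://127.0.0.1:8082/count/visits  ->  whatever the backend returns
#                                               for /count/visits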
| [
"flask.Flask"
] | [((112, 127), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (117, 127), False, 'from flask import Flask\n')] |
from importlib.resources import path
from jsonschema_typed import JSONSchema
with path("sentry_data_schemas", "event.schema.json") as schema_path:
EventData = JSONSchema["var:sentry_data_schemas:schema_path"]
| [
"importlib.resources.path"
] | [((83, 131), 'importlib.resources.path', 'path', (['"""sentry_data_schemas"""', '"""event.schema.json"""'], {}), "('sentry_data_schemas', 'event.schema.json')\n", (87, 131), False, 'from importlib.resources import path\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import pontoon.base.models
class Migration(migrations.Migration):
dependencies = [
("base", "0006_auto_20150602_0616"),
]
operations = [
migrations.AddField(
model_name="locale",
name="cldr_plurals",
field=models.CommaSeparatedIntegerField(
blank=True,
max_length=11,
verbose_name=b"CLDR Plurals",
validators=[pontoon.base.models.validate_cldr],
),
),
migrations.AlterField(
model_name="resource",
name="format",
field=models.CharField(
blank=True,
max_length=20,
verbose_name=b"Format",
choices=[
(b"po", b"po"),
(b"xliff", b"xliff"),
(b"properties", b"properties"),
(b"dtd", b"dtd"),
(b"inc", b"inc"),
(b"ini", b"ini"),
(b"lang", b"lang"),
(b"l20n", b"l20n"),
],
),
),
migrations.AlterField(
model_name="translation",
name="date",
field=models.DateTimeField(auto_now_add=True),
),
]
| [
"django.db.models.CommaSeparatedIntegerField",
"django.db.models.CharField",
"django.db.models.DateTimeField"
] | [((380, 523), 'django.db.models.CommaSeparatedIntegerField', 'models.CommaSeparatedIntegerField', ([], {'blank': '(True)', 'max_length': '(11)', 'verbose_name': "b'CLDR Plurals'", 'validators': '[pontoon.base.models.validate_cldr]'}), "(blank=True, max_length=11, verbose_name=\n b'CLDR Plurals', validators=[pontoon.base.models.validate_cldr])\n", (413, 523), False, 'from django.db import models, migrations\n'), ((721, 976), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)', 'verbose_name': "b'Format'", 'choices': "[(b'po', b'po'), (b'xliff', b'xliff'), (b'properties', b'properties'), (\n b'dtd', b'dtd'), (b'inc', b'inc'), (b'ini', b'ini'), (b'lang', b'lang'),\n (b'l20n', b'l20n')]"}), "(blank=True, max_length=20, verbose_name=b'Format', choices\n =[(b'po', b'po'), (b'xliff', b'xliff'), (b'properties', b'properties'),\n (b'dtd', b'dtd'), (b'inc', b'inc'), (b'ini', b'ini'), (b'lang', b'lang'\n ), (b'l20n', b'l20n')])\n", (737, 976), False, 'from django.db import models, migrations\n'), ((1345, 1384), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1365, 1384), False, 'from django.db import models, migrations\n')] |
# Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long,too-many-arguments,too-many-locals
import json
import os
import click
from platformio import fs
from platformio.package.commands.install import install_project_dependencies
from platformio.package.manager.platform import PlatformPackageManager
from platformio.platform.exception import UnknownBoard
from platformio.project.config import ProjectConfig
from platformio.project.generator import ProjectGenerator
from platformio.project.helpers import is_platformio_project
def validate_boards(ctx, param, value): # pylint: disable=W0613
pm = PlatformPackageManager()
for id_ in value:
try:
pm.board_config(id_)
except UnknownBoard:
raise click.BadParameter(
"`%s`. Please search for board ID using `platformio boards` "
"command" % id_
)
return value
@click.command("init", short_help="Initialize a project or update existing")
@click.option(
"--project-dir",
"-d",
default=os.getcwd,
type=click.Path(
exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
),
)
@click.option("-b", "--board", multiple=True, metavar="ID", callback=validate_boards)
@click.option("--ide", type=click.Choice(ProjectGenerator.get_supported_ides()))
@click.option("-e", "--environment", help="Update existing environment")
@click.option("-O", "--project-option", multiple=True)
@click.option("--env-prefix", default="")
@click.option("--no-install-dependencies", is_flag=True)
@click.option("-s", "--silent", is_flag=True)
def project_init_cmd(
project_dir,
board,
ide,
environment,
project_option,
env_prefix,
no_install_dependencies,
silent,
):
is_new_project = not is_platformio_project(project_dir)
if is_new_project:
if not silent:
print_header(project_dir)
init_base_project(project_dir)
if environment:
update_project_env(project_dir, environment, project_option)
elif board:
update_board_envs(project_dir, board, project_option, env_prefix)
# resolve project dependencies
if not no_install_dependencies and (environment or board):
install_project_dependencies(
options=dict(
project_dir=project_dir,
environments=[environment] if environment else [],
silent=silent,
)
)
if ide:
if not silent:
click.echo(
"Updating metadata for the %s IDE..." % click.style(ide, fg="cyan")
)
with fs.cd(project_dir):
config = ProjectConfig.get_instance(
os.path.join(project_dir, "platformio.ini")
)
config.validate()
ProjectGenerator(config, environment, ide, board).generate()
if is_new_project:
init_cvs_ignore(project_dir)
if not silent:
print_footer(is_new_project)
def print_header(project_dir):
if project_dir == os.getcwd():
click.secho("\nThe current working directory ", fg="yellow", nl=False)
try:
click.secho(project_dir, fg="cyan", nl=False)
except UnicodeEncodeError:
click.secho(json.dumps(project_dir), fg="cyan", nl=False)
click.secho(" will be used for the project.", fg="yellow")
click.echo("")
click.echo("The next files/directories have been created in ", nl=False)
try:
click.secho(project_dir, fg="cyan")
except UnicodeEncodeError:
click.secho(json.dumps(project_dir), fg="cyan")
click.echo("%s - Put project header files here" % click.style("include", fg="cyan"))
click.echo(
"%s - Put here project specific (private) libraries"
% click.style("lib", fg="cyan")
)
click.echo("%s - Put project source files here" % click.style("src", fg="cyan"))
click.echo(
"%s - Project Configuration File" % click.style("platformio.ini", fg="cyan")
)
def print_footer(is_new_project):
if is_new_project:
return click.secho(
"\nProject has been successfully initialized! Useful commands:\n"
"`pio run` - process/build project from the current directory\n"
"`pio run --target upload` or `pio run -t upload` "
"- upload firmware to a target\n"
"`pio run --target clean` - clean project (remove compiled files)"
"\n`pio run --help` - additional information",
fg="green",
)
return click.secho(
"Project has been successfully updated!",
fg="green",
)
def init_base_project(project_dir):
with fs.cd(project_dir):
config = ProjectConfig()
config.save()
dir_to_readme = [
(config.get("platformio", "src_dir"), None),
(config.get("platformio", "include_dir"), init_include_readme),
(config.get("platformio", "lib_dir"), init_lib_readme),
(config.get("platformio", "test_dir"), init_test_readme),
]
for (path, cb) in dir_to_readme:
if os.path.isdir(path):
continue
os.makedirs(path)
if cb:
cb(path)
def init_include_readme(include_dir):
with open(os.path.join(include_dir, "README"), mode="w", encoding="utf8") as fp:
fp.write(
"""
This directory is intended for project header files.
A header file is a file containing C declarations and macro definitions
to be shared between several project source files. You request the use of a
header file in your project source file (C, C++, etc) located in `src` folder
by including it, with the C preprocessing directive `#include'.
```src/main.c
#include "header.h"
int main (void)
{
...
}
```
Including a header file produces the same results as copying the header file
into each source file that needs it. Such copying would be time-consuming
and error-prone. With a header file, the related declarations appear
in only one place. If they need to be changed, they can be changed in one
place, and programs that include the header file will automatically use the
new version when next recompiled. The header file eliminates the labor of
finding and changing all the copies as well as the risk that a failure to
find one copy will result in inconsistencies within a program.
In C, the usual convention is to give header files names that end with `.h'.
It is most portable to use only letters, digits, dashes, and underscores in
header file names, and at most one dot.
Read more about using header files in official GCC documentation:
* Include Syntax
* Include Operation
* Once-Only Headers
* Computed Includes
https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html
""",
)
def init_lib_readme(lib_dir):
with open(os.path.join(lib_dir, "README"), mode="w", encoding="utf8") as fp:
fp.write(
"""
This directory is intended for project specific (private) libraries.
PlatformIO will compile them to static libraries and link into executable file.
The source code of each library should be placed in its own separate directory
("lib/your_library_name/[here are source files]").
For example, see a structure of the following two libraries `Foo` and `Bar`:
|--lib
| |
| |--Bar
| | |--docs
| | |--examples
| | |--src
| | |- Bar.c
| | |- Bar.h
| | |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html
| |
| |--Foo
| | |- Foo.c
| | |- Foo.h
| |
| |- README --> THIS FILE
|
|- platformio.ini
|--src
|- main.c
and the contents of `src/main.c`:
```
#include <Foo.h>
#include <Bar.h>
int main (void)
{
...
}
```
PlatformIO Library Dependency Finder will automatically find dependent
libraries by scanning project source files.
More information about PlatformIO Library Dependency Finder
- https://docs.platformio.org/page/librarymanager/ldf.html
""",
)
def init_test_readme(test_dir):
with open(os.path.join(test_dir, "README"), mode="w", encoding="utf8") as fp:
fp.write(
"""
This directory is intended for PlatformIO Test Runner and project tests.
Unit Testing is a software testing method by which individual units of
source code, sets of one or more MCU program modules together with associated
control data, usage procedures, and operating procedures, are tested to
determine whether they are fit for use. Unit testing finds problems early
in the development cycle.
More information about PlatformIO Unit Testing:
- https://docs.platformio.org/en/latest/advanced/unit-testing/index.html
""",
)
def init_cvs_ignore(project_dir):
conf_path = os.path.join(project_dir, ".gitignore")
if os.path.isfile(conf_path):
return
with open(conf_path, mode="w", encoding="utf8") as fp:
fp.write(".pio\n")
def update_board_envs(project_dir, board_ids, project_option, env_prefix):
config = ProjectConfig(
os.path.join(project_dir, "platformio.ini"), parse_extra=False
)
used_boards = []
for section in config.sections():
cond = [section.startswith("env:"), config.has_option(section, "board")]
if all(cond):
used_boards.append(config.get(section, "board"))
pm = PlatformPackageManager()
modified = False
for id_ in board_ids:
board_config = pm.board_config(id_)
if id_ in used_boards:
continue
used_boards.append(id_)
modified = True
envopts = {"platform": board_config["platform"], "board": id_}
# find default framework for board
frameworks = board_config.get("frameworks")
if frameworks:
envopts["framework"] = frameworks[0]
for item in project_option:
if "=" not in item:
continue
_name, _value = item.split("=", 1)
envopts[_name.strip()] = _value.strip()
section = "env:%s%s" % (env_prefix, id_)
config.add_section(section)
for option, value in envopts.items():
config.set(section, option, value)
if modified:
config.save()
def update_project_env(project_dir, environment, project_option):
if not project_option:
return
config = ProjectConfig(
os.path.join(project_dir, "platformio.ini"), parse_extra=False
)
section = "env:%s" % environment
if not config.has_section(section):
config.add_section(section)
for item in project_option:
if "=" not in item:
continue
_name, _value = item.split("=", 1)
config.set(section, _name.strip(), _value.strip())
config.save()
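# Editorial note (not part of the original module): for a board ID such as
# "uno" with an empty env_prefix, update_board_envs() above appends a section
# along these lines to platformio.ini (the exact platform/framework values come
# from the board manifest):
#
#   [env:uno]
#   platform = atmelavr
#   board = uno
#   framework = arduino
#
# Any extra "-O key=value" options given on the command line are written into
# the same section by the key/value loops in update_board_envs() and
# update_project_env().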
| [
"click.echo",
"platformio.project.generator.ProjectGenerator",
"platformio.project.config.ProjectConfig",
"click.BadParameter",
"platformio.fs.cd",
"click.secho",
"click.option",
"json.dumps",
"os.path.isdir",
"click.command",
"platformio.project.helpers.is_platformio_project",
"platformio.project.generator.ProjectGenerator.get_supported_ides",
"os.path.isfile",
"os.makedirs",
"click.style",
"os.path.join",
"os.getcwd",
"platformio.package.manager.platform.PlatformPackageManager",
"click.Path"
] | [((1482, 1557), 'click.command', 'click.command', (['"""init"""'], {'short_help': '"""Initialize a project or update existing"""'}), "('init', short_help='Initialize a project or update existing')\n", (1495, 1557), False, 'import click\n'), ((1744, 1833), 'click.option', 'click.option', (['"""-b"""', '"""--board"""'], {'multiple': '(True)', 'metavar': '"""ID"""', 'callback': 'validate_boards'}), "('-b', '--board', multiple=True, metavar='ID', callback=\n validate_boards)\n", (1756, 1833), False, 'import click\n'), ((1911, 1982), 'click.option', 'click.option', (['"""-e"""', '"""--environment"""'], {'help': '"""Update existing environment"""'}), "('-e', '--environment', help='Update existing environment')\n", (1923, 1982), False, 'import click\n'), ((1984, 2037), 'click.option', 'click.option', (['"""-O"""', '"""--project-option"""'], {'multiple': '(True)'}), "('-O', '--project-option', multiple=True)\n", (1996, 2037), False, 'import click\n'), ((2039, 2079), 'click.option', 'click.option', (['"""--env-prefix"""'], {'default': '""""""'}), "('--env-prefix', default='')\n", (2051, 2079), False, 'import click\n'), ((2081, 2136), 'click.option', 'click.option', (['"""--no-install-dependencies"""'], {'is_flag': '(True)'}), "('--no-install-dependencies', is_flag=True)\n", (2093, 2136), False, 'import click\n'), ((2138, 2182), 'click.option', 'click.option', (['"""-s"""', '"""--silent"""'], {'is_flag': '(True)'}), "('-s', '--silent', is_flag=True)\n", (2150, 2182), False, 'import click\n'), ((1178, 1202), 'platformio.package.manager.platform.PlatformPackageManager', 'PlatformPackageManager', ([], {}), '()\n', (1200, 1202), False, 'from platformio.package.manager.platform import PlatformPackageManager\n'), ((3981, 4053), 'click.echo', 'click.echo', (['"""The next files/directories have been created in """'], {'nl': '(False)'}), "('The next files/directories have been created in ', nl=False)\n", (3991, 4053), False, 'import click\n'), ((5133, 5198), 'click.secho', 'click.secho', (['"""Project has been successfully updated!"""'], {'fg': '"""green"""'}), "('Project has been successfully updated!', fg='green')\n", (5144, 5198), False, 'import click\n'), ((9318, 9357), 'os.path.join', 'os.path.join', (['project_dir', '""".gitignore"""'], {}), "(project_dir, '.gitignore')\n", (9330, 9357), False, 'import os\n'), ((9365, 9390), 'os.path.isfile', 'os.path.isfile', (['conf_path'], {}), '(conf_path)\n', (9379, 9390), False, 'import os\n'), ((9908, 9932), 'platformio.package.manager.platform.PlatformPackageManager', 'PlatformPackageManager', ([], {}), '()\n', (9930, 9932), False, 'from platformio.package.manager.platform import PlatformPackageManager\n'), ((2364, 2398), 'platformio.project.helpers.is_platformio_project', 'is_platformio_project', (['project_dir'], {}), '(project_dir)\n', (2385, 2398), False, 'from platformio.project.helpers import is_platformio_project\n'), ((1636, 1729), 'click.Path', 'click.Path', ([], {'exists': '(True)', 'file_okay': '(False)', 'dir_okay': '(True)', 'writable': '(True)', 'resolve_path': '(True)'}), '(exists=True, file_okay=False, dir_okay=True, writable=True,\n resolve_path=True)\n', (1646, 1729), False, 'import click\n'), ((3618, 3629), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3627, 3629), False, 'import os\n'), ((3639, 3712), 'click.secho', 'click.secho', (['"""\nThe current working directory """'], {'fg': '"""yellow"""', 'nl': '(False)'}), '("""\nThe current working directory """, fg=\'yellow\', nl=False)\n', (3650, 3712), False, 'import click\n'), ((3894, 
3952), 'click.secho', 'click.secho', (['""" will be used for the project."""'], {'fg': '"""yellow"""'}), "(' will be used for the project.', fg='yellow')\n", (3905, 3952), False, 'import click\n'), ((3961, 3975), 'click.echo', 'click.echo', (['""""""'], {}), "('')\n", (3971, 3975), False, 'import click\n'), ((4071, 4106), 'click.secho', 'click.secho', (['project_dir'], {'fg': '"""cyan"""'}), "(project_dir, fg='cyan')\n", (4082, 4106), False, 'import click\n'), ((4672, 5020), 'click.secho', 'click.secho', (['"""\nProject has been successfully initialized! Useful commands:\n`pio run` - process/build project from the current directory\n`pio run --target upload` or `pio run -t upload` - upload firmware to a target\n`pio run --target clean` - clean project (remove compiled files)\n`pio run --help` - additional information"""'], {'fg': '"""green"""'}), '(\n """\nProject has been successfully initialized! Useful commands:\n`pio run` - process/build project from the current directory\n`pio run --target upload` or `pio run -t upload` - upload firmware to a target\n`pio run --target clean` - clean project (remove compiled files)\n`pio run --help` - additional information"""\n , fg=\'green\')\n', (4683, 5020), False, 'import click\n'), ((5269, 5287), 'platformio.fs.cd', 'fs.cd', (['project_dir'], {}), '(project_dir)\n', (5274, 5287), False, 'from platformio import fs\n'), ((5306, 5321), 'platformio.project.config.ProjectConfig', 'ProjectConfig', ([], {}), '()\n', (5319, 5321), False, 'from platformio.project.config import ProjectConfig\n'), ((9606, 9649), 'os.path.join', 'os.path.join', (['project_dir', '"""platformio.ini"""'], {}), "(project_dir, 'platformio.ini')\n", (9618, 9649), False, 'import os\n'), ((10930, 10973), 'os.path.join', 'os.path.join', (['project_dir', '"""platformio.ini"""'], {}), "(project_dir, 'platformio.ini')\n", (10942, 10973), False, 'import os\n'), ((3199, 3217), 'platformio.fs.cd', 'fs.cd', (['project_dir'], {}), '(project_dir)\n', (3204, 3217), False, 'from platformio import fs\n'), ((1870, 1907), 'platformio.project.generator.ProjectGenerator.get_supported_ides', 'ProjectGenerator.get_supported_ides', ([], {}), '()\n', (1905, 1907), False, 'from platformio.project.generator import ProjectGenerator\n'), ((3735, 3780), 'click.secho', 'click.secho', (['project_dir'], {'fg': '"""cyan"""', 'nl': '(False)'}), "(project_dir, fg='cyan', nl=False)\n", (3746, 3780), False, 'import click\n'), ((4248, 4281), 'click.style', 'click.style', (['"""include"""'], {'fg': '"""cyan"""'}), "('include', fg='cyan')\n", (4259, 4281), False, 'import click\n'), ((4370, 4399), 'click.style', 'click.style', (['"""lib"""'], {'fg': '"""cyan"""'}), "('lib', fg='cyan')\n", (4381, 4399), False, 'import click\n'), ((4460, 4489), 'click.style', 'click.style', (['"""src"""'], {'fg': '"""cyan"""'}), "('src', fg='cyan')\n", (4471, 4489), False, 'import click\n'), ((4551, 4591), 'click.style', 'click.style', (['"""platformio.ini"""'], {'fg': '"""cyan"""'}), "('platformio.ini', fg='cyan')\n", (4562, 4591), False, 'import click\n'), ((5707, 5726), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (5720, 5726), False, 'import os\n'), ((5765, 5782), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (5776, 5782), False, 'import os\n'), ((5881, 5916), 'os.path.join', 'os.path.join', (['include_dir', '"""README"""'], {}), "(include_dir, 'README')\n", (5893, 5916), False, 'import os\n'), ((7432, 7463), 'os.path.join', 'os.path.join', (['lib_dir', '"""README"""'], {}), "(lib_dir, 'README')\n", (7444, 
7463), False, 'import os\n'), ((8632, 8664), 'os.path.join', 'os.path.join', (['test_dir', '"""README"""'], {}), "(test_dir, 'README')\n", (8644, 8664), False, 'import os\n'), ((1318, 1417), 'click.BadParameter', 'click.BadParameter', (["('`%s`. Please search for board ID using `platformio boards` command' % id_)"], {}), "(\n '`%s`. Please search for board ID using `platformio boards` command' % id_)\n", (1336, 1417), False, 'import click\n'), ((3284, 3327), 'os.path.join', 'os.path.join', (['project_dir', '"""platformio.ini"""'], {}), "(project_dir, 'platformio.ini')\n", (3296, 3327), False, 'import os\n'), ((4158, 4181), 'json.dumps', 'json.dumps', (['project_dir'], {}), '(project_dir)\n', (4168, 4181), False, 'import json\n'), ((3144, 3171), 'click.style', 'click.style', (['ide'], {'fg': '"""cyan"""'}), "(ide, fg='cyan')\n", (3155, 3171), False, 'import click\n'), ((3384, 3433), 'platformio.project.generator.ProjectGenerator', 'ProjectGenerator', (['config', 'environment', 'ide', 'board'], {}), '(config, environment, ide, board)\n', (3400, 3433), False, 'from platformio.project.generator import ProjectGenerator\n'), ((3840, 3863), 'json.dumps', 'json.dumps', (['project_dir'], {}), '(project_dir)\n', (3850, 3863), False, 'import json\n')] |
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import os
from osc_choochoo.tests import base
from osc_choochoo.tests import fakes
from osc_choochoo.v1 import train
# Load the plugin init module for the plugin list and show commands
plugin_name = 'osc_choochoo'
plugin_client = 'osc_choochoo.plugin'
class FakeTrainV1Client(object):
def __init__(self, **kwargs):
self.auth_token = kwargs['token']
self.management_url = kwargs['endpoint']
class TestTrainV1(base.TestCommand):
def setUp(self):
super(TestTrainV1, self).setUp()
self.app.client_manager.osc_choochoo = FakeTrainV1Client(
endpoint=fakes.AUTH_URL,
token=fakes.AUTH_TOKEN,
)
class TestTrainList(TestTrainV1):
def setUp(self):
super(TestTrainList, self).setUp()
# Get the command object to test
self.cmd = train.TrainList(self.app, None)
def test_train_list(self):
arglist = []
verifylist = []
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
collist = ('Name', )
datalist = ['1.txt', '2.txt']
with mock.patch('os.listdir') as mock_list:
mock_list.return_value = datalist
# DisplayCommandBase.take_action() returns two tuples
columns, data = self.cmd.take_action(parsed_args)
self.assertEqual(collist, columns)
for d in data:
self.assertTrue(d[0] + '.txt' in datalist)
class TestTrainShow(TestTrainV1):
def setUp(self):
super(TestTrainShow, self).setUp()
# Get the command object to test
self.cmd = train.TrainShow(self.app, None)
def test_train_show(self):
arglist = [
plugin_name,
]
verifylist = [
('name', plugin_name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
collist = ['name', 'data']
datalist = [
plugin_name,
'dummy',
]
with mock.patch('io.open') as mock_open:
mock_open.return_value = mock.MagicMock()
m_file = mock_open.return_value.__enter__.return_value
m_file.read.return_value = 'dummy'
columns, data = self.cmd.take_action(parsed_args)
mock_open.assert_called_once_with(
os.path.join(
train.DATA_PATH,
plugin_name + '.txt',
)
)
self.assertEqual(collist, columns)
self.assertEqual(datalist, data)
| [
"osc_choochoo.v1.train.TrainList",
"mock.patch",
"os.path.join",
"osc_choochoo.v1.train.TrainShow",
"mock.MagicMock"
] | [((1438, 1469), 'osc_choochoo.v1.train.TrainList', 'train.TrainList', (['self.app', 'None'], {}), '(self.app, None)\n', (1453, 1469), False, 'from osc_choochoo.v1 import train\n'), ((2209, 2240), 'osc_choochoo.v1.train.TrainShow', 'train.TrainShow', (['self.app', 'None'], {}), '(self.app, None)\n', (2224, 2240), False, 'from osc_choochoo.v1 import train\n'), ((1700, 1724), 'mock.patch', 'mock.patch', (['"""os.listdir"""'], {}), "('os.listdir')\n", (1710, 1724), False, 'import mock\n'), ((2594, 2615), 'mock.patch', 'mock.patch', (['"""io.open"""'], {}), "('io.open')\n", (2604, 2615), False, 'import mock\n'), ((2667, 2683), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (2681, 2683), False, 'import mock\n'), ((2925, 2976), 'os.path.join', 'os.path.join', (['train.DATA_PATH', "(plugin_name + '.txt')"], {}), "(train.DATA_PATH, plugin_name + '.txt')\n", (2937, 2976), False, 'import os\n')] |
#!/usr/bin/env python3
import sys
from os.path import dirname, abspath, join
import subprocess
# Note this does not resolve symbolic links
# https://stackoverflow.com/a/17806123
FIREFOX_BINARY = join(dirname(abspath(__file__)), 'firefox')
argvs = list(sys.argv)
argvs[0] = FIREFOX_BINARY
# geckodriver will run `firefox -version` first to check the version
if len(sys.argv) == 2 and sys.argv[1] == '-version':
subprocess.check_call(argvs)
exit(0)
# First search for the -tmpprofile option
new_profile_path = None
for idx, argv in enumerate(sys.argv):
if argv == '-tmpprofile':
new_profile_path = sys.argv[idx + 1]
break
# If it's present, replace profile with tmp_profile
if new_profile_path:
for idx, argv in enumerate(sys.argv):
if argv == '-profile':
old_profile_path = sys.argv[idx + 1]
subprocess.check_call(['rm', '-r', new_profile_path])
subprocess.check_call(['cp', '-r', old_profile_path, new_profile_path])
argvs[idx+1] = new_profile_path
break
# Firefox will ignore the -tmpprofile option
subprocess.check_call(argvs)
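# Editorial note (not part of the original script): geckodriver launches the
# wrapped binary with a freshly generated profile, roughly
#   firefox -profile /tmp/rust_mozprofile<random> ...
# Passing an additional "-tmpprofile /stable/profile/path" argument makes this
# wrapper copy that generated profile to the stable path and launch Firefox
# from the copy instead.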
| [
"os.path.abspath",
"subprocess.check_call"
] | [((1105, 1133), 'subprocess.check_call', 'subprocess.check_call', (['argvs'], {}), '(argvs)\n', (1126, 1133), False, 'import subprocess\n'), ((417, 445), 'subprocess.check_call', 'subprocess.check_call', (['argvs'], {}), '(argvs)\n', (438, 445), False, 'import subprocess\n'), ((210, 227), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (217, 227), False, 'from os.path import dirname, abspath, join\n'), ((859, 912), 'subprocess.check_call', 'subprocess.check_call', (["['rm', '-r', new_profile_path]"], {}), "(['rm', '-r', new_profile_path])\n", (880, 912), False, 'import subprocess\n'), ((925, 996), 'subprocess.check_call', 'subprocess.check_call', (["['cp', '-r', old_profile_path, new_profile_path]"], {}), "(['cp', '-r', old_profile_path, new_profile_path])\n", (946, 996), False, 'import subprocess\n')] |
import json
import logging
from collections import OrderedDict
from decimal import ROUND_HALF_UP, Decimal
from typing import Any, Dict, Union
import pytz
from django import forms
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.forms import Form
from django.http import HttpRequest
from django.template.loader import get_template
from django.utils.timezone import now
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from django_countries import Countries
from i18nfield.forms import I18nFormField, I18nTextarea, I18nTextInput
from i18nfield.strings import LazyI18nString
from pretix.base.forms import PlaceholderValidator
from pretix.base.models import (
CartPosition, Event, InvoiceAddress, Order, OrderPayment, OrderRefund,
Quota,
)
from pretix.base.reldate import RelativeDateField, RelativeDateWrapper
from pretix.base.settings import SettingsSandbox
from pretix.base.signals import register_payment_providers
from pretix.base.templatetags.money import money_filter
from pretix.base.templatetags.rich_text import rich_text
from pretix.helpers.money import DecimalTextInput
from pretix.presale.views import get_cart_total
from pretix.presale.views.cart import cart_session, get_or_create_cart_id
logger = logging.getLogger(__name__)
class PaymentProviderForm(Form):
def clean(self):
cleaned_data = super().clean()
for k, v in self.fields.items():
val = cleaned_data.get(k)
if v._required and not val:
self.add_error(k, _('This field is required.'))
class BasePaymentProvider:
"""
This is the base class for all payment providers.
"""
def __init__(self, event: Event):
self.event = event
self.settings = SettingsSandbox('payment', self.identifier, event)
# Default values
if self.settings.get('_fee_reverse_calc') is None:
self.settings.set('_fee_reverse_calc', True)
def __str__(self):
return self.identifier
@property
def is_implicit(self) -> bool:
"""
Returns whether or not this payment provider is an "implicit" payment provider that will
*always* and unconditionally be used if is_allowed() returns True and does not require any input.
This is intended to be used by the FreePaymentProvider, which skips the payment choice page.
By default, this returns ``False``. Please do not set this if you don't know exactly what you are doing.
"""
return False
@property
def is_meta(self) -> bool:
"""
Returns whether or not this payment provider is a "meta" payment provider that only
works as a settings holder for other payment providers and should never be used directly. This
is a trick to implement payment gateways with multiple payment methods but unified payment settings.
Take a look at the built-in stripe provider to see how this might be used.
By default, this returns ``False``.
"""
return False
@property
def is_enabled(self) -> bool:
"""
Returns whether or not this payment provider is enabled.
By default, this is determined by the value of the ``_enabled`` setting.
"""
return self.settings.get('_enabled', as_type=bool)
@property
def test_mode_message(self) -> str:
"""
If this property is set to a string, this will be displayed when this payment provider is selected
while the event is in test mode. You should use it to explain to your user how your plugin behaves,
e.g. if it falls back to a test mode automatically as well or if actual payments will be performed.
If you do not set this (or, return ``None``), pretix will show a default message warning the user
that this plugin does not support test mode payments.
"""
return None
def calculate_fee(self, price: Decimal) -> Decimal:
"""
Calculate the fee for this payment provider which will be added to the
final price before fees (but after taxes). It should include any taxes.
The default implementation makes use of the setting ``_fee_abs`` for an
absolute fee and ``_fee_percent`` for a percentage.
:param price: The total value without the payment method fee, after taxes.
"""
fee_abs = self.settings.get('_fee_abs', as_type=Decimal, default=0)
fee_percent = self.settings.get('_fee_percent', as_type=Decimal, default=0)
fee_reverse_calc = self.settings.get('_fee_reverse_calc', as_type=bool, default=True)
places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
if fee_reverse_calc:
return ((price + fee_abs) * (1 / (1 - fee_percent / 100)) - price).quantize(
Decimal('1') / 10 ** places, ROUND_HALF_UP
)
else:
return (price * fee_percent / 100 + fee_abs).quantize(
Decimal('1') / 10 ** places, ROUND_HALF_UP
)
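# Worked example (editorial note, not part of the original source): with
# _fee_percent = 3, _fee_abs = 0.35 and price = 100.00 in a two-decimal
# currency, the reverse calculation gives
#   (100 + 0.35) / (1 - 0.03) - 100 = 3.45
# so the buyer pays 103.45 and, after the provider deducts 3% plus 0.35 from
# that gross amount, roughly the original 100.00 remains.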
@property
def verbose_name(self) -> str:
"""
A human-readable name for this payment provider. This should
be short but self-explaining. Good examples include 'Bank transfer'
and 'Credit card via Stripe'.
"""
raise NotImplementedError() # NOQA
@property
def public_name(self) -> str:
"""
A human-readable name for this payment provider to be shown to the public.
This should be short but self-explaining. Good examples include 'Bank transfer'
and 'Credit card', but 'Credit card via Stripe' might be too explicit. By default,
this is the same as ``verbose_name``
"""
return self.verbose_name
@property
def identifier(self) -> str:
"""
A short and unique identifier for this payment provider.
This should only contain lowercase letters and in most
cases will be the same as your package name.
"""
raise NotImplementedError() # NOQA
@property
def abort_pending_allowed(self) -> bool:
"""
Whether or not a user can abort a payment in pending state to switch to another
payment method. This returns ``False`` by default, which is no guarantee that
aborting a pending payment can never happen; it just hides the frontend button
to avoid users accidentally committing double payments.
"""
return False
@property
def settings_form_fields(self) -> dict:
"""
When the event's administrator visits the event configuration
page, this method is called to return the configuration fields available.
It should therefore return a dictionary where the keys should be (unprefixed)
settings keys and the values should be corresponding Django form fields.
The default implementation returns the appropriate fields for the ``_enabled``,
``_fee_abs``, ``_fee_percent`` and ``_availability_date`` settings mentioned above.
We suggest that you return an ``OrderedDict`` object instead of a dictionary
and make use of the default implementation. Your implementation could look
like this::
@property
def settings_form_fields(self):
return OrderedDict(
list(super().settings_form_fields.items()) + [
('bank_details',
forms.CharField(
widget=forms.Textarea,
label=_('Bank account details'),
required=False
))
]
)
.. WARNING:: It is highly discouraged to alter the ``_enabled`` field of the default
implementation.
"""
places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
d = OrderedDict([
('_enabled',
forms.BooleanField(
label=_('Enable payment method'),
required=False,
)),
('_availability_date',
RelativeDateField(
label=_('Available until'),
help_text=_('Users will not be able to choose this payment provider after the given date.'),
required=False,
)),
('_invoice_text',
I18nFormField(
label=_('Text on invoices'),
help_text=_('Will be printed just below the payment figures and above the closing text on invoices. '
'This will only be used if the invoice is generated before the order is paid. If the '
'invoice is generated later, it will show a text stating that it has already been paid.'),
required=False,
widget=I18nTextarea,
widget_kwargs={'attrs': {'rows': '2'}}
)),
('_total_min',
forms.DecimalField(
label=_('Minimum order total'),
help_text=_('This payment will be available only if the order total is equal to or exceeds the given '
'value. The order total for this purpose may be computed without taking the fees imposed '
'by this payment method into account.'),
localize=True,
required=False,
decimal_places=places,
widget=DecimalTextInput(places=places)
)),
('_total_max',
forms.DecimalField(
label=_('Maximum order total'),
help_text=_('This payment will be available only if the order total is equal to or below the given '
'value. The order total for this purpose may be computed without taking the fees imposed '
'by this payment method into account.'),
localize=True,
required=False,
decimal_places=places,
widget=DecimalTextInput(places=places)
)),
('_fee_abs',
forms.DecimalField(
label=_('Additional fee'),
help_text=_('Absolute value'),
localize=True,
required=False,
decimal_places=places,
widget=DecimalTextInput(places=places)
)),
('_fee_percent',
forms.DecimalField(
label=_('Additional fee'),
help_text=_('Percentage of the order total.'),
localize=True,
required=False,
)),
('_fee_reverse_calc',
forms.BooleanField(
label=_('Calculate the fee from the total value including the fee.'),
help_text=_('We recommend to enable this if you want your users to pay the payment fees of your '
'payment provider. <a href="{docs_url}" target="_blank" rel="noopener">Click here '
'for detailed information on what this does.</a> Don\'t forget to set the correct fees '
'above!').format(docs_url='https://docs.pretix.eu/en/latest/user/payments/fees.html'),
required=False
)),
('_restricted_countries',
forms.MultipleChoiceField(
label=_('Restrict to countries'),
choices=Countries(),
help_text=_('Only allow choosing this payment provider for invoice addresses in the selected '
'countries. If you don\'t select any country, all countries are allowed. This is only '
'enabled if the invoice address is required.'),
widget=forms.CheckboxSelectMultiple(
attrs={'class': 'scrolling-multiple-choice'}
),
required=False,
disabled=not self.event.settings.invoice_address_required
)),
])
d['_restricted_countries']._as_type = list
return d
def settings_form_clean(self, cleaned_data):
"""
Overriding this method allows you to inject custom validation into the settings form.
:param cleaned_data: Form data as per previous validations.
:return: Please return the modified cleaned_data
"""
return cleaned_data
def settings_content_render(self, request: HttpRequest) -> str:
"""
When the event's administrator visits the event configuration
page, this method is called. It may return HTML containing additional information
that is displayed below the form fields configured in ``settings_form_fields``.
"""
return ""
def render_invoice_text(self, order: Order, payment: OrderPayment) -> str:
"""
This is called when an invoice for an order with this payment provider is generated.
The default implementation returns the content of the _invoice_text configuration
variable (an I18nString), or an empty string if unconfigured. For paid orders, the
default implementation always renders a string stating that the invoice is already paid.
"""
if order.status == Order.STATUS_PAID:
return pgettext_lazy('invoice', 'The payment for this invoice has already been received.')
return self.settings.get('_invoice_text', as_type=LazyI18nString, default='')
@property
def payment_form_fields(self) -> dict:
"""
This is used by the default implementation of :py:meth:`payment_form`.
It should return an object similar to :py:attr:`settings_form_fields`.
The default implementation returns an empty dictionary.
"""
return {}
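# Editorial sketch (not part of the original source): a concrete provider could
# override this property in the same style as settings_form_fields; the field
# name "reference" below is purely illustrative.
#
#   @property
#   def payment_form_fields(self):
#       return OrderedDict([
#           ('reference', forms.CharField(label=_('Payment reference'), required=True)),
#       ])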
def payment_form(self, request: HttpRequest) -> Form:
"""
This is called by the default implementation of :py:meth:`payment_form_render`
to obtain the form that is displayed to the user during the checkout
process. The default implementation constructs the form using
:py:attr:`payment_form_fields` and sets appropriate prefixes for the form
and all fields and fills the form with data from the user's session.
If you overwrite this, we strongly suggest that you inherit from
``PaymentProviderForm`` (from this module) that handles some nasty issues about
required fields for you.
"""
form = PaymentProviderForm(
data=(request.POST if request.method == 'POST' and request.POST.get("payment") == self.identifier else None),
prefix='payment_%s' % self.identifier,
initial={
k.replace('payment_%s_' % self.identifier, ''): v
for k, v in request.session.items()
if k.startswith('payment_%s_' % self.identifier)
}
)
form.fields = self.payment_form_fields
for k, v in form.fields.items():
v._required = v.required
v.required = False
v.widget.is_required = False
return form
def _is_still_available(self, now_dt=None, cart_id=None, order=None):
now_dt = now_dt or now()
tz = pytz.timezone(self.event.settings.timezone)
availability_date = self.settings.get('_availability_date', as_type=RelativeDateWrapper)
if availability_date:
if self.event.has_subevents and cart_id:
availability_date = min([
availability_date.datetime(se).date()
for se in self.event.subevents.filter(
id__in=CartPosition.objects.filter(
cart_id=cart_id, event=self.event
).values_list('subevent', flat=True)
)
])
elif self.event.has_subevents and order:
availability_date = min([
availability_date.datetime(se).date()
for se in self.event.subevents.filter(
id__in=order.positions.values_list('subevent', flat=True)
)
])
elif self.event.has_subevents:
logger.error('Payment provider is not subevent-ready.')
return False
else:
availability_date = availability_date.datetime(self.event).date()
return availability_date >= now_dt.astimezone(tz).date()
return True
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
"""
You can use this method to disable this payment provider for certain groups
of users, products or other criteria. If this method returns ``False``, the
user will not be able to select this payment method. This will only be called
during checkout, not on retrying.
The default implementation checks for the _availability_date setting to be either unset or in the future
and for the _total_max and _total_min requirements to be met. It also checks the ``_restrict_countries``
setting.
:param total: The total value without the payment method fee, after taxes.
.. versionchanged:: 1.17.0
The ``total`` parameter has been added. For backwards compatibility, this method is called again
without this parameter if it raises a ``TypeError`` on first try.
"""
timing = self._is_still_available(cart_id=get_or_create_cart_id(request))
pricing = True
if (self.settings._total_max is not None or self.settings._total_min is not None) and total is None:
raise ImproperlyConfigured('This payment provider does not support maximum or minimum amounts.')
if self.settings._total_max is not None:
pricing = pricing and total <= Decimal(self.settings._total_max)
if self.settings._total_min is not None:
pricing = pricing and total >= Decimal(self.settings._total_min)
def get_invoice_address():
if not hasattr(request, '_checkout_flow_invoice_address'):
cs = cart_session(request)
iapk = cs.get('invoice_address')
if not iapk:
request._checkout_flow_invoice_address = InvoiceAddress()
else:
try:
request._checkout_flow_invoice_address = InvoiceAddress.objects.get(pk=iapk, order__isnull=True)
except InvoiceAddress.DoesNotExist:
request._checkout_flow_invoice_address = InvoiceAddress()
return request._checkout_flow_invoice_address
if self.event.settings.invoice_address_required:
restricted_countries = self.settings.get('_restricted_countries', as_type=list)
if restricted_countries:
ia = get_invoice_address()
if str(ia.country) not in restricted_countries:
return False
return timing and pricing
def payment_form_render(self, request: HttpRequest, total: Decimal) -> str:
"""
When the user selects this provider as their preferred payment method,
they will be shown the HTML you return from this method.
The default implementation will call :py:meth:`payment_form`
and render the returned form. If your payment method doesn't require
the user to fill out form fields, you should just return a paragraph
of explanatory text.
"""
form = self.payment_form(request)
template = get_template('pretixpresale/event/checkout_payment_form_default.html')
ctx = {'request': request, 'form': form}
return template.render(ctx)
def checkout_confirm_render(self, request) -> str:
"""
If the user has successfully filled in their payment data, they will be redirected
to a confirmation page which lists all details of their order for a final review.
This method should return the HTML which should be displayed inside the
'Payment' box on this page.
In most cases, this should include a short summary of the user's input and
a short explanation on how the payment process will continue.
"""
raise NotImplementedError() # NOQA
def payment_pending_render(self, request: HttpRequest, payment: OrderPayment) -> str:
"""
Render customer-facing instructions on how to proceed with a pending payment
:return: HTML
"""
return ""
def checkout_prepare(self, request: HttpRequest, cart: Dict[str, Any]) -> Union[bool, str]:
"""
Will be called after the user selects this provider as their payment method.
If you provided a form to the user to enter payment data, this method should
at least store the user's input into their session.
This method should return ``False`` if the user's input was invalid, ``True``
if the input was valid and the frontend should continue with default behavior
or a string containing a URL if the user should be redirected somewhere else.
On errors, you should use Django's message framework to display an error message
to the user (or the normal form validation error messages).
The default implementation stores the input into the form returned by
:py:meth:`payment_form` in the user's session.
If your payment method requires you to redirect the user to an external provider,
this might be the place to do so.
.. IMPORTANT:: If this is called, the user has not yet confirmed their order.
You may NOT do anything which actually moves money.
:param cart: This dictionary contains at least the following keys:
positions:
A list of ``CartPosition`` objects that are annotated with the special
attributes ``count`` and ``total`` because multiple objects of the
same content are grouped into one.
raw:
The raw list of ``CartPosition`` objects in the user's cart
total:
The overall total *including* the fee for the payment method.
payment_fee:
The fee for the payment method.
"""
form = self.payment_form(request)
if form.is_valid():
for k, v in form.cleaned_data.items():
request.session['payment_%s_%s' % (self.identifier, k)] = v
return True
else:
return False
def payment_is_valid_session(self, request: HttpRequest) -> bool:
"""
This is called at the time the user tries to place the order. It should return
``True`` if the user's session is valid and all data your payment provider requires
in future steps is present.
"""
raise NotImplementedError() # NOQA
def execute_payment(self, request: HttpRequest, payment: OrderPayment) -> str:
"""
After the user has confirmed their purchase, this method will be called to complete
the payment process. This is the place to actually move the money if applicable.
You will be passed an :py:class:`pretix.base.models.OrderPayment` object that contains
the amount of money that should be paid.
If you need any special behavior, you can return a string
containing the URL the user will be redirected to. If you are done with your process
you should return the user to the order's detail page.
If the payment is completed, you should call ``payment.confirm()``. Please note that ``this`` might
raise a ``Quota.QuotaExceededException`` if (and only if) the payment term of this order is over and
some of the items are sold out. You should use the exception message to display a meaningful error
to the user.
The default implementation just returns ``None`` and therefore leaves the
order unpaid. The user will be redirected to the order's detail page by default.
On errors, you should raise a ``PaymentException``.
:param order: The order object
:param payment: An ``OrderPayment`` instance
"""
return None
def order_pending_mail_render(self, order: Order, payment: OrderPayment) -> str:
"""
After the user has submitted their order, they will receive a confirmation
email. You can return a string from this method if you want to add additional
information to this email.
:param order: The order object
:param payment: The payment object
"""
return ""
def order_change_allowed(self, order: Order) -> bool:
"""
Will be called to check whether it is allowed to change the payment method of
an order to this one.
The default implementation checks for the _availability_date setting to be either unset or in the future,
as well as for the _total_max, _total_min and _restricted_countries settings.
:param order: The order object
"""
ps = order.pending_sum
if self.settings._total_max is not None and ps > Decimal(self.settings._total_max):
return False
if self.settings._total_min is not None and ps < Decimal(self.settings._total_min):
return False
restricted_countries = self.settings.get('_restricted_countries', as_type=list)
if restricted_countries:
try:
ia = order.invoice_address
except InvoiceAddress.DoesNotExist:
return True
else:
if str(ia.country) not in restricted_countries:
return False
return self._is_still_available(order=order)
def payment_prepare(self, request: HttpRequest, payment: OrderPayment) -> Union[bool, str]:
"""
Will be called if the user retries to pay an unpaid order (after the user filled in
e.g. the form returned by :py:meth:`payment_form`) or if the user changes the payment
method.
It should return and report errors the same way as :py:meth:`checkout_prepare`, but
receives an ``Order`` object instead of a cart object.
Note: The ``Order`` object given to this method might be different from the version
stored in the database as its total will already contain the payment fee for the
new payment method.
"""
form = self.payment_form(request)
if form.is_valid():
for k, v in form.cleaned_data.items():
request.session['payment_%s_%s' % (self.identifier, k)] = v
return True
else:
return False
def payment_control_render(self, request: HttpRequest, payment: OrderPayment) -> str:
"""
Will be called if the *event administrator* views the details of a payment.
It should return HTML code containing information regarding the current payment
status and, if applicable, next steps.
The default implementation returns the verbose name of the payment provider.
:param order: The order object
"""
return ''
def payment_refund_supported(self, payment: OrderPayment) -> bool:
"""
Will be called to check if the provider supports automatic refunding for this
payment.
"""
return False
def payment_partial_refund_supported(self, payment: OrderPayment) -> bool:
"""
Will be called to check if the provider supports automatic partial refunding for this
payment.
"""
return False
def execute_refund(self, refund: OrderRefund):
"""
Will be called to execute a refund. Note that refunds have an amount property and can be partial.
This should transfer the money back (if possible).
On success, you should call ``refund.done()``.
On failure, you should raise a PaymentException.
"""
raise PaymentException(_('Automatic refunds are not supported by this payment provider.'))
def shred_payment_info(self, obj: Union[OrderPayment, OrderRefund]):
"""
When personal data is removed from an event, this method is called to scrub payment-related data
from a payment or refund. By default, it removes all info from the ``info`` attribute. You can override
this behavior if you want to retain attributes that are not personal data on their own, i.e. a
reference to a transaction in an external system. You can also override this to scrub more data, e.g.
data from external sources that is saved in LogEntry objects or other places.
:param order: An order
"""
obj.info = '{}'
obj.save(update_fields=['info'])
class PaymentException(Exception):
pass
class FreeOrderProvider(BasePaymentProvider):
is_implicit = True
is_enabled = True
identifier = "free"
def checkout_confirm_render(self, request: HttpRequest) -> str:
return _("No payment is required as this order only includes products which are free of charge.")
def payment_is_valid_session(self, request: HttpRequest) -> bool:
return True
@property
def verbose_name(self) -> str:
return _("Free of charge")
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
try:
payment.confirm(send_mail=False)
except Quota.QuotaExceededException as e:
raise PaymentException(str(e))
@property
def settings_form_fields(self) -> dict:
return {}
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
from .services.cart import get_fees
total = get_cart_total(request)
total += sum([f.value for f in get_fees(self.event, request, total, None, None)])
return total == 0
def order_change_allowed(self, order: Order) -> bool:
return False
class BoxOfficeProvider(BasePaymentProvider):
is_implicit = True
is_enabled = True
identifier = "boxoffice"
verbose_name = _("Box office")
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
try:
payment.confirm(send_mail=False)
except Quota.QuotaExceededException as e:
raise PaymentException(str(e))
@property
def settings_form_fields(self) -> dict:
return {}
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
return False
def order_change_allowed(self, order: Order) -> bool:
return False
def payment_control_render(self, request, payment) -> str:
if not payment.info:
return
payment_info = json.loads(payment.info)
template = get_template('pretixcontrol/boxoffice/payment.html')
ctx = {
'request': request,
'event': self.event,
'settings': self.settings,
'payment_info': payment_info,
'payment': payment,
'provider': self,
}
return template.render(ctx)
class ManualPayment(BasePaymentProvider):
identifier = 'manual'
verbose_name = _('Manual payment')
@property
def test_mode_message(self):
return _('In test mode, you can just manually mark this order as paid in the backend after it has been '
'created.')
@property
def is_implicit(self):
return 'pretix.plugins.manualpayment' not in self.event.plugins
def is_allowed(self, request: HttpRequest, total: Decimal=None):
return 'pretix.plugins.manualpayment' in self.event.plugins and super().is_allowed(request, total)
def order_change_allowed(self, order: Order):
return 'pretix.plugins.manualpayment' in self.event.plugins and super().order_change_allowed(order)
@property
def public_name(self):
return str(self.settings.get('public_name', as_type=LazyI18nString))
@property
def settings_form_fields(self):
d = OrderedDict(
[
('public_name', I18nFormField(
label=_('Payment method name'),
widget=I18nTextInput,
)),
('checkout_description', I18nFormField(
label=_('Payment process description during checkout'),
help_text=_('This text will be shown during checkout when the user selects this payment method. '
'It should give a short explanation on this payment method.'),
widget=I18nTextarea,
)),
('email_instructions', I18nFormField(
label=_('Payment process description in order confirmation emails'),
help_text=_('This text will be included for the {payment_info} placeholder in order confirmation '
'mails. It should instruct the user on how to proceed with the payment. You can use '
'the placeholders {order}, {total}, {currency} and {total_with_currency}'),
widget=I18nTextarea,
validators=[PlaceholderValidator(['{order}', '{total}', '{currency}', '{total_with_currency}'])],
)),
('pending_description', I18nFormField(
label=_('Payment process description for pending orders'),
help_text=_('This text will be shown on the order confirmation page for pending orders. '
                                'It should instruct the user on how to proceed with the payment. You can use '
'the placeholders {order}, {total}, {currency} and {total_with_currency}'),
widget=I18nTextarea,
validators=[PlaceholderValidator(['{order}', '{total}', '{currency}', '{total_with_currency}'])],
)),
] + list(super().settings_form_fields.items())
)
d.move_to_end('_enabled', last=False)
return d
def payment_form_render(self, request) -> str:
return rich_text(
str(self.settings.get('checkout_description', as_type=LazyI18nString))
)
def checkout_prepare(self, request, total):
return True
def payment_is_valid_session(self, request):
return True
def checkout_confirm_render(self, request):
return self.payment_form_render(request)
def format_map(self, order):
return {
'order': order.code,
'total': order.total,
'currency': self.event.currency,
'total_with_currency': money_filter(order.total, self.event.currency)
}
def order_pending_mail_render(self, order) -> str:
msg = str(self.settings.get('email_instructions', as_type=LazyI18nString)).format_map(self.format_map(order))
return msg
def payment_pending_render(self, request, payment) -> str:
return rich_text(
str(self.settings.get('pending_description', as_type=LazyI18nString)).format_map(self.format_map(payment.order))
)
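# Hypothetical example of an 'email_instructions' / 'pending_description' setting value for
# ManualPayment above; format_map() fills the {order}, {total}, {currency} and
# {total_with_currency} placeholders before the text is mailed or shown:
#   "Please transfer {total_with_currency} for order {order} to our bank account."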
class OffsettingProvider(BasePaymentProvider):
is_enabled = True
identifier = "offsetting"
verbose_name = _("Offsetting")
is_implicit = True
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
try:
payment.confirm()
except Quota.QuotaExceededException as e:
raise PaymentException(str(e))
def execute_refund(self, refund: OrderRefund):
code = refund.info_data['orders'][0]
try:
order = Order.objects.get(code=code, event__organizer=self.event.organizer)
except Order.DoesNotExist:
raise PaymentException(_('You entered an order that could not be found.'))
p = order.payments.create(
state=OrderPayment.PAYMENT_STATE_PENDING,
amount=refund.amount,
payment_date=now(),
provider='offsetting',
info=json.dumps({'orders': [refund.order.code]})
)
p.confirm()
@property
def settings_form_fields(self) -> dict:
return {}
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
return False
def order_change_allowed(self, order: Order) -> bool:
return False
def payment_control_render(self, request: HttpRequest, payment: OrderPayment) -> str:
return _('Balanced against orders: %s' % ', '.join(payment.info_data['orders']))
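# Hypothetical shape of the info payload used by OffsettingProvider: execute_refund() reads
# refund.info_data['orders'][0] as the code of the order to balance against, and the payment
# it creates stores info=json.dumps({'orders': ['ABC12']}) (the order code is illustrative).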
@receiver(register_payment_providers, dispatch_uid="payment_free")
def register_payment_provider(sender, **kwargs):
return [FreeOrderProvider, BoxOfficeProvider, OffsettingProvider, ManualPayment]
| [
"logging.getLogger",
"django.forms.CheckboxSelectMultiple",
"pytz.timezone",
"pretix.base.models.Order.objects.get",
"json.dumps",
"django_countries.Countries",
"django.utils.timezone.now",
"pretix.presale.views.cart.cart_session",
"django.dispatch.receiver",
"django.conf.settings.CURRENCY_PLACES.get",
"pretix.base.models.CartPosition.objects.filter",
"json.loads",
"django.utils.translation.ugettext_lazy",
"pretix.presale.views.get_cart_total",
"pretix.base.forms.PlaceholderValidator",
"pretix.base.models.InvoiceAddress.objects.get",
"pretix.helpers.money.DecimalTextInput",
"pretix.base.models.InvoiceAddress",
"django.core.exceptions.ImproperlyConfigured",
"pretix.presale.views.cart.get_or_create_cart_id",
"pretix.base.templatetags.money.money_filter",
"django.utils.translation.pgettext_lazy",
"pretix.base.settings.SettingsSandbox",
"django.template.loader.get_template",
"decimal.Decimal"
] | [((1331, 1358), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1348, 1358), False, 'import logging\n'), ((36977, 37042), 'django.dispatch.receiver', 'receiver', (['register_payment_providers'], {'dispatch_uid': '"""payment_free"""'}), "(register_payment_providers, dispatch_uid='payment_free')\n", (36985, 37042), False, 'from django.dispatch import receiver\n'), ((30517, 30532), 'django.utils.translation.ugettext_lazy', '_', (['"""Box office"""'], {}), "('Box office')\n", (30518, 30532), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((31609, 31628), 'django.utils.translation.ugettext_lazy', '_', (['"""Manual payment"""'], {}), "('Manual payment')\n", (31610, 31628), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((35685, 35700), 'django.utils.translation.ugettext_lazy', '_', (['"""Offsetting"""'], {}), "('Offsetting')\n", (35686, 35700), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((1826, 1876), 'pretix.base.settings.SettingsSandbox', 'SettingsSandbox', (['"""payment"""', 'self.identifier', 'event'], {}), "('payment', self.identifier, event)\n", (1841, 1876), False, 'from pretix.base.settings import SettingsSandbox\n'), ((4732, 4784), 'django.conf.settings.CURRENCY_PLACES.get', 'settings.CURRENCY_PLACES.get', (['self.event.currency', '(2)'], {}), '(self.event.currency, 2)\n', (4760, 4784), False, 'from django.conf import settings\n'), ((7969, 8021), 'django.conf.settings.CURRENCY_PLACES.get', 'settings.CURRENCY_PLACES.get', (['self.event.currency', '(2)'], {}), '(self.event.currency, 2)\n', (7997, 8021), False, 'from django.conf import settings\n'), ((15499, 15542), 'pytz.timezone', 'pytz.timezone', (['self.event.settings.timezone'], {}), '(self.event.settings.timezone)\n', (15512, 15542), False, 'import pytz\n'), ((19898, 19968), 'django.template.loader.get_template', 'get_template', (['"""pretixpresale/event/checkout_payment_form_default.html"""'], {}), "('pretixpresale/event/checkout_payment_form_default.html')\n", (19910, 19968), False, 'from django.template.loader import get_template\n'), ((29445, 29540), 'django.utils.translation.ugettext_lazy', '_', (['"""No payment is required as this order only includes products which are free of charge."""'], {}), "('No payment is required as this order only includes products which are free of charge.'\n )\n", (29446, 29540), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((29692, 29711), 'django.utils.translation.ugettext_lazy', '_', (['"""Free of charge"""'], {}), "('Free of charge')\n", (29693, 29711), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((30156, 30179), 'pretix.presale.views.get_cart_total', 'get_cart_total', (['request'], {}), '(request)\n', (30170, 30179), False, 'from pretix.presale.views import get_cart_total\n'), ((31152, 31176), 'json.loads', 'json.loads', (['payment.info'], {}), '(payment.info)\n', (31162, 31176), False, 'import json\n'), ((31196, 31248), 'django.template.loader.get_template', 'get_template', (['"""pretixcontrol/boxoffice/payment.html"""'], {}), "('pretixcontrol/boxoffice/payment.html')\n", (31208, 31248), False, 'from django.template.loader import get_template\n'), ((31692, 31803), 'django.utils.translation.ugettext_lazy', '_', (['"""In test mode, you can just manually mark this order as paid in the backend after it has been created."""'], {}), "('In test mode, you can just manually mark this 
order as paid in the backend after it has been created.'\n )\n", (31693, 31803), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((13557, 13644), 'django.utils.translation.pgettext_lazy', 'pgettext_lazy', (['"""invoice"""', '"""The payment for this invoice has already been received."""'], {}), "('invoice',\n 'The payment for this invoice has already been received.')\n", (13570, 13644), False, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((15480, 15485), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (15483, 15485), False, 'from django.utils.timezone import now\n'), ((17958, 18053), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""This payment provider does not support maximum or minimum amounts."""'], {}), "(\n 'This payment provider does not support maximum or minimum amounts.')\n", (17978, 18053), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((28419, 28485), 'django.utils.translation.ugettext_lazy', '_', (['"""Automatic refunds are not supported by this payment provider."""'], {}), "('Automatic refunds are not supported by this payment provider.')\n", (28420, 28485), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((35090, 35136), 'pretix.base.templatetags.money.money_filter', 'money_filter', (['order.total', 'self.event.currency'], {}), '(order.total, self.event.currency)\n', (35102, 35136), False, 'from pretix.base.templatetags.money import money_filter\n'), ((36067, 36134), 'pretix.base.models.Order.objects.get', 'Order.objects.get', ([], {'code': 'code', 'event__organizer': 'self.event.organizer'}), '(code=code, event__organizer=self.event.organizer)\n', (36084, 36134), False, 'from pretix.base.models import CartPosition, Event, InvoiceAddress, Order, OrderPayment, OrderRefund, Quota\n'), ((17775, 17805), 'pretix.presale.views.cart.get_or_create_cart_id', 'get_or_create_cart_id', (['request'], {}), '(request)\n', (17796, 17805), False, 'from pretix.presale.views.cart import cart_session, get_or_create_cart_id\n'), ((18431, 18452), 'pretix.presale.views.cart.cart_session', 'cart_session', (['request'], {}), '(request)\n', (18443, 18452), False, 'from pretix.presale.views.cart import cart_session, get_or_create_cart_id\n'), ((25543, 25576), 'decimal.Decimal', 'Decimal', (['self.settings._total_max'], {}), '(self.settings._total_max)\n', (25550, 25576), False, 'from decimal import ROUND_HALF_UP, Decimal\n'), ((25661, 25694), 'decimal.Decimal', 'Decimal', (['self.settings._total_min'], {}), '(self.settings._total_min)\n', (25668, 25694), False, 'from decimal import ROUND_HALF_UP, Decimal\n'), ((36405, 36410), 'django.utils.timezone.now', 'now', ([], {}), '()\n', (36408, 36410), False, 'from django.utils.timezone import now\n'), ((36464, 36507), 'json.dumps', 'json.dumps', (["{'orders': [refund.order.code]}"], {}), "({'orders': [refund.order.code]})\n", (36474, 36507), False, 'import json\n'), ((1607, 1635), 'django.utils.translation.ugettext_lazy', '_', (['"""This field is required."""'], {}), "('This field is required.')\n", (1608, 1635), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((4919, 4931), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (4926, 4931), False, 'from decimal import ROUND_HALF_UP, Decimal\n'), ((5073, 5085), 'decimal.Decimal', 'Decimal', (['"""1"""'], {}), "('1')\n", (5080, 5085), False, 'from decimal import ROUND_HALF_UP, Decimal\n'), ((18142, 18175), 
'decimal.Decimal', 'Decimal', (['self.settings._total_max'], {}), '(self.settings._total_max)\n', (18149, 18175), False, 'from decimal import ROUND_HALF_UP, Decimal\n'), ((18269, 18302), 'decimal.Decimal', 'Decimal', (['self.settings._total_min'], {}), '(self.settings._total_min)\n', (18276, 18302), False, 'from decimal import ROUND_HALF_UP, Decimal\n'), ((18592, 18608), 'pretix.base.models.InvoiceAddress', 'InvoiceAddress', ([], {}), '()\n', (18606, 18608), False, 'from pretix.base.models import CartPosition, Event, InvoiceAddress, Order, OrderPayment, OrderRefund, Quota\n'), ((36205, 36255), 'django.utils.translation.ugettext_lazy', '_', (['"""You entered an order that could not be found."""'], {}), "('You entered an order that could not be found.')\n", (36206, 36255), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((18721, 18776), 'pretix.base.models.InvoiceAddress.objects.get', 'InvoiceAddress.objects.get', ([], {'pk': 'iapk', 'order__isnull': '(True)'}), '(pk=iapk, order__isnull=True)\n', (18747, 18776), False, 'from pretix.base.models import CartPosition, Event, InvoiceAddress, Order, OrderPayment, OrderRefund, Quota\n'), ((8129, 8155), 'django.utils.translation.ugettext_lazy', '_', (['"""Enable payment method"""'], {}), "('Enable payment method')\n", (8130, 8155), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((8297, 8317), 'django.utils.translation.ugettext_lazy', '_', (['"""Available until"""'], {}), "('Available until')\n", (8298, 8317), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((8346, 8432), 'django.utils.translation.ugettext_lazy', '_', (['"""Users will not be able to choose this payment provider after the given date."""'], {}), "('Users will not be able to choose this payment provider after the given date.'\n )\n", (8347, 8432), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((8560, 8581), 'django.utils.translation.ugettext_lazy', '_', (['"""Text on invoices"""'], {}), "('Text on invoices')\n", (8561, 8581), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((8610, 8877), 'django.utils.translation.ugettext_lazy', '_', (['"""Will be printed just below the payment figures and above the closing text on invoices. This will only be used if the invoice is generated before the order is paid. If the invoice is generated later, it will show a text stating that it has already been paid."""'], {}), "('Will be printed just below the payment figures and above the closing text on invoices. This will only be used if the invoice is generated before the order is paid. If the invoice is generated later, it will show a text stating that it has already been paid.'\n )\n", (8611, 8877), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((9165, 9189), 'django.utils.translation.ugettext_lazy', '_', (['"""Minimum order total"""'], {}), "('Minimum order total')\n", (9166, 9189), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((9218, 9440), 'django.utils.translation.ugettext_lazy', '_', (['"""This payment will be available only if the order total is equal to or exceeds the given value. The order total for this purpose may be computed without taking the fees imposed by this payment method into account."""'], {}), "('This payment will be available only if the order total is equal to or exceeds the given value. 
The order total for this purpose may be computed without taking the fees imposed by this payment method into account.'\n )\n", (9219, 9440), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((9630, 9661), 'pretix.helpers.money.DecimalTextInput', 'DecimalTextInput', ([], {'places': 'places'}), '(places=places)\n', (9646, 9661), False, 'from pretix.helpers.money import DecimalTextInput\n'), ((9762, 9786), 'django.utils.translation.ugettext_lazy', '_', (['"""Maximum order total"""'], {}), "('Maximum order total')\n", (9763, 9786), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((9815, 10035), 'django.utils.translation.ugettext_lazy', '_', (['"""This payment will be available only if the order total is equal to or below the given value. The order total for this purpose may be computed without taking the fees imposed by this payment method into account."""'], {}), "('This payment will be available only if the order total is equal to or below the given value. The order total for this purpose may be computed without taking the fees imposed by this payment method into account.'\n )\n", (9816, 10035), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((10225, 10256), 'pretix.helpers.money.DecimalTextInput', 'DecimalTextInput', ([], {'places': 'places'}), '(places=places)\n', (10241, 10256), False, 'from pretix.helpers.money import DecimalTextInput\n'), ((10355, 10374), 'django.utils.translation.ugettext_lazy', '_', (['"""Additional fee"""'], {}), "('Additional fee')\n", (10356, 10374), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((10403, 10422), 'django.utils.translation.ugettext_lazy', '_', (['"""Absolute value"""'], {}), "('Absolute value')\n", (10404, 10422), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((10553, 10584), 'pretix.helpers.money.DecimalTextInput', 'DecimalTextInput', ([], {'places': 'places'}), '(places=places)\n', (10569, 10584), False, 'from pretix.helpers.money import DecimalTextInput\n'), ((10687, 10706), 'django.utils.translation.ugettext_lazy', '_', (['"""Additional fee"""'], {}), "('Additional fee')\n", (10688, 10706), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((10735, 10770), 'django.utils.translation.ugettext_lazy', '_', (['"""Percentage of the order total."""'], {}), "('Percentage of the order total.')\n", (10736, 10770), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((10944, 11006), 'django.utils.translation.ugettext_lazy', '_', (['"""Calculate the fee from the total value including the fee."""'], {}), "('Calculate the fee from the total value including the fee.')\n", (10945, 11006), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((11620, 11646), 'django.utils.translation.ugettext_lazy', '_', (['"""Restrict to countries"""'], {}), "('Restrict to countries')\n", (11621, 11646), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((11673, 11684), 'django_countries.Countries', 'Countries', ([], {}), '()\n', (11682, 11684), False, 'from django_countries import Countries\n'), ((11713, 11930), 'django.utils.translation.ugettext_lazy', '_', (['"""Only allow choosing this payment provider for invoice addresses in the selected countries. If you don\'t select any country, all countries are allowed. 
This is only enabled if the invoice address is required."""'], {}), '("Only allow choosing this payment provider for invoice addresses in the selected countries. If you don\'t select any country, all countries are allowed. This is only enabled if the invoice address is required."\n )\n', (11714, 11930), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((12016, 12090), 'django.forms.CheckboxSelectMultiple', 'forms.CheckboxSelectMultiple', ([], {'attrs': "{'class': 'scrolling-multiple-choice'}"}), "(attrs={'class': 'scrolling-multiple-choice'})\n", (12044, 12090), False, 'from django import forms\n'), ((18898, 18914), 'pretix.base.models.InvoiceAddress', 'InvoiceAddress', ([], {}), '()\n', (18912, 18914), False, 'from pretix.base.models import CartPosition, Event, InvoiceAddress, Order, OrderPayment, OrderRefund, Quota\n'), ((32551, 32575), 'django.utils.translation.ugettext_lazy', '_', (['"""Payment method name"""'], {}), "('Payment method name')\n", (32552, 32575), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((32721, 32769), 'django.utils.translation.ugettext_lazy', '_', (['"""Payment process description during checkout"""'], {}), "('Payment process description during checkout')\n", (32722, 32769), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((32801, 32952), 'django.utils.translation.ugettext_lazy', '_', (['"""This text will be shown during checkout when the user selects this payment method. It should give a short explanation on this payment method."""'], {}), "('This text will be shown during checkout when the user selects this payment method. It should give a short explanation on this payment method.'\n )\n", (32802, 32952), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((33125, 33186), 'django.utils.translation.ugettext_lazy', '_', (['"""Payment process description in order confirmation emails"""'], {}), "('Payment process description in order confirmation emails')\n", (33126, 33186), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((33218, 33465), 'django.utils.translation.ugettext_lazy', '_', (['"""This text will be included for the {payment_info} placeholder in order confirmation mails. It should instruct the user on how to proceed with the payment. You can usethe placeholders {order}, {total}, {currency} and {total_with_currency}"""'], {}), "('This text will be included for the {payment_info} placeholder in order confirmation mails. It should instruct the user on how to proceed with the payment. You can usethe placeholders {order}, {total}, {currency} and {total_with_currency}'\n )\n", (33219, 33465), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((33792, 33843), 'django.utils.translation.ugettext_lazy', '_', (['"""Payment process description for pending orders"""'], {}), "('Payment process description for pending orders')\n", (33793, 33843), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((33875, 34106), 'django.utils.translation.ugettext_lazy', '_', (['"""This text will be shown on the order confirmation page for pending orders. It should instruct the user on how to proceed with the payment. You can usethe placeholders {order}, {total}, {currency} and {total_with_currency}"""'], {}), "('This text will be shown on the order confirmation page for pending orders. It should instruct the user on how to proceed with the payment. 
You can usethe placeholders {order}, {total}, {currency} and {total_with_currency}'\n )\n", (33876, 34106), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((11035, 11301), 'django.utils.translation.ugettext_lazy', '_', (['"""We recommend to enable this if you want your users to pay the payment fees of your payment provider. <a href="{docs_url}" target="_blank" rel="noopener">Click here for detailed information on what this does.</a> Don\'t forget to set the correct fees above!"""'], {}), '(\'We recommend to enable this if you want your users to pay the payment fees of your payment provider. <a href="{docs_url}" target="_blank" rel="noopener">Click here for detailed information on what this does.</a> Don\\\'t forget to set the correct fees above!\'\n )\n', (11036, 11301), True, 'from django.utils.translation import pgettext_lazy, ugettext_lazy as _\n'), ((33605, 33692), 'pretix.base.forms.PlaceholderValidator', 'PlaceholderValidator', (["['{order}', '{total}', '{currency}', '{total_with_currency}']"], {}), "(['{order}', '{total}', '{currency}',\n '{total_with_currency}'])\n", (33625, 33692), False, 'from pretix.base.forms import PlaceholderValidator\n'), ((34246, 34333), 'pretix.base.forms.PlaceholderValidator', 'PlaceholderValidator', (["['{order}', '{total}', '{currency}', '{total_with_currency}']"], {}), "(['{order}', '{total}', '{currency}',\n '{total_with_currency}'])\n", (34266, 34333), False, 'from pretix.base.forms import PlaceholderValidator\n'), ((15914, 15976), 'pretix.base.models.CartPosition.objects.filter', 'CartPosition.objects.filter', ([], {'cart_id': 'cart_id', 'event': 'self.event'}), '(cart_id=cart_id, event=self.event)\n', (15941, 15976), False, 'from pretix.base.models import CartPosition, Event, InvoiceAddress, Order, OrderPayment, OrderRefund, Quota\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
from unittest import TestCase
from beast.env.ReadEnvFile import read_env_file
from beast.util import Terminal
Terminal.CAN_CHANGE_COLOR = False
JSON = """
{
"FOO": "foo",
"BAR": "bar bar bar",
"CPPFLAGS": "-std=c++11 -frtti -fno-strict-aliasing -DWOMBAT"
}"""
ENV = """
# An env file.
FOO=foo
export BAR="bar bar bar"
CPPFLAGS=-std=c++11 -frtti -fno-strict-aliasing -DWOMBAT
# export BAZ=baz should be ignored.
"""
RESULT = {
'FOO': 'foo',
'BAR': 'bar bar bar',
'CPPFLAGS': '-std=c++11 -frtti -fno-strict-aliasing -DWOMBAT',
}
BAD_ENV = ENV + """
This line isn't right.
NO SPACES IN NAMES="valid value"
"""
class test_ReadEnvFile(TestCase):
def test_read_json(self):
self.assertEqual(read_env_file(JSON), RESULT)
def test_read_env(self):
self.assertEqual(read_env_file(ENV), RESULT)
def test_read_env_error(self):
errors = []
self.assertEqual(read_env_file(BAD_ENV, errors.append), RESULT)
self.assertEqual(errors, [
"WARNING: Didn't understand the following environment file lines:",
"11. >>> This line isn't right.",
'12. >>> NO SPACES IN NAMES="valid value"'])
| [
"beast.env.ReadEnvFile.read_env_file"
] | [((805, 824), 'beast.env.ReadEnvFile.read_env_file', 'read_env_file', (['JSON'], {}), '(JSON)\n', (818, 824), False, 'from beast.env.ReadEnvFile import read_env_file\n'), ((883, 901), 'beast.env.ReadEnvFile.read_env_file', 'read_env_file', (['ENV'], {}), '(ENV)\n', (896, 901), False, 'from beast.env.ReadEnvFile import read_env_file\n'), ((982, 1019), 'beast.env.ReadEnvFile.read_env_file', 'read_env_file', (['BAD_ENV', 'errors.append'], {}), '(BAD_ENV, errors.append)\n', (995, 1019), False, 'from beast.env.ReadEnvFile import read_env_file\n')] |
"""
Changes existing registered_meta on a node to new schema layout
required for the prereg-prize
"""
import json
import sys
import logging
from modularodm import Q
from framework.mongo import database as db
from framework.mongo.utils import from_mongo
from framework.transactions.context import TokuTransaction
from website.models import MetaSchema
from website.app import init_app
from website.project.metadata.schemas import _id_to_name
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def prepare_nodes(_db=None):
_db = _db or db
_db['node'].update(
{},
{
'$set': {
'registered_schema': []
}
},
multi=True
)
def from_json_or_fail(schema):
# Unstringify stored metadata
try:
schema = json.loads(schema) if schema else {}
except TypeError as e:
if isinstance(schema, dict):
pass
else:
raise e
return schema
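# Hypothetical before/after shape of the migration performed by main() below
# (schema ids and answers are illustrative):
#   before: node['registered_meta'] = {'<mongo-escaped schema id>': '{"q1": "An answer"}'}
#   after:  node['registered_meta']   = {'<MetaSchema _id>': {'q1': {'value': 'An answer'}}}
#           node['registered_schema'] = ['<MetaSchema _id>']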
def main(dev=False, _db=None):
_db = _db or db
init_app(routes=False)
count = 0
skipped = 0
scripts_utils.add_file_logger(logger, __file__)
logger.info("Iterating over all registrations")
# convert registered_schema to list field
prepare_nodes()
node_documents = _db['node'].find({'is_registration': True})
for node in node_documents:
registered_schemas = []
registered_meta = {}
schemas = node['registered_meta']
if not schemas:
logger.info('Node: {0} is registered but has no registered_meta'.format(node['_id']))
continue
for schema_id, schema in schemas.iteritems():
name = _id_to_name(from_mongo(schema_id))
schema = from_json_or_fail(schema)
# append matching schema to node.registered_schema
try:
meta_schema = MetaSchema.find(
Q('name', 'eq', name)
).sort('-schema_version')[0]
except IndexError as e:
logger.error('No MetaSchema matching name: {0} found for node: {1}.'.format(name, node['_id']))
# Skip over missing schemas
skipped += 1
if dev:
continue
else:
raise e
else:
registered_meta[meta_schema._id] = {
key: {
'value': value
}
for key, value in schema.items()
}
registered_schemas.append(meta_schema._id)
db['node'].update(
{'_id': node['_id']},
{'$set': {
'registered_meta': registered_meta,
'registered_schema': registered_schemas
}}
)
count = count + 1
logger.info('Done with {0} nodes migrated and {1} nodes skipped.'.format(count, skipped))
if __name__ == '__main__':
dry_run = 'dry' in sys.argv
dev = 'dev' in sys.argv
with TokuTransaction():
main(dev=dev)
if dry_run:
raise RuntimeError('Dry run, rolling back transaction.')
| [
"logging.getLogger",
"json.loads",
"framework.transactions.context.TokuTransaction",
"modularodm.Q",
"framework.mongo.utils.from_mongo",
"scripts.utils.add_file_logger",
"website.app.init_app"
] | [((497, 524), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (514, 524), False, 'import logging\n'), ((1081, 1103), 'website.app.init_app', 'init_app', ([], {'routes': '(False)'}), '(routes=False)\n', (1089, 1103), False, 'from website.app import init_app\n'), ((1138, 1185), 'scripts.utils.add_file_logger', 'scripts_utils.add_file_logger', (['logger', '__file__'], {}), '(logger, __file__)\n', (1167, 1185), True, 'from scripts import utils as scripts_utils\n'), ((3065, 3082), 'framework.transactions.context.TokuTransaction', 'TokuTransaction', ([], {}), '()\n', (3080, 3082), False, 'from framework.transactions.context import TokuTransaction\n'), ((855, 873), 'json.loads', 'json.loads', (['schema'], {}), '(schema)\n', (865, 873), False, 'import json\n'), ((1734, 1755), 'framework.mongo.utils.from_mongo', 'from_mongo', (['schema_id'], {}), '(schema_id)\n', (1744, 1755), False, 'from framework.mongo.utils import from_mongo\n'), ((1951, 1972), 'modularodm.Q', 'Q', (['"""name"""', '"""eq"""', 'name'], {}), "('name', 'eq', name)\n", (1952, 1972), False, 'from modularodm import Q\n')] |
# Generated by Django 2.2.12 on 2020-05-01 03:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Issue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField(max_length=2000)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('status', models.CharField(choices=[('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done')], default='To Do', max_length=20)),
('priority', models.CharField(choices=[('Low', 'Low'), ('Medium', 'Medium'), ('High', 'High')], default='Low', max_length=20)),
('assignee', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assigned', to=settings.AUTH_USER_MODEL)),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='issues', to=settings.AUTH_USER_MODEL)),
('linked_to', models.ManyToManyField(related_name='_issue_linked_to_+', to='issues.Issue')),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('content', models.TextField(max_length=1000)),
('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='issues.Issue')),
],
),
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(upload_to='media/files')),
('issue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='attachments', to='issues.Issue')),
],
),
]
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.FileField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((248, 305), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (279, 305), False, 'from django.db import migrations, models\n'), ((435, 528), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (451, 528), False, 'from django.db import migrations, models\n'), ((553, 585), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (569, 585), False, 'from django.db import migrations, models\n'), ((620, 653), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(2000)'}), '(max_length=2000)\n', (636, 653), False, 'from django.db import migrations, models\n'), ((684, 723), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (704, 723), False, 'from django.db import migrations, models\n'), ((755, 790), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (775, 790), False, 'from django.db import migrations, models\n'), ((820, 953), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('To Do', 'To Do'), ('In Progress', 'In Progress'), ('Done', 'Done')]", 'default': '"""To Do"""', 'max_length': '(20)'}), "(choices=[('To Do', 'To Do'), ('In Progress', 'In Progress'\n ), ('Done', 'Done')], default='To Do', max_length=20)\n", (836, 953), False, 'from django.db import migrations, models\n'), ((980, 1096), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('Low', 'Low'), ('Medium', 'Medium'), ('High', 'High')]", 'default': '"""Low"""', 'max_length': '(20)'}), "(choices=[('Low', 'Low'), ('Medium', 'Medium'), ('High',\n 'High')], default='Low', max_length=20)\n", (996, 1096), False, 'from django.db import migrations, models\n'), ((1124, 1245), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""assigned"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='assigned', to=settings.AUTH_USER_MODEL)\n", (1141, 1245), False, 'from django.db import migrations, models\n'), ((1271, 1390), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""issues"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='issues', to=settings.AUTH_USER_MODEL)\n", (1288, 1390), False, 'from django.db import migrations, models\n'), ((1418, 1494), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""_issue_linked_to_+"""', 'to': '"""issues.Issue"""'}), "(related_name='_issue_linked_to_+', to='issues.Issue')\n", (1440, 1494), False, 'from django.db import migrations, models\n'), ((1627, 1720), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1643, 1720), False, 'from django.db import migrations, models\n'), ((1747, 1786), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1767, 1786), False, 
'from django.db import migrations, models\n'), ((1817, 1850), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(1000)'}), '(max_length=1000)\n', (1833, 1850), False, 'from django.db import migrations, models\n'), ((1881, 2002), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""comments"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='comments', to=settings.AUTH_USER_MODEL)\n", (1898, 2002), False, 'from django.db import migrations, models\n'), ((2026, 2137), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""comments"""', 'to': '"""issues.Issue"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='comments', to='issues.Issue')\n", (2043, 2137), False, 'from django.db import migrations, models\n'), ((2268, 2361), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (2284, 2361), False, 'from django.db import migrations, models\n'), ((2385, 2426), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""media/files"""'}), "(upload_to='media/files')\n", (2401, 2426), False, 'from django.db import migrations, models\n'), ((2455, 2569), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""attachments"""', 'to': '"""issues.Issue"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='attachments', to='issues.Issue')\n", (2472, 2569), False, 'from django.db import migrations, models\n')] |
import asyncio
from typing import Union
import datetime
import time
from discord.ext import commands
import yaml
from cogs import checks
import database
import utils
# Loads the repeating interval dictionary
with open("conversions.yml", "r") as conversion_file:
conversion_dict = yaml.load(conversion_file, Loader=yaml.Loader)
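# Hypothetical contents of conversions.yml, assuming each repeat interval maps to its length
# in seconds (these are the intervals accepted by the add_reminder command below):
#   daily: 86400
#   weekly: 604800
#   biweekly: 1209600
#   triweekly: 1814400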
prefix = utils.get_prefix()
class Remind(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.reminders = []
self.tasks = []
asyncio.create_task(self.update_schedule())
async def update_schedule(self):
"""Updates the schedule"""
reminders = database.get_reminders()
new_reminders = []
for reminder in reminders:
if reminder["date"] - time.time() < 0:
database.remove_reminder(reminder)
else:
new_reminders.append(reminder)
self.reminders.clear()
self.reminders.extend(new_reminders)
async def setup_reminders(self):
"""Sets up the reminders"""
await self.clear_tasks()
await self.update_schedule()
scheduled_reminders = []
for task in self.tasks:
if task.get_coro().cr_frame is not None:
scheduled_reminders.append(
task.get_coro().cr_frame.f_locals["reminder"]
)
# Create tasks for all reminders, call the remind function
for reminder in self.reminders:
if reminder not in scheduled_reminders:
task = asyncio.create_task(self.remind(reminder))
self.tasks.append(task)
scheduled_reminders.append(
task.get_coro().cr_frame.f_locals["reminder"]
)
# Run the tasks
asyncio.gather(*self.tasks)
async def clear_tasks(self):
for task in self.tasks:
if task._state == "FINISHED":
self.tasks.remove(task)
async def remind(self, reminder: dict):
"""Execute one reminder"""
# Check if the reminder is in the future and if it exists in the database
if reminder["date"] > time.time() and database.get_reminders(**reminder) != []:
await asyncio.sleep(reminder["date"] - time.time())
# Checks if the reminder is still exists, in case of deletion
if database.get_reminders(**reminder) != [] and reminder in self.reminders:
await self.bot.get_channel(reminder["channel"]).send(
f"Reminder:\n{reminder['reminder_text']}"
)
if reminder["repeating"] != False:
await self.schedule_repeat(reminder)
self.reminders.remove(reminder)
# Remove the reminder
database.remove_reminder(reminder)
# Remove a reminder that has passed
else:
database.remove_reminder(reminder)
async def schedule_repeat(self, reminder: dict):
"""Schedules a repeating reminder"""
if reminder["repeating"] and database.get_reminders(**reminder) != []:
# Calculate when the next reminder should be
reminder_date = datetime.datetime.fromtimestamp(
reminder["date"] + conversion_dict[reminder["repeating"]]
)
# Remove the old reminder
database.remove_reminder(reminder)
# Add the new reminder
database.insert_reminder(
reminder["guild"],
reminder["channel"],
reminder_date.year,
reminder_date.month,
reminder_date.day,
reminder_date.hour,
reminder_date.minute,
reminder["reminder_text"],
reminder["repeating"],
)
asyncio.create_task(self.setup_reminders())
@commands.command(
help="Date should be in month/day/year format, either with slashes or dashes (ex. month/day/year or month-day-year)\n\nRepeating is an interval of time after which the reminder should be sent again, must be either daily, weekly, biweekly, or triweekly\n\nText is the text the reminder will be sent with, wrap with quotations if this contains whitespace",
aliases=["reminder", "add_r", "ar"],
)
@commands.check(checks.is_operator)
async def add_reminder(
self,
ctx,
date: str,
user_time: str,
text: str,
repeating: Union[str, bool] = False,
):
"""Attempts to add a reminder"""
# Checks if the reminder should repeat, and if it is a valid interval
try:
_date = utils.split_date(date)
_time = utils.split_time(user_time)
except UnboundLocalError:
raise commands.UserInputError("Date or time was not in the correct format.")
if repeating and repeating not in conversion_dict:
raise commands.UserInputError()
# Tries to insert the reminder
result = database.insert_reminder(
ctx.guild.id,
ctx.channel.id,
_date["year"],
_date["month"],
_date["day"],
_time["hour"],
_time["minute"],
text,
repeating,
)
# Sends a status message, and restarts the reminders
if result:
await asyncio.create_task(self.setup_reminders())
await ctx.send(
embed=utils.generate_embed(
"Reminder Stored",
f"{date}\n{user_time}\n{text}\nrepeating: {repeating}",
)
)
# This means the insertion of the reminder failed
else:
await ctx.send(
embed=utils.generate_embed(
"Error",
"`This reminder already exists in the database or is not in the future`",
)
)
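    # Hypothetical invocation in a Discord channel, assuming the configured prefix is "!":
    #   !add_reminder 05-20-2021 14:30 "Team meeting" weekly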
@add_reminder.error
async def add_reminder_error(self, ctx, error):
"""Called when add_reminder() errors"""
print(error)
if isinstance(error, commands.errors.MissingRequiredArgument):
await ctx.send(
embed=utils.generate_embed(
"Error", f"`{error} Run {prefix}help add_reminder`"
)
)
elif isinstance(error, commands.errors.UserInputError):
await ctx.send(
embed=utils.generate_embed(
"Error", f"`{error} Run {prefix}help add_reminder`"
)
)
elif isinstance(error, commands.errors.CheckFailure):
await ctx.send(
embed=utils.generate_embed(
"Error", "`You do not have permissions for this command`"
)
)
else:
await ctx.send(
embed=utils.generate_embed(
"Error",
                    f"`An unexpected error has occurred, run {prefix}help add_reminder`",
)
)
def setup(bot):
cog = Remind(bot)
bot.add_cog(cog)
asyncio.create_task(cog.setup_reminders())
| [
"utils.split_date",
"datetime.datetime.fromtimestamp",
"database.insert_reminder",
"utils.generate_embed",
"yaml.load",
"utils.split_time",
"discord.ext.commands.check",
"discord.ext.commands.UserInputError",
"asyncio.gather",
"database.get_reminders",
"database.remove_reminder",
"time.time",
"discord.ext.commands.command",
"utils.get_prefix"
] | [((346, 364), 'utils.get_prefix', 'utils.get_prefix', ([], {}), '()\n', (362, 364), False, 'import utils\n'), ((288, 334), 'yaml.load', 'yaml.load', (['conversion_file'], {'Loader': 'yaml.Loader'}), '(conversion_file, Loader=yaml.Loader)\n', (297, 334), False, 'import yaml\n'), ((3895, 4313), 'discord.ext.commands.command', 'commands.command', ([], {'help': '"""Date should be in month/day/year format, either with slashes or dashes (ex. month/day/year or month-day-year)\n\nRepeating is an interval of time after which the reminder should be sent again, must be either daily, weekly, biweekly, or triweekly\n\nText is the text the reminder will be sent with, wrap with quotations if this contains whitespace"""', 'aliases': "['reminder', 'add_r', 'ar']"}), '(help=\n """Date should be in month/day/year format, either with slashes or dashes (ex. month/day/year or month-day-year)\n\nRepeating is an interval of time after which the reminder should be sent again, must be either daily, weekly, biweekly, or triweekly\n\nText is the text the reminder will be sent with, wrap with quotations if this contains whitespace"""\n , aliases=[\'reminder\', \'add_r\', \'ar\'])\n', (3911, 4313), False, 'from discord.ext import commands\n'), ((4332, 4366), 'discord.ext.commands.check', 'commands.check', (['checks.is_operator'], {}), '(checks.is_operator)\n', (4346, 4366), False, 'from discord.ext import commands\n'), ((644, 668), 'database.get_reminders', 'database.get_reminders', ([], {}), '()\n', (666, 668), False, 'import database\n'), ((1797, 1824), 'asyncio.gather', 'asyncio.gather', (['*self.tasks'], {}), '(*self.tasks)\n', (1811, 1824), False, 'import asyncio\n'), ((5041, 5194), 'database.insert_reminder', 'database.insert_reminder', (['ctx.guild.id', 'ctx.channel.id', "_date['year']", "_date['month']", "_date['day']", "_time['hour']", "_time['minute']", 'text', 'repeating'], {}), "(ctx.guild.id, ctx.channel.id, _date['year'], _date\n ['month'], _date['day'], _time['hour'], _time['minute'], text, repeating)\n", (5065, 5194), False, 'import database\n'), ((2801, 2835), 'database.remove_reminder', 'database.remove_reminder', (['reminder'], {}), '(reminder)\n', (2825, 2835), False, 'import database\n'), ((2906, 2940), 'database.remove_reminder', 'database.remove_reminder', (['reminder'], {}), '(reminder)\n', (2930, 2940), False, 'import database\n'), ((3204, 3299), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (["(reminder['date'] + conversion_dict[reminder['repeating']])"], {}), "(reminder['date'] + conversion_dict[reminder\n ['repeating']])\n", (3235, 3299), False, 'import datetime\n'), ((3375, 3409), 'database.remove_reminder', 'database.remove_reminder', (['reminder'], {}), '(reminder)\n', (3399, 3409), False, 'import database\n'), ((3457, 3685), 'database.insert_reminder', 'database.insert_reminder', (["reminder['guild']", "reminder['channel']", 'reminder_date.year', 'reminder_date.month', 'reminder_date.day', 'reminder_date.hour', 'reminder_date.minute', "reminder['reminder_text']", "reminder['repeating']"], {}), "(reminder['guild'], reminder['channel'],\n reminder_date.year, reminder_date.month, reminder_date.day,\n reminder_date.hour, reminder_date.minute, reminder['reminder_text'],\n reminder['repeating'])\n", (3481, 3685), False, 'import database\n'), ((4688, 4710), 'utils.split_date', 'utils.split_date', (['date'], {}), '(date)\n', (4704, 4710), False, 'import utils\n'), ((4731, 4758), 'utils.split_time', 'utils.split_time', (['user_time'], {}), '(user_time)\n', (4747, 
4758), False, 'import utils\n'), ((4959, 4984), 'discord.ext.commands.UserInputError', 'commands.UserInputError', ([], {}), '()\n', (4982, 4984), False, 'from discord.ext import commands\n'), ((798, 832), 'database.remove_reminder', 'database.remove_reminder', (['reminder'], {}), '(reminder)\n', (822, 832), False, 'import database\n'), ((2165, 2176), 'time.time', 'time.time', ([], {}), '()\n', (2174, 2176), False, 'import time\n'), ((2181, 2215), 'database.get_reminders', 'database.get_reminders', ([], {}), '(**reminder)\n', (2203, 2215), False, 'import database\n'), ((3077, 3111), 'database.get_reminders', 'database.get_reminders', ([], {}), '(**reminder)\n', (3099, 3111), False, 'import database\n'), ((4811, 4881), 'discord.ext.commands.UserInputError', 'commands.UserInputError', (['"""Date or time was not in the correct format."""'], {}), "('Date or time was not in the correct format.')\n", (4834, 4881), False, 'from discord.ext import commands\n'), ((765, 776), 'time.time', 'time.time', ([], {}), '()\n', (774, 776), False, 'import time\n'), ((2376, 2410), 'database.get_reminders', 'database.get_reminders', ([], {}), '(**reminder)\n', (2398, 2410), False, 'import database\n'), ((2274, 2285), 'time.time', 'time.time', ([], {}), '()\n', (2283, 2285), False, 'import time\n'), ((5501, 5601), 'utils.generate_embed', 'utils.generate_embed', (['"""Reminder Stored"""', 'f"""{date}\n{user_time}\n{text}\nrepeating: {repeating}"""'], {}), '(\'Reminder Stored\',\n f"""{date}\n{user_time}\n{text}\nrepeating: {repeating}""")\n', (5521, 5601), False, 'import utils\n'), ((5792, 5899), 'utils.generate_embed', 'utils.generate_embed', (['"""Error"""', '"""`This reminder already exists in the database or is not in the future`"""'], {}), "('Error',\n '`This reminder already exists in the database or is not in the future`')\n", (5812, 5899), False, 'import utils\n'), ((6236, 6309), 'utils.generate_embed', 'utils.generate_embed', (['"""Error"""', 'f"""`{error} Run {prefix}help add_reminder`"""'], {}), "('Error', f'`{error} Run {prefix}help add_reminder`')\n", (6256, 6309), False, 'import utils\n'), ((6476, 6549), 'utils.generate_embed', 'utils.generate_embed', (['"""Error"""', 'f"""`{error} Run {prefix}help add_reminder`"""'], {}), "('Error', f'`{error} Run {prefix}help add_reminder`')\n", (6496, 6549), False, 'import utils\n'), ((6714, 6793), 'utils.generate_embed', 'utils.generate_embed', (['"""Error"""', '"""`You do not have permissions for this command`"""'], {}), "('Error', '`You do not have permissions for this command`')\n", (6734, 6793), False, 'import utils\n'), ((6910, 7012), 'utils.generate_embed', 'utils.generate_embed', (['"""Error"""', 'f"""`An unexpected error has occured, run {prefix}help add_reminder`"""'], {}), "('Error',\n f'`An unexpected error has occured, run {prefix}help add_reminder`')\n", (6930, 7012), False, 'import utils\n')] |
import setuptools
from toraman.version import __version__
with open('README.md', 'r') as input_file:
long_description = input_file.read()
setuptools.setup(
name='toraman',
version=__version__,
author='<NAME>',
author_email='<EMAIL>',
description='A computer-assisted translation tool package',
keywords = ['CAT', 'computer-assisted translation', 'computer-aided translation', 'translation', 'free-to-use'],
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/csengor/toraman-py',
packages=setuptools.find_packages(),
install_requires=[
'lxml',
'python-levenshtein',
'regex'
],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
],
)
| [
"setuptools.find_packages"
] | [((590, 616), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (614, 616), False, 'import setuptools\n')] |
"""
CTC-like decoder utilities.
"""
from itertools import groupby
import numpy as np
def ctc_best_path_decode(probs_seq, vocabulary):
"""
Best path decoding, also called argmax decoding or greedy decoding.
    The path consisting of the most probable tokens is further post-processed to
    remove consecutive repetitions and all blanks.
:param probs_seq: 2-D list of probabilities over the vocabulary for each
character. Each element is a list of float probabilities
for one character.
:type probs_seq: list
:param vocabulary: Vocabulary list.
:type vocabulary: list
:return: Decoding result string.
    :rtype: basestring
"""
# dimension verification
for probs in probs_seq:
if not len(probs) == len(vocabulary) + 1:
            raise ValueError("probs_seq dimension mismatched with vocabulary")
# argmax to get the best index for each time step
max_index_list = list(np.array(probs_seq).argmax(axis=1))
# remove consecutive duplicate indexes
index_list = [index_group[0] for index_group in groupby(max_index_list)]
# remove blank indexes
blank_index = len(vocabulary)
index_list = [index for index in index_list if index != blank_index]
# convert index list to string
return ''.join([vocabulary[index] for index in index_list])
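# Worked example (hypothetical probabilities) for ctc_best_path_decode with vocabulary ['a', 'b'],
# so each row is a distribution over ['a', 'b', <blank>]:
#   probs_seq = [[0.6, 0.1, 0.3], [0.5, 0.2, 0.3], [0.1, 0.2, 0.7]]
#   argmax per step -> [0, 0, 2]; collapse repeats -> [0, 2]; drop blank index 2 -> [0]
#   ctc_best_path_decode(probs_seq, ['a', 'b'])  # returns 'a'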
def ctc_decode(probs_seq, vocabulary, method):
"""
CTC-like sequence decoding from a sequence of likelihood probablilites.
:param probs_seq: 2-D list of probabilities over the vocabulary for each
character. Each element is a list of float probabilities
for one character.
:type probs_seq: list
:param vocabulary: Vocabulary list.
:type vocabulary: list
:param method: Decoding method name, with options: "best_path".
:type method: basestring
:return: Decoding result string.
    :rtype: basestring
"""
for prob_list in probs_seq:
if not len(prob_list) == len(vocabulary) + 1:
            raise ValueError("probs dimension mismatched with vocabulary")
if method == "best_path":
return ctc_best_path_decode(probs_seq, vocabulary)
else:
        raise ValueError("Decoding method [%s] is not supported." % method)
| [
"numpy.array",
"itertools.groupby"
] | [((1104, 1127), 'itertools.groupby', 'groupby', (['max_index_list'], {}), '(max_index_list)\n', (1111, 1127), False, 'from itertools import groupby\n'), ((973, 992), 'numpy.array', 'np.array', (['probs_seq'], {}), '(probs_seq)\n', (981, 992), True, 'import numpy as np\n')] |
"""dbt_airflow_factory module."""
from setuptools import find_packages, setup
with open("README.md") as f:
README = f.read()
# Runtime Requirements.
INSTALL_REQUIRES = ["pytimeparse==1.1.8"]
# Dev Requirements
EXTRA_REQUIRE = {
"tests": [
"pytest>=6.2.2, <7.0.0",
"pytest-cov>=2.8.0, <3.0.0",
"tox==3.21.1",
"pre-commit==2.9.3",
"pandas==1.2.5",
"apache-airflow[kubernetes]==2.2.0",
],
"docs": [
"sphinx==4.3.1",
"sphinx-rtd-theme==1.0.0",
"sphinx-click>=3.0,<3.1",
"myst-parser>=0.16, <0.17",
"docutils>=0.17,<0.18",
],
}
setup(
name="dbt-airflow-factory",
version="0.18.0",
description="Library to convert DBT manifest metadata to Airflow tasks",
long_description=README,
long_description_content_type="text/markdown",
license="Apache Software License (Apache 2.0)",
python_requires=">=3",
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="dbt airflow manifest parser python",
author=u"<NAME>",
author_email="<EMAIL>",
url="https://github.com/getindata/dbt-airflow-factory/",
packages=find_packages(exclude=["ez_setup", "examples", "tests", "docs"]),
include_package_data=True,
zip_safe=False,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRA_REQUIRE,
)
| [
"setuptools.find_packages"
] | [((1274, 1338), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['ez_setup', 'examples', 'tests', 'docs']"}), "(exclude=['ez_setup', 'examples', 'tests', 'docs'])\n", (1287, 1338), False, 'from setuptools import find_packages, setup\n')] |
import numpy as np
import pytest
from karabo_bridge import serialize, deserialize
from .utils import compare_nested_dict
def test_serialize(data, protocol_version):
msg = serialize(data, protocol_version=protocol_version)
assert isinstance(msg, list)
d, m = deserialize(msg)
compare_nested_dict(data, d)
assert m['source1'] == {'timestamp.tid': 9876543210, 'timestamp': 12345678}
assert m['XMPL/DET/MOD0'] == {}
def test_serialize_with_metadata(data, metadata, protocol_version):
msg = serialize(data, metadata, protocol_version=protocol_version)
d, m = deserialize(msg)
compare_nested_dict(metadata, m)
def test_serialize_with_dummy_timestamps(data, protocol_version):
msg = serialize(data, protocol_version=protocol_version,
dummy_timestamps=True)
d, m = deserialize(msg)
assert set(m['XMPL/DET/MOD0']) == {'timestamp', 'timestamp.sec', 'timestamp.frac'}
assert set(m['source1']) == {'timestamp', 'timestamp.tid'}
assert m['source1']['timestamp.tid'] == 9876543210
assert m['source1']['timestamp'] == 12345678
def test_serialize_with_metadata_and_dummy_timestamp(data, metadata, protocol_version):
msg = serialize(data, metadata, protocol_version=protocol_version,
dummy_timestamps=True)
d, m = deserialize(msg)
compare_nested_dict(metadata, m)
def test_wrong_version(data):
with pytest.raises(ValueError):
serialize(data, protocol_version='3.0')
| [
"karabo_bridge.serialize",
"pytest.raises",
"karabo_bridge.deserialize"
] | [((179, 229), 'karabo_bridge.serialize', 'serialize', (['data'], {'protocol_version': 'protocol_version'}), '(data, protocol_version=protocol_version)\n', (188, 229), False, 'from karabo_bridge import serialize, deserialize\n'), ((275, 291), 'karabo_bridge.deserialize', 'deserialize', (['msg'], {}), '(msg)\n', (286, 291), False, 'from karabo_bridge import serialize, deserialize\n'), ((521, 581), 'karabo_bridge.serialize', 'serialize', (['data', 'metadata'], {'protocol_version': 'protocol_version'}), '(data, metadata, protocol_version=protocol_version)\n', (530, 581), False, 'from karabo_bridge import serialize, deserialize\n'), ((594, 610), 'karabo_bridge.deserialize', 'deserialize', (['msg'], {}), '(msg)\n', (605, 610), False, 'from karabo_bridge import serialize, deserialize\n'), ((726, 799), 'karabo_bridge.serialize', 'serialize', (['data'], {'protocol_version': 'protocol_version', 'dummy_timestamps': '(True)'}), '(data, protocol_version=protocol_version, dummy_timestamps=True)\n', (735, 799), False, 'from karabo_bridge import serialize, deserialize\n'), ((832, 848), 'karabo_bridge.deserialize', 'deserialize', (['msg'], {}), '(msg)\n', (843, 848), False, 'from karabo_bridge import serialize, deserialize\n'), ((1203, 1290), 'karabo_bridge.serialize', 'serialize', (['data', 'metadata'], {'protocol_version': 'protocol_version', 'dummy_timestamps': '(True)'}), '(data, metadata, protocol_version=protocol_version,\n dummy_timestamps=True)\n', (1212, 1290), False, 'from karabo_bridge import serialize, deserialize\n'), ((1319, 1335), 'karabo_bridge.deserialize', 'deserialize', (['msg'], {}), '(msg)\n', (1330, 1335), False, 'from karabo_bridge import serialize, deserialize\n'), ((1414, 1439), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1427, 1439), False, 'import pytest\n'), ((1449, 1488), 'karabo_bridge.serialize', 'serialize', (['data'], {'protocol_version': '"""3.0"""'}), "(data, protocol_version='3.0')\n", (1458, 1488), False, 'from karabo_bridge import serialize, deserialize\n')] |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import inspect
from datetime import datetime
import freezegun
import pytest
from sqlalchemy import DateTime, cast
from sqlalchemy.sql.functions import _FunctionGenerator
@pytest.fixture
def monkeypatch_methods(monkeypatch):
"""Monkeypatch all methods from `cls` onto `target`.
This utility lets you easily mock multiple methods in an existing class.
In case of classmethods the binding will not be changed, i.e. `cls` will
keep pointing to the source class and not the target class.
"""
def _monkeypatch_methods(target, cls):
for name, method in inspect.getmembers(cls, inspect.ismethod):
if method.__self__ is None:
# For unbound methods we need to copy the underlying function
method = method.__func__
monkeypatch.setattr(f'{target}.{name}', method)
return _monkeypatch_methods
@pytest.fixture
def freeze_time(monkeypatch):
"""Return a function that freezes the current time.
It affects datetime.now, date.today, etc. and also SQLAlchemy's `func.now()`
which simply returns the current time from `datetime.now()` instead of
retrieving it using the actual `now()` function of PostgreSQL.
"""
freezers = []
orig_call = _FunctionGenerator.__call__
def FunctionGenerator_call(self, *args, **kwargs):
if self._FunctionGenerator__names == ['now']:
return cast(datetime.now().isoformat(), DateTime)
return orig_call(self, *args, **kwargs)
monkeypatch.setattr(_FunctionGenerator, '__call__', FunctionGenerator_call)
def _freeze_time(time_to_freeze):
freezer = freezegun.freeze_time(time_to_freeze)
freezer.start()
freezers.append(freezer)
yield _freeze_time
for freezer in reversed(freezers):
freezer.stop()
| [
"datetime.datetime.now",
"freezegun.freeze_time",
"inspect.getmembers"
] | [((797, 838), 'inspect.getmembers', 'inspect.getmembers', (['cls', 'inspect.ismethod'], {}), '(cls, inspect.ismethod)\n', (815, 838), False, 'import inspect\n'), ((1848, 1885), 'freezegun.freeze_time', 'freezegun.freeze_time', (['time_to_freeze'], {}), '(time_to_freeze)\n', (1869, 1885), False, 'import freezegun\n'), ((1624, 1638), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1636, 1638), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3
import rospy
from std_msgs.msg import Int32
import time
rospy.init_node('count')                                # set the node name to 'count'
pub = rospy.Publisher('count_up', Int32, queue_size=1)  # create the 'count_up' publisher
rate = rospy.Rate(10)                                   # run at 10 Hz
n = 0
time.sleep(2)
while not rospy.is_shutdown():
n += 1
if n % 3 == 0:
print("これは%d" % n)
pub.publish(n)
else:
pub.publish(n)
if n == 227:
print("\nThis is unko\n")
rate.sleep()
| [
"rospy.is_shutdown",
"rospy.init_node",
"time.sleep",
"rospy.Rate",
"rospy.Publisher"
] | [((80, 104), 'rospy.init_node', 'rospy.init_node', (['"""count"""'], {}), "('count')\n", (95, 104), False, 'import rospy\n'), ((159, 207), 'rospy.Publisher', 'rospy.Publisher', (['"""count_up"""', 'Int32'], {'queue_size': '(1)'}), "('count_up', Int32, queue_size=1)\n", (174, 207), False, 'import rospy\n'), ((238, 252), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (248, 252), False, 'import rospy\n'), ((304, 317), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (314, 317), False, 'import time\n'), ((329, 348), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (346, 348), False, 'import rospy\n')] |
import requests
from bs4 import BeautifulSoup
def Check(auth, mirrors):
response = requests.get("https://www.x.org/releases/individual/proto/")
html = response.content.decode("utf-8")
parsedHtml = BeautifulSoup(html, "html.parser")
links = parsedHtml.find_all("a")
maxVersionMajor = -1
maxVersionMinor = -1
maxVersionPatch = -1
maxVersionSubpatch = -1
for link in links:
if ("inputproto-" in link.text and ".tar.gz" in link.text
and ".sig" not in link.text):
version = link.text.split("-")[1].split(".tar.gz")[0].split(".")
versionMajor = int(version[0])
versionMinor = int(version[1])
if len(version) == 3:
versionPatch = int(version[2])
versionSubpatch = 0
elif len(version) == 2:
versionPatch = 0
versionSubpatch = 0
else:
versionPatch = int(version[2])
versionSubpatch = int(version[3])
if versionMajor > maxVersionMajor:
maxVersionMajor = versionMajor
maxVersionMinor = versionMinor
maxVersionPatch = versionPatch
maxVersionSubpatch = versionSubpatch
versionText = link.text.split("-")[1].split(".tar.gz")[0]
elif (versionMajor == maxVersionMajor
and versionMinor > maxVersionMinor):
maxVersionMinor = versionMinor
maxVersionPatch = versionPatch
maxVersionSubpatch = versionSubpatch
versionText = link.text.split("-")[1].split(".tar.gz")[0]
elif (versionMajor == maxVersionMajor
and versionMinor == maxVersionMinor
and versionPatch > maxVersionPatch):
maxVersionPatch = versionPatch
maxVersionSubpatch = versionSubpatch
versionText = link.text.split("-")[1].split(".tar.gz")[0]
elif (versionMajor == maxVersionMajor
and versionMinor == maxVersionMinor
and versionPatch == maxVersionPatch
and versionSubpatch > maxVersionSubpatch):
maxVersionSubpatch = versionSubpatch
versionText = link.text.split("-")[1].split(".tar.gz")[0]
return versionText
| [
"bs4.BeautifulSoup",
"requests.get"
] | [((88, 148), 'requests.get', 'requests.get', (['"""https://www.x.org/releases/individual/proto/"""'], {}), "('https://www.x.org/releases/individual/proto/')\n", (100, 148), False, 'import requests\n'), ((210, 244), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (223, 244), False, 'from bs4 import BeautifulSoup\n')] |
import os
import string
from collections import Counter
from datetime import datetime
from functools import partial
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
from scipy.stats.stats import chisquare
from tangled_up_in_unicode import block, block_abbr, category, category_long, script
from pandas_profiling.config import Settings
from pandas_profiling.model.summary_helpers_image import (
extract_exif,
hash_image,
is_image_truncated,
open_image,
)
def mad(arr: np.ndarray) -> np.ndarray:
"""Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variability of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
return np.median(np.abs(arr - np.median(arr)))
def named_aggregate_summary(series: pd.Series, key: str) -> dict:
summary = {
f"max_{key}": np.max(series),
f"mean_{key}": np.mean(series),
f"median_{key}": np.median(series),
f"min_{key}": np.min(series),
}
return summary
def length_summary(series: pd.Series, summary: dict = None) -> dict:
if summary is None:
summary = {}
length = series.str.len()
summary.update({"length": length})
summary.update(named_aggregate_summary(length, "length"))
return summary
def file_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
"""
# Transform
stats = series.map(lambda x: os.stat(x))
def convert_datetime(x: float) -> str:
return datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M:%S")
# Transform some more
summary = {
"file_size": stats.map(lambda x: x.st_size),
"file_created_time": stats.map(lambda x: x.st_ctime).map(convert_datetime),
"file_accessed_time": stats.map(lambda x: x.st_atime).map(convert_datetime),
"file_modified_time": stats.map(lambda x: x.st_mtime).map(convert_datetime),
}
return summary
def path_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
"""
# TODO: optimize using value counts
summary = {
"common_prefix": os.path.commonprefix(series.values.tolist())
or "No common prefix",
"stem_counts": series.map(lambda x: os.path.splitext(x)[0]).value_counts(),
"suffix_counts": series.map(lambda x: os.path.splitext(x)[1]).value_counts(),
"name_counts": series.map(lambda x: os.path.basename(x)).value_counts(),
"parent_counts": series.map(lambda x: os.path.dirname(x)).value_counts(),
"anchor_counts": series.map(lambda x: os.path.splitdrive(x)[0]).value_counts(),
}
summary["n_stem_unique"] = len(summary["stem_counts"])
summary["n_suffix_unique"] = len(summary["suffix_counts"])
summary["n_name_unique"] = len(summary["name_counts"])
summary["n_parent_unique"] = len(summary["parent_counts"])
summary["n_anchor_unique"] = len(summary["anchor_counts"])
return summary
def url_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
"""
summary = {
"scheme_counts": series.map(lambda x: x.scheme).value_counts(),
"netloc_counts": series.map(lambda x: x.netloc).value_counts(),
"path_counts": series.map(lambda x: x.path).value_counts(),
"query_counts": series.map(lambda x: x.query).value_counts(),
"fragment_counts": series.map(lambda x: x.fragment).value_counts(),
}
return summary
def count_duplicate_hashes(image_descriptions: dict) -> int:
"""
Args:
image_descriptions:
Returns:
"""
counts = pd.Series(
[x["hash"] for x in image_descriptions if "hash" in x]
).value_counts()
return counts.sum() - len(counts)
def extract_exif_series(image_exifs: list) -> dict:
"""
Args:
image_exifs:
Returns:
"""
exif_keys = []
exif_values: dict = {}
for image_exif in image_exifs:
# Extract key
exif_keys.extend(list(image_exif.keys()))
# Extract values per key
for exif_key, exif_val in image_exif.items():
if exif_key not in exif_values:
exif_values[exif_key] = []
exif_values[exif_key].append(exif_val)
series = {"exif_keys": pd.Series(exif_keys, dtype=object).value_counts().to_dict()}
for k, v in exif_values.items():
series[k] = pd.Series(v).value_counts()
return series
def extract_image_information(
path: Path, exif: bool = False, hash: bool = False
) -> dict:
"""Extracts all image information per file, as opening files is slow
Args:
path: Path to the image
exif: extract exif information
hash: calculate hash (for duplicate detection)
Returns:
A dict containing image information
"""
information: dict = {}
image = open_image(path)
information["opened"] = image is not None
if image is not None:
information["truncated"] = is_image_truncated(image)
if not information["truncated"]:
information["size"] = image.size
if exif:
information["exif"] = extract_exif(image)
if hash:
information["hash"] = hash_image(image)
return information
def image_summary(series: pd.Series, exif: bool = False, hash: bool = False) -> dict:
"""
Args:
series: series to summarize
exif: extract exif information
hash: calculate hash (for duplicate detection)
Returns:
"""
image_information = series.apply(
partial(extract_image_information, exif=exif, hash=hash)
)
summary = {
"n_truncated": sum(
[1 for x in image_information if "truncated" in x and x["truncated"]]
),
"image_dimensions": pd.Series(
[x["size"] for x in image_information if "size" in x],
name="image_dimensions",
),
}
image_widths = summary["image_dimensions"].map(lambda x: x[0])
summary.update(named_aggregate_summary(image_widths, "width"))
image_heights = summary["image_dimensions"].map(lambda x: x[1])
summary.update(named_aggregate_summary(image_heights, "height"))
image_areas = image_widths * image_heights
summary.update(named_aggregate_summary(image_areas, "area"))
if hash:
summary["n_duplicate_hash"] = count_duplicate_hashes(image_information)
if exif:
exif_series = extract_exif_series(
[x["exif"] for x in image_information if "exif" in x]
)
summary["exif_keys_counts"] = exif_series["exif_keys"]
summary["exif_data"] = exif_series
return summary
def get_character_counts(series: pd.Series) -> Counter:
"""Function to return the character counts
Args:
series: the Series to process
Returns:
A dict with character counts
"""
return Counter(series.str.cat())
def counter_to_series(counter: Counter) -> pd.Series:
if not counter:
return pd.Series([], dtype=object)
counter_as_tuples = counter.most_common()
items, counts = zip(*counter_as_tuples)
return pd.Series(counts, index=items)
def unicode_summary(series: pd.Series) -> dict:
# Unicode Character Summaries (category and script name)
character_counts = get_character_counts(series)
character_counts_series = counter_to_series(character_counts)
char_to_block = {key: block(key) for key in character_counts.keys()}
char_to_category_short = {key: category(key) for key in character_counts.keys()}
char_to_script = {key: script(key) for key in character_counts.keys()}
summary = {
"n_characters": len(character_counts_series),
"character_counts": character_counts_series,
"category_alias_values": {
key: category_long(value) for key, value in char_to_category_short.items()
},
"block_alias_values": {
key: block_abbr(value) for key, value in char_to_block.items()
},
}
# Retrieve original distribution
block_alias_counts: Counter = Counter()
per_block_char_counts: dict = {
k: Counter() for k in summary["block_alias_values"].values()
}
for char, n_char in character_counts.items():
block_name = summary["block_alias_values"][char]
block_alias_counts[block_name] += n_char
per_block_char_counts[block_name][char] = n_char
summary["block_alias_counts"] = counter_to_series(block_alias_counts)
summary["block_alias_char_counts"] = {
k: counter_to_series(v) for k, v in per_block_char_counts.items()
}
script_counts: Counter = Counter()
per_script_char_counts: dict = {k: Counter() for k in char_to_script.values()}
for char, n_char in character_counts.items():
script_name = char_to_script[char]
script_counts[script_name] += n_char
per_script_char_counts[script_name][char] = n_char
summary["script_counts"] = counter_to_series(script_counts)
summary["script_char_counts"] = {
k: counter_to_series(v) for k, v in per_script_char_counts.items()
}
category_alias_counts: Counter = Counter()
per_category_alias_char_counts: dict = {
k: Counter() for k in summary["category_alias_values"].values()
}
for char, n_char in character_counts.items():
category_alias_name = summary["category_alias_values"][char]
category_alias_counts[category_alias_name] += n_char
per_category_alias_char_counts[category_alias_name][char] += n_char
summary["category_alias_counts"] = counter_to_series(category_alias_counts)
summary["category_alias_char_counts"] = {
k: counter_to_series(v) for k, v in per_category_alias_char_counts.items()
}
# Unique counts
summary["n_category"] = len(summary["category_alias_counts"])
summary["n_scripts"] = len(summary["script_counts"])
summary["n_block_alias"] = len(summary["block_alias_counts"])
if len(summary["category_alias_counts"]) > 0:
summary["category_alias_counts"].index = summary[
"category_alias_counts"
].index.str.replace("_", " ")
return summary
def histogram_compute(
config: Settings,
finite_values: np.ndarray,
n_unique: int,
name: str = "histogram",
weights: Optional[np.ndarray] = None,
) -> dict:
stats = {}
bins = config.plot.histogram.bins
bins_arg = "auto" if bins == 0 else min(bins, n_unique)
stats[name] = np.histogram(finite_values, bins=bins_arg, weights=weights)
max_bins = config.plot.histogram.max_bins
if bins_arg == "auto" and len(stats[name][1]) > max_bins:
stats[name] = np.histogram(finite_values, bins=max_bins, weights=None)
return stats
def chi_square(
values: Optional[np.ndarray] = None, histogram: Optional[np.ndarray] = None
) -> dict:
if histogram is None:
histogram, _ = np.histogram(values, bins="auto")
return dict(chisquare(histogram)._asdict())
def word_summary(series: pd.Series) -> dict:
# TODO: preprocess (stopwords)
# TODO: configurable lowercase/punctuation etc.
word_lists = series.str.lower().str.split()
words = word_lists.explode()
words = words.str.strip(string.punctuation)
return {"word_counts": words.value_counts()}
| [
"pandas_profiling.model.summary_helpers_image.open_image",
"numpy.mean",
"numpy.histogram",
"tangled_up_in_unicode.category_long",
"os.path.splitdrive",
"tangled_up_in_unicode.category",
"numpy.max",
"numpy.min",
"tangled_up_in_unicode.block",
"tangled_up_in_unicode.script",
"scipy.stats.stats.chisquare",
"pandas_profiling.model.summary_helpers_image.extract_exif",
"pandas_profiling.model.summary_helpers_image.hash_image",
"os.path.splitext",
"os.path.dirname",
"tangled_up_in_unicode.block_abbr",
"pandas.Series",
"numpy.median",
"datetime.datetime.fromtimestamp",
"collections.Counter",
"functools.partial",
"os.path.basename",
"pandas_profiling.model.summary_helpers_image.is_image_truncated",
"os.stat"
] | [((5143, 5159), 'pandas_profiling.model.summary_helpers_image.open_image', 'open_image', (['path'], {}), '(path)\n', (5153, 5159), False, 'from pandas_profiling.model.summary_helpers_image import extract_exif, hash_image, is_image_truncated, open_image\n'), ((7511, 7541), 'pandas.Series', 'pd.Series', (['counts'], {'index': 'items'}), '(counts, index=items)\n', (7520, 7541), True, 'import pandas as pd\n'), ((8482, 8491), 'collections.Counter', 'Counter', ([], {}), '()\n', (8489, 8491), False, 'from collections import Counter\n'), ((9056, 9065), 'collections.Counter', 'Counter', ([], {}), '()\n', (9063, 9065), False, 'from collections import Counter\n'), ((9578, 9587), 'collections.Counter', 'Counter', ([], {}), '()\n', (9585, 9587), False, 'from collections import Counter\n'), ((10939, 10998), 'numpy.histogram', 'np.histogram', (['finite_values'], {'bins': 'bins_arg', 'weights': 'weights'}), '(finite_values, bins=bins_arg, weights=weights)\n', (10951, 10998), True, 'import numpy as np\n'), ((929, 943), 'numpy.max', 'np.max', (['series'], {}), '(series)\n', (935, 943), True, 'import numpy as np\n'), ((969, 984), 'numpy.mean', 'np.mean', (['series'], {}), '(series)\n', (976, 984), True, 'import numpy as np\n'), ((1012, 1029), 'numpy.median', 'np.median', (['series'], {}), '(series)\n', (1021, 1029), True, 'import numpy as np\n'), ((1054, 1068), 'numpy.min', 'np.min', (['series'], {}), '(series)\n', (1060, 1068), True, 'import numpy as np\n'), ((5270, 5295), 'pandas_profiling.model.summary_helpers_image.is_image_truncated', 'is_image_truncated', (['image'], {}), '(image)\n', (5288, 5295), False, 'from pandas_profiling.model.summary_helpers_image import extract_exif, hash_image, is_image_truncated, open_image\n'), ((5893, 5949), 'functools.partial', 'partial', (['extract_image_information'], {'exif': 'exif', 'hash': 'hash'}), '(extract_image_information, exif=exif, hash=hash)\n', (5900, 5949), False, 'from functools import partial\n'), ((6127, 6221), 'pandas.Series', 'pd.Series', (["[x['size'] for x in image_information if 'size' in x]"], {'name': '"""image_dimensions"""'}), "([x['size'] for x in image_information if 'size' in x], name=\n 'image_dimensions')\n", (6136, 6221), True, 'import pandas as pd\n'), ((7377, 7404), 'pandas.Series', 'pd.Series', (['[]'], {'dtype': 'object'}), '([], dtype=object)\n', (7386, 7404), True, 'import pandas as pd\n'), ((7808, 7818), 'tangled_up_in_unicode.block', 'block', (['key'], {}), '(key)\n', (7813, 7818), False, 'from tangled_up_in_unicode import block, block_abbr, category, category_long, script\n'), ((7891, 7904), 'tangled_up_in_unicode.category', 'category', (['key'], {}), '(key)\n', (7899, 7904), False, 'from tangled_up_in_unicode import block, block_abbr, category, category_long, script\n'), ((7969, 7980), 'tangled_up_in_unicode.script', 'script', (['key'], {}), '(key)\n', (7975, 7980), False, 'from tangled_up_in_unicode import block, block_abbr, category, category_long, script\n'), ((8541, 8550), 'collections.Counter', 'Counter', ([], {}), '()\n', (8548, 8550), False, 'from collections import Counter\n'), ((9106, 9115), 'collections.Counter', 'Counter', ([], {}), '()\n', (9113, 9115), False, 'from collections import Counter\n'), ((9646, 9655), 'collections.Counter', 'Counter', ([], {}), '()\n', (9653, 9655), False, 'from collections import Counter\n'), ((11134, 11190), 'numpy.histogram', 'np.histogram', (['finite_values'], {'bins': 'max_bins', 'weights': 'None'}), '(finite_values, bins=max_bins, weights=None)\n', (11146, 11190), True, 'import 
numpy as np\n'), ((11376, 11409), 'numpy.histogram', 'np.histogram', (['values'], {'bins': '"""auto"""'}), "(values, bins='auto')\n", (11388, 11409), True, 'import numpy as np\n'), ((1569, 1579), 'os.stat', 'os.stat', (['x'], {}), '(x)\n', (1576, 1579), False, 'import os\n'), ((3853, 3918), 'pandas.Series', 'pd.Series', (["[x['hash'] for x in image_descriptions if 'hash' in x]"], {}), "([x['hash'] for x in image_descriptions if 'hash' in x])\n", (3862, 3918), True, 'import pandas as pd\n'), ((8199, 8219), 'tangled_up_in_unicode.category_long', 'category_long', (['value'], {}), '(value)\n', (8212, 8219), False, 'from tangled_up_in_unicode import block, block_abbr, category, category_long, script\n'), ((8332, 8349), 'tangled_up_in_unicode.block_abbr', 'block_abbr', (['value'], {}), '(value)\n', (8342, 8349), False, 'from tangled_up_in_unicode import block, block_abbr, category, category_long, script\n'), ((801, 815), 'numpy.median', 'np.median', (['arr'], {}), '(arr)\n', (810, 815), True, 'import numpy as np\n'), ((1643, 1668), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['x'], {}), '(x)\n', (1665, 1668), False, 'from datetime import datetime\n'), ((4663, 4675), 'pandas.Series', 'pd.Series', (['v'], {}), '(v)\n', (4672, 4675), True, 'import pandas as pd\n'), ((5445, 5464), 'pandas_profiling.model.summary_helpers_image.extract_exif', 'extract_exif', (['image'], {}), '(image)\n', (5457, 5464), False, 'from pandas_profiling.model.summary_helpers_image import extract_exif, hash_image, is_image_truncated, open_image\n'), ((5526, 5543), 'pandas_profiling.model.summary_helpers_image.hash_image', 'hash_image', (['image'], {}), '(image)\n', (5536, 5543), False, 'from pandas_profiling.model.summary_helpers_image import extract_exif, hash_image, is_image_truncated, open_image\n'), ((11427, 11447), 'scipy.stats.stats.chisquare', 'chisquare', (['histogram'], {}), '(histogram)\n', (11436, 11447), False, 'from scipy.stats.stats import chisquare\n'), ((2599, 2618), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (2615, 2618), False, 'import os\n'), ((2683, 2701), 'os.path.dirname', 'os.path.dirname', (['x'], {}), '(x)\n', (2698, 2701), False, 'import os\n'), ((4541, 4575), 'pandas.Series', 'pd.Series', (['exif_keys'], {'dtype': 'object'}), '(exif_keys, dtype=object)\n', (4550, 4575), True, 'import pandas as pd\n'), ((2427, 2446), 'os.path.splitext', 'os.path.splitext', (['x'], {}), '(x)\n', (2443, 2446), False, 'import os\n'), ((2514, 2533), 'os.path.splitext', 'os.path.splitext', (['x'], {}), '(x)\n', (2530, 2533), False, 'import os\n'), ((2766, 2787), 'os.path.splitdrive', 'os.path.splitdrive', (['x'], {}), '(x)\n', (2784, 2787), False, 'import os\n')] |
# Author: <NAME>
# Copyright (c) 2019, <NAME>
# All rights reserved.
# based on github.com/ClementPinard/SfMLearner-Pytorch
from __future__ import division
import torch
from torch.autograd import Variable
pixel_coords = None
def set_id_grid(depth):
global pixel_coords
b, h, w = depth.size()
i_range = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w)).type_as(depth) # [1, H, W]
j_range = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w)).type_as(depth) # [1, H, W]
ones = Variable(torch.ones(1,h,w)).type_as(depth)
pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]
def check_sizes(input, input_name, expected):
condition = [input.ndimension() == len(expected)]
for i,size in enumerate(expected):
if size.isdigit():
condition.append(input.size(i) == int(size))
assert(all(condition)), "wrong size for {}, expected {}, got {}".format(input_name, 'x'.join(expected), list(input.size()))
def pixel2cam(depth, intrinsics_inv):
global pixel_coords
"""Transform coordinates in the pixel frame to the camera frame.
Args:
depth: depth maps -- [B, H, W]
intrinsics_inv: intrinsics_inv matrix for each element of batch -- [B, 3, 3]
Returns:
array of (u,v,1) cam coordinates -- [B, 3, H, W]
"""
b, h, w = depth.size()
if (pixel_coords is None) or pixel_coords.size(2) != h or pixel_coords.size(3) != w:
set_id_grid(depth)
current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).contiguous().view(b, 3, -1) # [B, 3, H*W]
cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b, 3, h, w)
return cam_coords * depth.unsqueeze(1)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
"""Transform coordinates in the camera frame to the pixel frame.
Args:
cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 4, H, W]
proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4]
proj_c2p_tr: translation vectors of cameras -- [B, 3, 1]
Returns:
array of [-1,1] coordinates -- [B, 2, H, W]
"""
b, _, h, w = cam_coords.size()
cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W]
if proj_c2p_rot is not None:
pcoords = proj_c2p_rot.bmm(cam_coords_flat)
else:
pcoords = cam_coords_flat
if proj_c2p_tr is not None:
pcoords = pcoords + proj_c2p_tr # [B, 3, H*W]
X = pcoords[:, 0]
Y = pcoords[:, 1]
Z = pcoords[:, 2].clamp(min=1e-3)
X_norm = 2*(X / Z)/(w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]
Y_norm = 2*(Y / Z)/(h-1) - 1 # Idem [B, H*W]
if padding_mode == 'zeros':
X_mask = ((X_norm > 1)+(X_norm < -1)).detach()
X_norm[X_mask] = 2 # make sure that no point in warped image is a combinaison of im and gray
Y_mask = ((Y_norm > 1)+(Y_norm < -1)).detach()
Y_norm[Y_mask] = 2
pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]
return pixel_coords.view(b,h,w,2)
def euler2mat(angle):
"""Convert euler angles to rotation matrix.
Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
Args:
angle: rotation angle along 3 axis (in radians) -- size = [B, 3]
Returns:
Rotation matrix corresponding to the euler angles -- size = [B, 3, 3]
"""
B = angle.size(0)
x, y, z = angle[:,0], angle[:,1], angle[:,2]
cosz = torch.cos(z)
sinz = torch.sin(z)
zeros = z.detach()*0
ones = zeros.detach()+1
zmat = torch.stack([cosz, -sinz, zeros,
sinz, cosz, zeros,
zeros, zeros, ones], dim=1).view(B, 3, 3)
cosy = torch.cos(y)
siny = torch.sin(y)
ymat = torch.stack([cosy, zeros, siny,
zeros, ones, zeros,
-siny, zeros, cosy], dim=1).view(B, 3, 3)
cosx = torch.cos(x)
sinx = torch.sin(x)
xmat = torch.stack([ones, zeros, zeros,
zeros, cosx, -sinx,
zeros, sinx, cosx], dim=1).view(B, 3, 3)
rotMat = xmat.bmm(ymat).bmm(zmat)
return rotMat
def quat2mat(quat):
"""Convert quaternion coefficients to rotation matrix.
Args:
quat: first three coeff of quaternion of rotation. fourht is then computed to have a norm of 1 -- size = [B, 3]
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
norm_quat = torch.cat([quat[:,:1].detach()*0 + 1, quat], dim=1)
norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
return rotMat
def pose_vec2mat(vec, rotation_mode='euler'):
"""
Convert 6DoF parameters to transformation matrix.
Args:s
vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]
Returns:
A transformation matrix -- [B, 3, 4]
"""
translation = vec[:, :3].unsqueeze(-1) # [B, 3, 1]
rot = vec[:,3:]
if rotation_mode == 'euler':
rot_mat = euler2mat(rot) # [B, 3, 3]
elif rotation_mode == 'quat':
rot_mat = quat2mat(rot) # [B, 3, 3]
transform_mat = torch.cat([rot_mat, translation], dim=2) # [B, 3, 4]
return transform_mat
def flow_warp(img, flow, padding_mode='zeros'):
"""
Inverse warp a source image to the target image plane.
Args:
img: the source image (where to sample pixels) -- [B, 3, H, W]
flow: flow map of the target image -- [B, 2, H, W]
Returns:
Source image warped to the target image plane
"""
check_sizes(img, 'img', 'BCHW')
check_sizes(flow, 'flow', 'B2HW')
bs, _, h, w = flow.size()
u = flow[:,0,:,:]
v = flow[:,1,:,:]
grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(u).expand_as(u) # [bs, H, W]
grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(v).expand_as(v) # [bs, H, W]
X = grid_x + u
Y = grid_y + v
X = 2*(X/(w-1.0) - 0.5)
Y = 2*(Y/(h-1.0) - 0.5)
grid_tf = torch.stack((X,Y), dim=3)
img_tf = torch.nn.functional.grid_sample(img, grid_tf, padding_mode=padding_mode)
return img_tf
def pose2flow(depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode=None):
"""
Converts pose parameters to rigid optical flow
"""
check_sizes(depth, 'depth', 'BHW')
check_sizes(pose, 'pose', 'B6')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert(intrinsics_inv.size() == intrinsics.size())
bs, h, w = depth.size()
grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth) # [bs, H, W]
grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth) # [bs, H, W]
cam_coords = pixel2cam(depth, intrinsics_inv) # [B,3,H,W]
pose_mat = pose_vec2mat(pose, rotation_mode) # [B,3,4]
# Get projection matrix for tgt camera frame to source pixel frame
proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4]
src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode) # [B,H,W,2]
X = (w-1)*(src_pixel_coords[:,:,:,0]/2.0 + 0.5) - grid_x
Y = (h-1)*(src_pixel_coords[:,:,:,1]/2.0 + 0.5) - grid_y
return torch.stack((X,Y), dim=1)
def flow2oob(flow):
check_sizes(flow, 'flow', 'B2HW')
bs, _, h, w = flow.size()
u = flow[:,0,:,:]
v = flow[:,1,:,:]
grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(u).expand_as(u) # [bs, H, W]
grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(v).expand_as(v) # [bs, H, W]
X = grid_x + u
Y = grid_y + v
X = 2*(X/(w-1.0) - 0.5)
Y = 2*(Y/(h-1.0) - 0.5)
oob = (X.abs()>1).add(Y.abs()>1)>0
return oob
def occlusion_mask(grid, depth):
    check_sizes(grid, 'grid', 'BHW2')
check_sizes(depth, 'depth', 'BHW')
mask = grid
return mask
def inverse_warp(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros'):
"""
Inverse warp a source image to the target image plane.
Args:
img: the source image (where to sample pixels) -- [B, 3, H, W]
depth: depth map of the target image -- [B, H, W]
pose: 6DoF pose parameters from target to source -- [B, 6]
intrinsics: camera intrinsic matrix -- [B, 3, 3]
intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]
Returns:
Source image warped to the target image plane
"""
check_sizes(img, 'img', 'B3HW')
check_sizes(depth, 'depth', 'BHW')
check_sizes(pose, 'pose', 'B6')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert(intrinsics_inv.size() == intrinsics.size())
batch_size, _, img_height, img_width = img.size()
cam_coords = pixel2cam(depth, intrinsics_inv) # [B,3,H,W]
pose_mat = pose_vec2mat(pose, rotation_mode) # [B,3,4]
# Get projection matrix for tgt camera frame to source pixel frame
proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4]
src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode) # [B,H,W,2]
projected_img = torch.nn.functional.grid_sample(img, src_pixel_coords, padding_mode=padding_mode)
return projected_img
| [
"torch.nn.functional.grid_sample",
"torch.stack",
"torch.sin",
"torch.cos",
"torch.arange",
"torch.cat",
"torch.ones"
] | [((576, 620), 'torch.stack', 'torch.stack', (['(j_range, i_range, ones)'], {'dim': '(1)'}), '((j_range, i_range, ones), dim=1)\n', (587, 620), False, 'import torch\n'), ((3008, 3044), 'torch.stack', 'torch.stack', (['[X_norm, Y_norm]'], {'dim': '(2)'}), '([X_norm, Y_norm], dim=2)\n', (3019, 3044), False, 'import torch\n'), ((3525, 3537), 'torch.cos', 'torch.cos', (['z'], {}), '(z)\n', (3534, 3537), False, 'import torch\n'), ((3549, 3561), 'torch.sin', 'torch.sin', (['z'], {}), '(z)\n', (3558, 3561), False, 'import torch\n'), ((3783, 3795), 'torch.cos', 'torch.cos', (['y'], {}), '(y)\n', (3792, 3795), False, 'import torch\n'), ((3807, 3819), 'torch.sin', 'torch.sin', (['y'], {}), '(y)\n', (3816, 3819), False, 'import torch\n'), ((3989, 4001), 'torch.cos', 'torch.cos', (['x'], {}), '(x)\n', (3998, 4001), False, 'import torch\n'), ((4013, 4025), 'torch.sin', 'torch.sin', (['x'], {}), '(x)\n', (4022, 4025), False, 'import torch\n'), ((5683, 5723), 'torch.cat', 'torch.cat', (['[rot_mat, translation]'], {'dim': '(2)'}), '([rot_mat, translation], dim=2)\n', (5692, 5723), False, 'import torch\n'), ((6612, 6638), 'torch.stack', 'torch.stack', (['(X, Y)'], {'dim': '(3)'}), '((X, Y), dim=3)\n', (6623, 6638), False, 'import torch\n'), ((6651, 6723), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['img', 'grid_tf'], {'padding_mode': 'padding_mode'}), '(img, grid_tf, padding_mode=padding_mode)\n', (6682, 6723), False, 'import torch\n'), ((7978, 8004), 'torch.stack', 'torch.stack', (['(X, Y)'], {'dim': '(1)'}), '((X, Y), dim=1)\n', (7989, 8004), False, 'import torch\n'), ((10021, 10107), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['img', 'src_pixel_coords'], {'padding_mode': 'padding_mode'}), '(img, src_pixel_coords, padding_mode=\n padding_mode)\n', (10052, 10107), False, 'import torch\n'), ((3627, 3706), 'torch.stack', 'torch.stack', (['[cosz, -sinz, zeros, sinz, cosz, zeros, zeros, zeros, ones]'], {'dim': '(1)'}), '([cosz, -sinz, zeros, sinz, cosz, zeros, zeros, zeros, ones], dim=1)\n', (3638, 3706), False, 'import torch\n'), ((3832, 3911), 'torch.stack', 'torch.stack', (['[cosy, zeros, siny, zeros, ones, zeros, -siny, zeros, cosy]'], {'dim': '(1)'}), '([cosy, zeros, siny, zeros, ones, zeros, -siny, zeros, cosy], dim=1)\n', (3843, 3911), False, 'import torch\n'), ((4038, 4117), 'torch.stack', 'torch.stack', (['[ones, zeros, zeros, zeros, cosx, -sinx, zeros, sinx, cosx]'], {'dim': '(1)'}), '([ones, zeros, zeros, zeros, cosx, -sinx, zeros, sinx, cosx], dim=1)\n', (4049, 4117), False, 'import torch\n'), ((4923, 5111), 'torch.stack', 'torch.stack', (['[w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + 2 * xy, w2 -\n x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 -\n x2 - y2 + z2]'], {'dim': '(1)'}), '([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, 2 * wz + \n 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx, 2 * xz - 2 * wy, 2 * wx + 2 *\n yz, w2 - x2 - y2 + z2], dim=1)\n', (4934, 5111), False, 'import torch\n'), ((522, 541), 'torch.ones', 'torch.ones', (['(1)', 'h', 'w'], {}), '(1, h, w)\n', (532, 541), False, 'import torch\n'), ((327, 345), 'torch.arange', 'torch.arange', (['(0)', 'h'], {}), '(0, h)\n', (339, 345), False, 'import torch\n'), ((426, 444), 'torch.arange', 'torch.arange', (['(0)', 'w'], {}), '(0, w)\n', (438, 444), False, 'import torch\n'), ((6266, 6284), 'torch.arange', 'torch.arange', (['(0)', 'w'], {}), '(0, w)\n', (6278, 6284), False, 'import torch\n'), ((6395, 6413), 'torch.arange', 
'torch.arange', (['(0)', 'h'], {}), '(0, h)\n', (6407, 6413), False, 'import torch\n'), ((7194, 7212), 'torch.arange', 'torch.arange', (['(0)', 'w'], {}), '(0, w)\n', (7206, 7212), False, 'import torch\n'), ((7331, 7349), 'torch.arange', 'torch.arange', (['(0)', 'h'], {}), '(0, h)\n', (7343, 7349), False, 'import torch\n'), ((8161, 8179), 'torch.arange', 'torch.arange', (['(0)', 'w'], {}), '(0, w)\n', (8173, 8179), False, 'import torch\n'), ((8290, 8308), 'torch.arange', 'torch.arange', (['(0)', 'h'], {}), '(0, h)\n', (8302, 8308), False, 'import torch\n')] |
import uuid
from invenio_indexer.api import RecordIndexer
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_records_draft.api import RecordContext
from invenio_records_draft.proxies import current_drafts
from invenio_search import RecordsSearch, current_search, current_search_client
from sample.records.config import DraftRecord, PublishedRecord
from tests.helpers import disable_test_authenticated
def test_publish(app, db, schemas, mappings, prepare_es):
with disable_test_authenticated():
with db.session.begin_nested():
draft_uuid = uuid.uuid4()
rec1 = DraftRecord.create({
'id': '1',
'title': 'rec1'
}, id_=draft_uuid)
draft1_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
published_uuid = uuid.uuid4()
published = PublishedRecord.create({
'id': '3',
'title': 'rec1a'
}, id_=published_uuid)
published_pid = PersistentIdentifier.create(
pid_type='recid', pid_value='3', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
draft2_uuid = uuid.uuid4()
rec2 = DraftRecord.create({
'id': '2',
'title': 'rec2',
'ref': {'$ref': 'http://localhost/drafts/records/1'},
'ref_pub': {'$ref': 'http://localhost/records/3'}
}, id_=draft2_uuid)
draft2_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='2', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft2_uuid
)
RecordIndexer().index(rec2)
current_search_client.indices.refresh()
current_search_client.indices.flush()
es_draft2 = RecordsSearch(index='draft-records-record-v1.0.0').\
get_record(draft2_pid.object_uuid).execute()
assert len(es_draft2.hits) == 1
current_drafts.publish(RecordContext(record=rec2, record_pid=draft2_pid))
published2_pid = PersistentIdentifier.get(pid_type='recid', pid_value=draft2_pid.pid_value)
pr = PublishedRecord.get_record(published2_pid.object_uuid)
assert pr.dumps() == {
'$schema': 'https://localhost/schemas/records/record-v1.0.0.json',
'id': '2',
'ref': {'$ref': 'http://localhost/records/1'},
'ref_pub': {'$ref': 'http://localhost/records/3'},
'title': 'rec2'
}
current_search_client.indices.refresh()
current_search_client.indices.flush()
es_published2 = RecordsSearch(index='records-record-v1.0.0').\
get_record(published2_pid.object_uuid).execute()
assert len(es_published2.hits) == 1
es_published2 = es_published2.hits[0].to_dict()
es_published2.pop('_created')
es_published2.pop('_updated')
assert es_published2 == {
'$schema': 'https://localhost/schemas/records/record-v1.0.0.json',
'id': '2',
'ref': {'published': '1'},
'ref_pub': {'published': '3'},
'title': 'rec2'}
es_draft2 = RecordsSearch(index='draft-records-record-v1.0.0').\
get_record(draft2_pid.object_uuid).execute()
assert len(es_draft2.hits) == 0
| [
"sample.records.config.PublishedRecord.get_record",
"invenio_indexer.api.RecordIndexer",
"invenio_pidstore.models.PersistentIdentifier.get",
"uuid.uuid4",
"sample.records.config.PublishedRecord.create",
"invenio_pidstore.models.PersistentIdentifier.create",
"sample.records.config.DraftRecord.create",
"invenio_search.RecordsSearch",
"invenio_records_draft.api.RecordContext",
"tests.helpers.disable_test_authenticated",
"invenio_search.current_search_client.indices.refresh",
"invenio_search.current_search_client.indices.flush"
] | [((501, 529), 'tests.helpers.disable_test_authenticated', 'disable_test_authenticated', ([], {}), '()\n', (527, 529), False, 'from tests.helpers import disable_test_authenticated\n'), ((1906, 1945), 'invenio_search.current_search_client.indices.refresh', 'current_search_client.indices.refresh', ([], {}), '()\n', (1943, 1945), False, 'from invenio_search import RecordsSearch, current_search, current_search_client\n'), ((1954, 1991), 'invenio_search.current_search_client.indices.flush', 'current_search_client.indices.flush', ([], {}), '()\n', (1989, 1991), False, 'from invenio_search import RecordsSearch, current_search, current_search_client\n'), ((2272, 2346), 'invenio_pidstore.models.PersistentIdentifier.get', 'PersistentIdentifier.get', ([], {'pid_type': '"""recid"""', 'pid_value': 'draft2_pid.pid_value'}), "(pid_type='recid', pid_value=draft2_pid.pid_value)\n", (2296, 2346), False, 'from invenio_pidstore.models import PersistentIdentifier, PIDStatus\n'), ((2360, 2414), 'sample.records.config.PublishedRecord.get_record', 'PublishedRecord.get_record', (['published2_pid.object_uuid'], {}), '(published2_pid.object_uuid)\n', (2386, 2414), False, 'from sample.records.config import DraftRecord, PublishedRecord\n'), ((2717, 2756), 'invenio_search.current_search_client.indices.refresh', 'current_search_client.indices.refresh', ([], {}), '()\n', (2754, 2756), False, 'from invenio_search import RecordsSearch, current_search, current_search_client\n'), ((2765, 2802), 'invenio_search.current_search_client.indices.flush', 'current_search_client.indices.flush', ([], {}), '()\n', (2800, 2802), False, 'from invenio_search import RecordsSearch, current_search, current_search_client\n'), ((596, 608), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (606, 608), False, 'import uuid\n'), ((629, 693), 'sample.records.config.DraftRecord.create', 'DraftRecord.create', (["{'id': '1', 'title': 'rec1'}"], {'id_': 'draft_uuid'}), "({'id': '1', 'title': 'rec1'}, id_=draft_uuid)\n", (647, 693), False, 'from sample.records.config import DraftRecord, PublishedRecord\n'), ((765, 903), 'invenio_pidstore.models.PersistentIdentifier.create', 'PersistentIdentifier.create', ([], {'pid_type': '"""drecid"""', 'pid_value': '"""1"""', 'status': 'PIDStatus.REGISTERED', 'object_type': '"""rec"""', 'object_uuid': 'draft_uuid'}), "(pid_type='drecid', pid_value='1', status=\n PIDStatus.REGISTERED, object_type='rec', object_uuid=draft_uuid)\n", (792, 903), False, 'from invenio_pidstore.models import PersistentIdentifier, PIDStatus\n'), ((975, 987), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (985, 987), False, 'import uuid\n'), ((1012, 1085), 'sample.records.config.PublishedRecord.create', 'PublishedRecord.create', (["{'id': '3', 'title': 'rec1a'}"], {'id_': 'published_uuid'}), "({'id': '3', 'title': 'rec1a'}, id_=published_uuid)\n", (1034, 1085), False, 'from sample.records.config import DraftRecord, PublishedRecord\n'), ((1160, 1301), 'invenio_pidstore.models.PersistentIdentifier.create', 'PersistentIdentifier.create', ([], {'pid_type': '"""recid"""', 'pid_value': '"""3"""', 'status': 'PIDStatus.REGISTERED', 'object_type': '"""rec"""', 'object_uuid': 'published_uuid'}), "(pid_type='recid', pid_value='3', status=\n PIDStatus.REGISTERED, object_type='rec', object_uuid=published_uuid)\n", (1187, 1301), False, 'from invenio_pidstore.models import PersistentIdentifier, PIDStatus\n'), ((1370, 1382), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1380, 1382), False, 'import uuid\n'), ((1402, 1580), 
'sample.records.config.DraftRecord.create', 'DraftRecord.create', (["{'id': '2', 'title': 'rec2', 'ref': {'$ref':\n 'http://localhost/drafts/records/1'}, 'ref_pub': {'$ref':\n 'http://localhost/records/3'}}"], {'id_': 'draft2_uuid'}), "({'id': '2', 'title': 'rec2', 'ref': {'$ref':\n 'http://localhost/drafts/records/1'}, 'ref_pub': {'$ref':\n 'http://localhost/records/3'}}, id_=draft2_uuid)\n", (1420, 1580), False, 'from sample.records.config import DraftRecord, PublishedRecord\n'), ((1676, 1815), 'invenio_pidstore.models.PersistentIdentifier.create', 'PersistentIdentifier.create', ([], {'pid_type': '"""drecid"""', 'pid_value': '"""2"""', 'status': 'PIDStatus.REGISTERED', 'object_type': '"""rec"""', 'object_uuid': 'draft2_uuid'}), "(pid_type='drecid', pid_value='2', status=\n PIDStatus.REGISTERED, object_type='rec', object_uuid=draft2_uuid)\n", (1703, 1815), False, 'from invenio_pidstore.models import PersistentIdentifier, PIDStatus\n'), ((2195, 2244), 'invenio_records_draft.api.RecordContext', 'RecordContext', ([], {'record': 'rec2', 'record_pid': 'draft2_pid'}), '(record=rec2, record_pid=draft2_pid)\n', (2208, 2244), False, 'from invenio_records_draft.api import RecordContext\n'), ((1869, 1884), 'invenio_indexer.api.RecordIndexer', 'RecordIndexer', ([], {}), '()\n', (1882, 1884), False, 'from invenio_indexer.api import RecordIndexer\n'), ((2013, 2063), 'invenio_search.RecordsSearch', 'RecordsSearch', ([], {'index': '"""draft-records-record-v1.0.0"""'}), "(index='draft-records-record-v1.0.0')\n", (2026, 2063), False, 'from invenio_search import RecordsSearch, current_search, current_search_client\n'), ((2828, 2872), 'invenio_search.RecordsSearch', 'RecordsSearch', ([], {'index': '"""records-record-v1.0.0"""'}), "(index='records-record-v1.0.0')\n", (2841, 2872), False, 'from invenio_search import RecordsSearch, current_search, current_search_client\n'), ((3380, 3430), 'invenio_search.RecordsSearch', 'RecordsSearch', ([], {'index': '"""draft-records-record-v1.0.0"""'}), "(index='draft-records-record-v1.0.0')\n", (3393, 3430), False, 'from invenio_search import RecordsSearch, current_search, current_search_client\n')] |
import os
import subprocess
from collections import namedtuple
import gi
gi.require_version("Gst", "1.0")
gi.require_version("Tcam", "0.1")
from gi.repository import Tcam, Gst, GLib, GObject
DeviceInfo = namedtuple("DeviceInfo", "status name identifier connection_type")
CameraProperty = namedtuple("CameraProperty", "status value min max default step type flags category group")
# Disable pylint false positives
# pylint:disable=E0712
class Camera:
""""""
def __init__(self, serial, width, height, framerate, color, liveview):
""" Constructor.
Creates the sink pipeline and the source pipeline.
:param serial: Serial number of the camera to use.
:param width: Width of the video format, e.g. 640, 1920 etc,
:param height: Height of the video format, e.g. 480, 1080
:param framerate: Numerator of the frame rate, e.g. 15, 30, 60 etc
:param color: If True, color is used, else gray scale
:param liveview: If True an own live window is opened.
"""
Gst.init([])
self.height = height
self.width = width
self.sample = None
self.samplelocked = False
self.newsample = False
self.pid = -1
self.__remove_tmp_file()
pixelformat = "BGRx"
if not color:
pixelformat = "GRAY8"
if liveview:
p = 'tcambin serial="%s" name=source ! video/x-raw,format=%s,width=%d,height=%d,framerate=%d/1' % (serial, pixelformat, width, height, framerate,)
p += ' ! tee name=t'
p += ' t. ! queue ! videoconvert ! video/x-raw,format=RGB ,width=%d,height=%d,framerate=%d/1! shmsink socket-path=/tmp/ros_mem' % (width, height, framerate,)
p += ' t. ! queue ! videoconvert ! ximagesink'
else:
p = 'tcambin serial="%s" name=source ! video/x-raw,format=%s,width=%d,height=%d,framerate=%d/1' % (
serial, pixelformat, width, height, framerate,)
p += ' ! videoconvert ! video/x-raw,format=RGB ,width=%d,height=%d,framerate=%d/1! shmsink socket-path=/tmp/ros_mem' % (width, height, framerate,)
print(p)
try:
self.pipeline = Gst.parse_launch(p)
except GLib.Error as error:
raise RuntimeError("Error creating pipeline: {0}".format(error))
self.pipeline.set_state(Gst.State.READY)
if self.pipeline.get_state(10 * Gst.SECOND)[0] != Gst.StateChangeReturn.SUCCESS:
raise RuntimeError("Failed to start video stream.")
# Query a pointer to our source, so we can set properties.
self.source = self.pipeline.get_by_name("source")
# Create gscam_config variable with content
gscam = 'shmsrc socket-path=/tmp/ros_mem ! video/x-raw-rgb, width=%d,height=%d,framerate=%d/1' % (width, height, framerate,)
gscam += ',bpp=24,depth=24,blue_mask=16711680, green_mask=65280, red_mask=255 ! ffmpegcolorspace'
os.environ["GSCAM_CONFIG"] = gscam
def start_pipeline(self):
""" Starts the camera sink pipeline and the rosrun process
:return:
"""
try:
self.pipeline.set_state(Gst.State.PLAYING)
self.pid = subprocess.Popen(["rosrun", "gscam", "gscam"])
except GLib.Error as error:
print("Error starting pipeline: {0}".format(error))
raise
def stop_pipeline(self):
""" Stops the camera pipeline. Should also kill the rosrun process, but is not implemented
:return:
"""
self.pipeline.set_state(Gst.State.PAUSED)
self.pipeline.set_state(Gst.State.READY)
self.pipeline.set_state(Gst.State.NULL)
self.pid.kill()
def list_properties(self):
""" Helper function. List available properties
:return:
"""
for name in self.source.get_tcam_property_names():
print(name)
def get_property(self, property_name):
""" Return the value of the passed property.
Use list_properties for querying names of available properties.
:param property_name: Name of the property, e.g. Gain, Exposure, Gain Auto.
:return: Current value of the property.
"""
try:
return CameraProperty(*self.source.get_tcam_property(property_name))
except GLib.Error as error:
raise RuntimeError("Error get Property {0}: {1}", property_name, format(error))
def set_property(self, property_name, value):
""" Set a property. Use list_properties for querying names of available properties.
:param property_name: Name of the property, e.g. Gain, Exposure, Gain Auto.
:param value: Value to be set.
:return:
"""
try:
self.source.set_tcam_property(property_name, value)
except GLib.Error as error:
raise RuntimeError("Error set Property {0}: {1}", property_name, format(error))
def push_property(self, property_name):
""" Simplify push properties, like Auto Focus one push
:param property_name: Name of the property to be pushed
:return:
"""
try:
self.source.set_tcam_property(property_name, True)
except GLib.Error as error:
raise RuntimeError("Error set Property {0}: {1}", property_name, format(error))
def __remove_tmp_file(self):
""" Delete the memory file used by the pipelines to share memory
:return:
"""
try:
os.remove('/tmp/ros_mem')
except OSError:
pass
| [
"collections.namedtuple",
"gi.repository.Gst.parse_launch",
"gi.repository.Gst.init",
"subprocess.Popen",
"gi.require_version",
"os.remove"
] | [((74, 106), 'gi.require_version', 'gi.require_version', (['"""Gst"""', '"""1.0"""'], {}), "('Gst', '1.0')\n", (92, 106), False, 'import gi\n'), ((107, 140), 'gi.require_version', 'gi.require_version', (['"""Tcam"""', '"""0.1"""'], {}), "('Tcam', '0.1')\n", (125, 140), False, 'import gi\n'), ((207, 273), 'collections.namedtuple', 'namedtuple', (['"""DeviceInfo"""', '"""status name identifier connection_type"""'], {}), "('DeviceInfo', 'status name identifier connection_type')\n", (217, 273), False, 'from collections import namedtuple\n'), ((291, 386), 'collections.namedtuple', 'namedtuple', (['"""CameraProperty"""', '"""status value min max default step type flags category group"""'], {}), "('CameraProperty',\n 'status value min max default step type flags category group')\n", (301, 386), False, 'from collections import namedtuple\n'), ((1041, 1053), 'gi.repository.Gst.init', 'Gst.init', (['[]'], {}), '([])\n', (1049, 1053), False, 'from gi.repository import Tcam, Gst, GLib, GObject\n'), ((2194, 2213), 'gi.repository.Gst.parse_launch', 'Gst.parse_launch', (['p'], {}), '(p)\n', (2210, 2213), False, 'from gi.repository import Tcam, Gst, GLib, GObject\n'), ((3210, 3256), 'subprocess.Popen', 'subprocess.Popen', (["['rosrun', 'gscam', 'gscam']"], {}), "(['rosrun', 'gscam', 'gscam'])\n", (3226, 3256), False, 'import subprocess\n'), ((5513, 5538), 'os.remove', 'os.remove', (['"""/tmp/ros_mem"""'], {}), "('/tmp/ros_mem')\n", (5522, 5538), False, 'import os\n')] |
from sqlalchemy import exc
from sqlalchemy.sql.expression import func
from models import Watchlist, Portfolio, Activity
from app import db
import metric
def buy_stock(ticker, units):
unit_price = metric.get_price(ticker)
total_price = units * unit_price
max_id = db.session.query(func.max(Activity.activity_id)).scalar()
if max_id is None:
old_buying_power = 100000
else:
old_buying_power = Activity.query.filter(Activity.activity_id == max_id).all()[0].buying_power
new_buying_power = old_buying_power - total_price
if new_buying_power > 0:
try:
db.session.add( Activity(ticker=ticker,
units=units, order_type= "b", unit_price=unit_price, total_price=total_price, buying_power=new_buying_power) )
update_portfolio_buy(ticker, units, total_price)
db.session.commit()
return { 'status': True, 'error': None }
except exc.SQLAlchemyError:
return { 'status': False, 'error': 'database error' }
else:
return { 'status': False, 'error': 'Insufficient Funds' }
def sell_stock(ticker, units):
unit_price = metric.get_price(ticker)
row = Portfolio.query.filter(Portfolio.ticker == ticker).all()
if len(row):
available_units = int(row[0].total_units)
units = min(available_units, units) if units >= 1 else int(available_units*units)
total_price = units * unit_price
max_id = db.session.query(func.max(Activity.activity_id)).scalar()
old_buying_power = Activity.query.filter(Activity.activity_id == max_id).all()[0].buying_power
new_buying_power = old_buying_power + total_price
try:
db.session.add( Activity(ticker=ticker,
units=units, order_type= "s", unit_price=unit_price, total_price=total_price, buying_power=new_buying_power) )
update_portfolio_sell(ticker, units, total_price)
db.session.commit()
return { 'status': True, 'amount': units, 'error': None }
except exc.SQLAlchemyError:
return { 'status': False, 'error': 'database error' }
else:
return { 'status': False, 'error': 'No Stock by this name' }
def update_portfolio_buy(ticker, units, total_price):
row = Portfolio.query.filter(Portfolio.ticker == ticker).all()
if len(row):
row[0].total_units = int(row[0].total_units) + units
row[0].total_invested = int(row[0].total_invested) + total_price
else:
db.session.add( Portfolio(ticker=ticker, total_units=units, total_invested=total_price) )
def update_portfolio_sell(ticker, units, total_price):
row = Portfolio.query.filter(Portfolio.ticker == ticker).all()
if len(row):
row[0].total_invested = int(row[0].total_invested) - ((int(row[0].total_invested)/int(row[0].total_units)) * units)
row[0].total_units = int(row[0].total_units) - units
Portfolio.query.filter(Portfolio.total_units == 0).delete()
def get_watchlist():
rows = Watchlist.query.all()
if len(rows):
watchlist = [row.ticker for row in rows]
else:
watchlist = []
return watchlist
def get_portfolio():
rows = Portfolio.query.all()
portfolio = [{'ticker':row.ticker, 'total_units':row.total_units, 'total_invested':row.total_invested} for row in rows]
return portfolio
def is_stock_in_watchlist(ticker):
rows = Watchlist.query.filter(Watchlist.ticker == ticker).all()
return True if len(rows) else False
def add_to_watchlist(ticker):
industry = metric.get_company(ticker)["industry"]
try:
db.session.add( Watchlist(ticker=ticker, industry=industry) )
db.session.commit()
return True
except exc.SQLAlchemyError:
return False
def remove_from_watchlist(ticker):
try:
Watchlist.query.filter(Watchlist.ticker == ticker).delete()
db.session.commit()
return True
except exc.SQLAlchemyError:
return False
| [
"models.Watchlist",
"app.db.session.commit",
"models.Portfolio",
"models.Watchlist.query.all",
"metric.get_company",
"models.Portfolio.query.filter",
"models.Activity",
"models.Portfolio.query.all",
"models.Activity.query.filter",
"models.Watchlist.query.filter",
"sqlalchemy.sql.expression.func.max",
"metric.get_price"
] | [((202, 226), 'metric.get_price', 'metric.get_price', (['ticker'], {}), '(ticker)\n', (218, 226), False, 'import metric\n'), ((1165, 1189), 'metric.get_price', 'metric.get_price', (['ticker'], {}), '(ticker)\n', (1181, 1189), False, 'import metric\n'), ((3039, 3060), 'models.Watchlist.query.all', 'Watchlist.query.all', ([], {}), '()\n', (3058, 3060), False, 'from models import Watchlist, Portfolio, Activity\n'), ((3216, 3237), 'models.Portfolio.query.all', 'Portfolio.query.all', ([], {}), '()\n', (3235, 3237), False, 'from models import Watchlist, Portfolio, Activity\n'), ((3573, 3599), 'metric.get_company', 'metric.get_company', (['ticker'], {}), '(ticker)\n', (3591, 3599), False, 'import metric\n'), ((3699, 3718), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3716, 3718), False, 'from app import db\n'), ((3913, 3932), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3930, 3932), False, 'from app import db\n'), ((865, 884), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (882, 884), False, 'from app import db\n'), ((1200, 1250), 'models.Portfolio.query.filter', 'Portfolio.query.filter', (['(Portfolio.ticker == ticker)'], {}), '(Portfolio.ticker == ticker)\n', (1222, 1250), False, 'from models import Watchlist, Portfolio, Activity\n'), ((1960, 1979), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1977, 1979), False, 'from app import db\n'), ((2296, 2346), 'models.Portfolio.query.filter', 'Portfolio.query.filter', (['(Portfolio.ticker == ticker)'], {}), '(Portfolio.ticker == ticker)\n', (2318, 2346), False, 'from models import Watchlist, Portfolio, Activity\n'), ((2538, 2609), 'models.Portfolio', 'Portfolio', ([], {'ticker': 'ticker', 'total_units': 'units', 'total_invested': 'total_price'}), '(ticker=ticker, total_units=units, total_invested=total_price)\n', (2547, 2609), False, 'from models import Watchlist, Portfolio, Activity\n'), ((2678, 2728), 'models.Portfolio.query.filter', 'Portfolio.query.filter', (['(Portfolio.ticker == ticker)'], {}), '(Portfolio.ticker == ticker)\n', (2700, 2728), False, 'from models import Watchlist, Portfolio, Activity\n'), ((2946, 2996), 'models.Portfolio.query.filter', 'Portfolio.query.filter', (['(Portfolio.total_units == 0)'], {}), '(Portfolio.total_units == 0)\n', (2968, 2996), False, 'from models import Watchlist, Portfolio, Activity\n'), ((3430, 3480), 'models.Watchlist.query.filter', 'Watchlist.query.filter', (['(Watchlist.ticker == ticker)'], {}), '(Watchlist.ticker == ticker)\n', (3452, 3480), False, 'from models import Watchlist, Portfolio, Activity\n'), ((3645, 3688), 'models.Watchlist', 'Watchlist', ([], {'ticker': 'ticker', 'industry': 'industry'}), '(ticker=ticker, industry=industry)\n', (3654, 3688), False, 'from models import Watchlist, Portfolio, Activity\n'), ((294, 324), 'sqlalchemy.sql.expression.func.max', 'func.max', (['Activity.activity_id'], {}), '(Activity.activity_id)\n', (302, 324), False, 'from sqlalchemy.sql.expression import func\n'), ((640, 775), 'models.Activity', 'Activity', ([], {'ticker': 'ticker', 'units': 'units', 'order_type': '"""b"""', 'unit_price': 'unit_price', 'total_price': 'total_price', 'buying_power': 'new_buying_power'}), "(ticker=ticker, units=units, order_type='b', unit_price=unit_price,\n total_price=total_price, buying_power=new_buying_power)\n", (648, 775), False, 'from models import Watchlist, Portfolio, Activity\n'), ((1734, 1869), 'models.Activity', 'Activity', ([], {'ticker': 'ticker', 'units': 'units', 'order_type': '"""s"""', 
'unit_price': 'unit_price', 'total_price': 'total_price', 'buying_power': 'new_buying_power'}), "(ticker=ticker, units=units, order_type='s', unit_price=unit_price,\n total_price=total_price, buying_power=new_buying_power)\n", (1742, 1869), False, 'from models import Watchlist, Portfolio, Activity\n'), ((3845, 3895), 'models.Watchlist.query.filter', 'Watchlist.query.filter', (['(Watchlist.ticker == ticker)'], {}), '(Watchlist.ticker == ticker)\n', (3867, 3895), False, 'from models import Watchlist, Portfolio, Activity\n'), ((1490, 1520), 'sqlalchemy.sql.expression.func.max', 'func.max', (['Activity.activity_id'], {}), '(Activity.activity_id)\n', (1498, 1520), False, 'from sqlalchemy.sql.expression import func\n'), ((434, 487), 'models.Activity.query.filter', 'Activity.query.filter', (['(Activity.activity_id == max_id)'], {}), '(Activity.activity_id == max_id)\n', (455, 487), False, 'from models import Watchlist, Portfolio, Activity\n'), ((1558, 1611), 'models.Activity.query.filter', 'Activity.query.filter', (['(Activity.activity_id == max_id)'], {}), '(Activity.activity_id == max_id)\n', (1579, 1611), False, 'from models import Watchlist, Portfolio, Activity\n')] |
import unittest
from pyavl3 import AVLTree
class AVLTreeUpdateTest(unittest.TestCase):
def test_add_one(self):
a = AVLTree()
a.update({1:'a'})
self.assertEqual(len(a), 1)
| [
"pyavl3.AVLTree"
] | [((129, 138), 'pyavl3.AVLTree', 'AVLTree', ([], {}), '()\n', (136, 138), False, 'from pyavl3 import AVLTree\n')] |
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
import logging
PORT = 8000
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write("Hello World ! '{}'".format(self.path))
return
for i in range(4):
Handler = GetHandler
httpd = SocketServer.TCPServer(("", PORT + i), Handler)
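    # Note: serve_forever() below blocks indefinitely, so only the first port in this loop (8000) ever serves requests.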
httpd.serve_forever() | [
"SocketServer.TCPServer"
] | [((428, 475), 'SocketServer.TCPServer', 'SocketServer.TCPServer', (["('', PORT + i)", 'Handler'], {}), "(('', PORT + i), Handler)\n", (450, 475), False, 'import SocketServer\n')] |
#!/usr/bin/env python3
# Usage:
# $0 -o log.txt products/
#
# concatenates .log files (even those in subdirs or .zip) and combines into a single combined.log
from xdfile.utils import find_files_with_time, open_output, get_args
import boto3
# from boto.s3.connection import S3Connection
import os
def main():
args = get_args('aggregates all .log files')
outf = open_output()
s3 = boto3.resource('s3')
s3path = "logs/"
# bucket = conn.get_bucket(s3path)
bucket = s3.Bucket(os.environ['DOMAIN'])
for obj in sorted(bucket.objects.all(), key=lambda x: x.last_modified):
# last_modified
if s3path in obj.key:
print("Name: %s LastModified:%s" % (obj.key.encode('utf-8'), obj.last_modified))
for fn, contents, dt in sorted(find_files_with_time(*args.inputs, ext=".log"), key=lambda x: x[2]): # earliest first
outf.write_file(fn, contents.decode("utf-8"))
main()
| [
"boto3.resource",
"xdfile.utils.open_output",
"xdfile.utils.get_args",
"xdfile.utils.find_files_with_time"
] | [((326, 363), 'xdfile.utils.get_args', 'get_args', (['"""aggregates all .log files"""'], {}), "('aggregates all .log files')\n", (334, 363), False, 'from xdfile.utils import find_files_with_time, open_output, get_args\n'), ((375, 388), 'xdfile.utils.open_output', 'open_output', ([], {}), '()\n', (386, 388), False, 'from xdfile.utils import find_files_with_time, open_output, get_args\n'), ((399, 419), 'boto3.resource', 'boto3.resource', (['"""s3"""'], {}), "('s3')\n", (413, 419), False, 'import boto3\n'), ((785, 831), 'xdfile.utils.find_files_with_time', 'find_files_with_time', (['*args.inputs'], {'ext': '""".log"""'}), "(*args.inputs, ext='.log')\n", (805, 831), False, 'from xdfile.utils import find_files_with_time, open_output, get_args\n')] |
import torch
import numpy as np
import pickle
torch.manual_seed(17)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(17)
import argparse
import torch.nn as nn
import torch.nn.functional as F
import matplotlib
import os
from rational.torch import Rational, RecurrentRational, RecurrentRationalModule
from torchvision import datasets, transforms
from torch.utils.tensorboard import SummaryWriter
from mnist import VGG, LeNet5, actfvs
from matplotlib import pyplot as plt
font = {'family': 'normal',
'weight': 'bold',
'size': 22}
matplotlib.rc('font', **font)
torch.set_anomaly_enabled(True)
def test(args, model, device, test_loader, epoch):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
acc = 100. * correct / len(test_loader.dataset)
print('\nTest set: Epoch: {}, Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(epoch, test_loss,
correct,
len(test_loader.dataset),
acc))
return acc
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=17, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--dataset', type=str, default='mnist',
help='dataset to use')
parser.add_argument('--arch', type=str, required=True)
parser.add_argument('--init', type=str, default="", choices=["", "xavier", "he"])
args = parser.parse_args()
networks = dict({
"vgg": VGG,
"lenet": LeNet5,
})
network = networks[args.arch]
# activation_function_keys = [x for x in list(actfvs.keys()) if 'pau' in x]
# activation_function_keys = ['pau']
# activation_function_keys = ['recurrent_pau']
activation_function_keys = ['pau', 'recurrent_pau']
optimizer = 'sgd'
epochs = ['final']
for activation_function_key in activation_function_keys:
for epoch in epochs:
print("---" * 42)
print("Starting with dataset: {}, activation function: {}".format(args.dataset, activation_function_key))
print("---" * 42)
load_path = 'examples/runs/mnist/paper_{}_{}_{}{}_seed{}/'.format(args.dataset, args.arch, optimizer,
"_init_{}".format(args.init) if args.init != "" else "",
args.seed) + activation_function_key
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
if args.dataset == 'mnist':
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
lr_scheduler_milestones = [30, 60, 90] # Simple CNN with 3 Conv
# lr_scheduler_milestones = [40, 80] # VGG
elif args.dataset == 'fmnist':
test_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('../data', train=False, transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
lr_scheduler_milestones = [40, 80]
else:
raise ValueError('dataset error')
model = network(activation_func=activation_function_key).to(device)
model.load_state_dict(torch.load(os.path.join(load_path, 'model_{}.pt'.format(epoch))))
paus = list()
for name, layer in model.named_modules():
if isinstance(layer, Rational):
layer.input_retrieve_mode(max_saves=10)
paus.append(('rational', name, layer))
if isinstance(layer, RecurrentRationalModule):
layer.input_retrieve_mode(max_saves=10)
paus.append(('recurrent_rational', name, layer))
if len(paus) > 0:
os.makedirs(os.path.join(load_path, 'plots'), exist_ok=True)
# dict(model.named_parameters())["features.3.0.bias"][0]
# dict(model.named_parameters())["features.4.2.numerator"][0]
print("Starting model eval")
acc = test(args, model, device, test_loader, epoch)
print("Finished model eval -> Plot")
# fig = plt.figure(1, figsize=(6*len(paus),6))
fig_dicts = []
for i, p in enumerate(paus):
fig = p[2].show(display=False)
print(fig)
fig_dicts.append(fig)
pickle.dump(fig_dicts, open(f'{args.dataset}_{args.arch}_{activation_function_key}_(acc{acc}%).fig.pkl', "wb"))
else:
print("No Rational Activations found. Exit without plotting")
if __name__ == '__main__':
main()
| [
"torch.manual_seed",
"argparse.ArgumentParser",
"torch.nn.functional.nll_loss",
"os.path.join",
"torch.cuda.is_available",
"torch.set_anomaly_enabled",
"matplotlib.rc",
"numpy.random.seed",
"torchvision.transforms.Resize",
"torch.no_grad",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Normalize",
"torch.device"
] | [((47, 68), 'torch.manual_seed', 'torch.manual_seed', (['(17)'], {}), '(17)\n', (64, 68), False, 'import torch\n'), ((150, 168), 'numpy.random.seed', 'np.random.seed', (['(17)'], {}), '(17)\n', (164, 168), True, 'import numpy as np\n'), ((592, 621), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (605, 621), False, 'import matplotlib\n'), ((623, 654), 'torch.set_anomaly_enabled', 'torch.set_anomaly_enabled', (['(True)'], {}), '(True)\n', (648, 654), False, 'import torch\n'), ((1771, 1831), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (1794, 1831), False, 'import argparse\n'), ((768, 783), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (781, 783), False, 'import torch\n'), ((3773, 3801), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3790, 3801), False, 'import torch\n'), ((3919, 3944), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3933, 3944), True, 'import numpy as np\n'), ((3967, 4010), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (3979, 4010), False, 'import torch\n'), ((3735, 3760), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3758, 3760), False, 'import torch\n'), ((946, 989), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (956, 989), True, 'import torch.nn.functional as F\n'), ((5948, 5980), 'os.path.join', 'os.path.join', (['load_path', '"""plots"""'], {}), "(load_path, 'plots')\n", (5960, 5980), False, 'import os\n'), ((4305, 4332), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32, 32)'], {}), '((32, 32))\n', (4322, 4332), False, 'from torchvision import datasets, transforms\n'), ((4358, 4379), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4377, 4379), False, 'from torchvision import datasets, transforms\n'), ((4405, 4447), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (4425, 4447), False, 'from torchvision import datasets, transforms\n'), ((4909, 4936), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32, 32)'], {}), '((32, 32))\n', (4926, 4936), False, 'from torchvision import datasets, transforms\n'), ((4962, 4983), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4981, 4983), False, 'from torchvision import datasets, transforms\n'), ((5009, 5051), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (5029, 5051), False, 'from torchvision import datasets, transforms\n')] |
from typing import Union
import torch
import torch.nn as nn
from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb
from kornia.constants import pi
def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust color saturation of an image. Expecting input to be in hsv format already.
See :class:`~kornia.color.AdjustSaturation` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(saturation_factor, (float, torch.Tensor,)):
raise TypeError(f"The saturation_factor should be a float number or torch.Tensor."
f"Got {type(saturation_factor)}")
if isinstance(saturation_factor, float):
saturation_factor = torch.tensor([saturation_factor])
saturation_factor = saturation_factor.to(input.device).to(input.dtype)
if (saturation_factor < 0).any():
raise ValueError(f"Saturation factor must be non-negative. Got {saturation_factor}")
for _ in input.shape[1:]:
saturation_factor = torch.unsqueeze(saturation_factor, dim=-1)
# unpack the hsv values
h, s, v = torch.chunk(input, chunks=3, dim=-3)
# transform the hue value and appl module
s_out: torch.Tensor = torch.clamp(s * saturation_factor, min=0, max=1)
# pack back back the corrected hue
out: torch.Tensor = torch.cat([h, s_out, v], dim=-3)
return out
def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust color saturation of an image.
See :class:`~kornia.color.AdjustSaturation` for details.
"""
# convert the rgb image to hsv
x_hsv: torch.Tensor = rgb_to_hsv(input)
# perform the conversion
x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor)
# convert back to rgb
out: torch.Tensor = hsv_to_rgb(x_adjusted)
return out
def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust hue of an image. Expecting input to be in hsv format already.
See :class:`~kornia.color.AdjustHue` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(hue_factor, (float, torch.Tensor)):
raise TypeError(f"The hue_factor should be a float number or torch.Tensor in the range between"
f" [-PI, PI]. Got {type(hue_factor)}")
if isinstance(hue_factor, float):
hue_factor = torch.tensor([hue_factor])
hue_factor = hue_factor.to(input.device).to(input.dtype)
if ((hue_factor < -pi) | (hue_factor > pi)).any():
raise ValueError(f"Hue-factor must be in the range [-PI, PI]. Got {hue_factor}")
for _ in input.shape[1:]:
hue_factor = torch.unsqueeze(hue_factor, dim=-1)
# unpack the hsv values
h, s, v = torch.chunk(input, chunks=3, dim=-3)
# transform the hue value and appl module
divisor: float = 2 * pi.item()
h_out: torch.Tensor = torch.fmod(h + hue_factor, divisor)
# pack back back the corrected hue
out: torch.Tensor = torch.cat([h_out, s, v], dim=-3)
return out
def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust hue of an image.
See :class:`~kornia.color.AdjustHue` for details.
"""
# convert the rgb image to hsv
x_hsv: torch.Tensor = rgb_to_hsv(input)
# perform the conversion
x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor)
# convert back to rgb
out: torch.Tensor = hsv_to_rgb(x_adjusted)
return out
def adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor],
gain: Union[float, torch.Tensor] = 1.) -> torch.Tensor:
r"""Perform gamma correction on an image.
See :class:`~kornia.color.AdjustGamma` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(gamma, (float, torch.Tensor)):
raise TypeError(f"The gamma should be a positive float or torch.Tensor. Got {type(gamma)}")
if not isinstance(gain, (float, torch.Tensor)):
raise TypeError(f"The gain should be a positive float or torch.Tensor. Got {type(gain)}")
if isinstance(gamma, float):
gamma = torch.tensor([gamma])
if isinstance(gain, float):
gain = torch.tensor([gain])
gamma = gamma.to(input.device).to(input.dtype)
gain = gain.to(input.device).to(input.dtype)
if (gamma < 0.0).any():
raise ValueError(f"Gamma must be non-negative. Got {gamma}")
if (gain < 0.0).any():
raise ValueError(f"Gain must be non-negative. Got {gain}")
for _ in input.shape[1:]:
gamma = torch.unsqueeze(gamma, dim=-1)
gain = torch.unsqueeze(gain, dim=-1)
# Apply the gamma correction
x_adjust: torch.Tensor = gain * torch.pow(input, gamma)
# Truncate between pixel values
out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)
return out
def adjust_contrast(input: torch.Tensor,
contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust Contrast of an image.
See :class:`~kornia.color.AdjustContrast` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(contrast_factor, (float, torch.Tensor,)):
raise TypeError(f"The factor should be either a float or torch.Tensor. "
f"Got {type(contrast_factor)}")
if isinstance(contrast_factor, float):
contrast_factor = torch.tensor([contrast_factor])
contrast_factor = contrast_factor.to(input.device).to(input.dtype)
if (contrast_factor < 0).any():
raise ValueError(f"Contrast factor must be non-negative. Got {contrast_factor}")
for _ in input.shape[1:]:
contrast_factor = torch.unsqueeze(contrast_factor, dim=-1)
# Apply contrast factor to each channel
x_adjust: torch.Tensor = input * contrast_factor
# Truncate between pixel values
out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)
return out
def adjust_brightness(input: torch.Tensor,
brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust Brightness of an image.
See :class:`~kornia.color.AdjustBrightness` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(brightness_factor, (float, torch.Tensor,)):
raise TypeError(f"The factor should be either a float or torch.Tensor. "
f"Got {type(brightness_factor)}")
if isinstance(brightness_factor, float):
brightness_factor = torch.tensor([brightness_factor])
brightness_factor = brightness_factor.to(input.device).to(input.dtype)
for _ in input.shape[1:]:
brightness_factor = torch.unsqueeze(brightness_factor, dim=-1)
# Apply brightness factor to each channel
x_adjust: torch.Tensor = input + brightness_factor
# Truncate between pixel values
out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)
return out
class AdjustSaturation(nn.Module):
r"""Adjust color saturation of an image.
The input image is expected to be an RGB image in the range of [0, 1].
Args:
input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N).
saturation_factor (float): How much to adjust the saturation. 0 will give a black
and white image, 1 will give the original image while 2 will enhance the saturation
by a factor of 2.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self, saturation_factor: Union[float, torch.Tensor]) -> None:
super(AdjustSaturation, self).__init__()
self.saturation_factor: Union[float, torch.Tensor] = saturation_factor
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return adjust_saturation(input, self.saturation_factor)
class AdjustHue(nn.Module):
r"""Adjust hue of an image.
The input image is expected to be an RGB image in the range of [0, 1].
Args:
input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N).
hue_factor (float): How much to shift the hue channel. Should be in [-PI, PI]. PI
and -PI give complete reversal of hue channel in HSV space in positive and negative
direction respectively. 0 means no shift. Therefore, both -PI and PI will give an
image with complementary colors while 0 gives the original image.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self, hue_factor: Union[float, torch.Tensor]) -> None:
super(AdjustHue, self).__init__()
self.hue_factor: Union[float, torch.Tensor] = hue_factor
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return adjust_hue(input, self.hue_factor)
class AdjustGamma(nn.Module):
r"""Perform gamma correction on an image.
The input image is expected to be in the range of [0, 1].
Args:
input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N).
gamma (float): Non negative real number, same as γ\gammaγ in the equation.
gamma larger than 1 make the shadows darker, while gamma smaller than 1 make
dark regions lighter.
gain (float, optional): The constant multiplier. Default 1.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> None:
super(AdjustGamma, self).__init__()
self.gamma: Union[float, torch.Tensor] = gamma
self.gain: Union[float, torch.Tensor] = gain
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return adjust_gamma(input, self.gamma, self.gain)
class AdjustContrast(nn.Module):
r"""Adjust Contrast of an image. This implementation aligns OpenCV, not PIL. Hence,
the output differs from TorchVision.
The input image is expected to be in the range of [0, 1].
Args:
input (torch.Tensor): Image to be adjusted in the shape of (\*, N).
contrast_factor (Union[float, torch.Tensor]): Contrast adjust factor per element
in the batch. 0 generates a compleatly black image, 1 does not modify
the input image while any other non-negative number modify the
brightness by this factor.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self, contrast_factor: Union[float, torch.Tensor]) -> None:
super(AdjustContrast, self).__init__()
self.contrast_factor: Union[float, torch.Tensor] = contrast_factor
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return adjust_contrast(input, self.contrast_factor)
class AdjustBrightness(nn.Module):
r"""Adjust Brightness of an image. This implementation aligns OpenCV, not PIL. Hence,
the output differs from TorchVision.
The input image is expected to be in the range of [0, 1].
Args:
input (torch.Tensor): Image/Input to be adjusted in the shape of (\*, N).
brightness_factor (Union[float, torch.Tensor]): Brightness adjust factor per element
in the batch. 0 does not modify the input image while any other number modify the
brightness.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self, brightness_factor: Union[float, torch.Tensor]) -> None:
super(AdjustBrightness, self).__init__()
self.brightness_factor: Union[float, torch.Tensor] = brightness_factor
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return adjust_brightness(input, self.brightness_factor)
| [
"kornia.color.hsv.rgb_to_hsv",
"kornia.color.hsv.hsv_to_rgb",
"torch.unsqueeze",
"kornia.constants.pi.item",
"torch.pow",
"torch.is_tensor",
"torch.tensor",
"torch.chunk",
"torch.fmod",
"torch.clamp",
"torch.cat"
] | [((1212, 1248), 'torch.chunk', 'torch.chunk', (['input'], {'chunks': '(3)', 'dim': '(-3)'}), '(input, chunks=3, dim=-3)\n', (1223, 1248), False, 'import torch\n'), ((1322, 1370), 'torch.clamp', 'torch.clamp', (['(s * saturation_factor)'], {'min': '(0)', 'max': '(1)'}), '(s * saturation_factor, min=0, max=1)\n', (1333, 1370), False, 'import torch\n'), ((1435, 1467), 'torch.cat', 'torch.cat', (['[h, s_out, v]'], {'dim': '(-3)'}), '([h, s_out, v], dim=-3)\n', (1444, 1467), False, 'import torch\n'), ((1770, 1787), 'kornia.color.hsv.rgb_to_hsv', 'rgb_to_hsv', (['input'], {}), '(input)\n', (1780, 1787), False, 'from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb\n'), ((1948, 1970), 'kornia.color.hsv.hsv_to_rgb', 'hsv_to_rgb', (['x_adjusted'], {}), '(x_adjusted)\n', (1958, 1970), False, 'from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb\n'), ((2993, 3029), 'torch.chunk', 'torch.chunk', (['input'], {'chunks': '(3)', 'dim': '(-3)'}), '(input, chunks=3, dim=-3)\n', (3004, 3029), False, 'import torch\n'), ((3138, 3173), 'torch.fmod', 'torch.fmod', (['(h + hue_factor)', 'divisor'], {}), '(h + hue_factor, divisor)\n', (3148, 3173), False, 'import torch\n'), ((3238, 3270), 'torch.cat', 'torch.cat', (['[h_out, s, v]'], {'dim': '(-3)'}), '([h_out, s, v], dim=-3)\n', (3247, 3270), False, 'import torch\n'), ((3539, 3556), 'kornia.color.hsv.rgb_to_hsv', 'rgb_to_hsv', (['input'], {}), '(input)\n', (3549, 3556), False, 'from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb\n'), ((3703, 3725), 'kornia.color.hsv.hsv_to_rgb', 'hsv_to_rgb', (['x_adjusted'], {}), '(x_adjusted)\n', (3713, 3725), False, 'from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb\n'), ((5135, 5166), 'torch.clamp', 'torch.clamp', (['x_adjust', '(0.0)', '(1.0)'], {}), '(x_adjust, 0.0, 1.0)\n', (5146, 5166), False, 'import torch\n'), ((6288, 6319), 'torch.clamp', 'torch.clamp', (['x_adjust', '(0.0)', '(1.0)'], {}), '(x_adjust, 0.0, 1.0)\n', (6299, 6319), False, 'import torch\n'), ((7347, 7378), 'torch.clamp', 'torch.clamp', (['x_adjust', '(0.0)', '(1.0)'], {}), '(x_adjust, 0.0, 1.0)\n', (7358, 7378), False, 'import torch\n'), ((431, 453), 'torch.is_tensor', 'torch.is_tensor', (['input'], {}), '(input)\n', (446, 453), False, 'import torch\n'), ((825, 858), 'torch.tensor', 'torch.tensor', (['[saturation_factor]'], {}), '([saturation_factor])\n', (837, 858), False, 'import torch\n'), ((1126, 1168), 'torch.unsqueeze', 'torch.unsqueeze', (['saturation_factor'], {'dim': '(-1)'}), '(saturation_factor, dim=-1)\n', (1141, 1168), False, 'import torch\n'), ((2238, 2260), 'torch.is_tensor', 'torch.is_tensor', (['input'], {}), '(input)\n', (2253, 2260), False, 'import torch\n'), ((2628, 2654), 'torch.tensor', 'torch.tensor', (['[hue_factor]'], {}), '([hue_factor])\n', (2640, 2654), False, 'import torch\n'), ((2914, 2949), 'torch.unsqueeze', 'torch.unsqueeze', (['hue_factor'], {'dim': '(-1)'}), '(hue_factor, dim=-1)\n', (2929, 2949), False, 'import torch\n'), ((3102, 3111), 'kornia.constants.pi.item', 'pi.item', ([], {}), '()\n', (3109, 3111), False, 'from kornia.constants import pi\n'), ((4013, 4035), 'torch.is_tensor', 'torch.is_tensor', (['input'], {}), '(input)\n', (4028, 4035), False, 'import torch\n'), ((4472, 4493), 'torch.tensor', 'torch.tensor', (['[gamma]'], {}), '([gamma])\n', (4484, 4493), False, 'import torch\n'), ((4542, 4562), 'torch.tensor', 'torch.tensor', (['[gain]'], {}), '([gain])\n', (4554, 4562), False, 'import torch\n'), ((4904, 4934), 'torch.unsqueeze', 'torch.unsqueeze', (['gamma'], {'dim': '(-1)'}), '(gamma, dim=-1)\n', (4919, 
4934), False, 'import torch\n'), ((4950, 4979), 'torch.unsqueeze', 'torch.unsqueeze', (['gain'], {'dim': '(-1)'}), '(gain, dim=-1)\n', (4965, 4979), False, 'import torch\n'), ((5050, 5073), 'torch.pow', 'torch.pow', (['input', 'gamma'], {}), '(input, gamma)\n', (5059, 5073), False, 'import torch\n'), ((5425, 5447), 'torch.is_tensor', 'torch.is_tensor', (['input'], {}), '(input)\n', (5440, 5447), False, 'import torch\n'), ((5801, 5832), 'torch.tensor', 'torch.tensor', (['[contrast_factor]'], {}), '([contrast_factor])\n', (5813, 5832), False, 'import torch\n'), ((6088, 6128), 'torch.unsqueeze', 'torch.unsqueeze', (['contrast_factor'], {'dim': '(-1)'}), '(contrast_factor, dim=-1)\n', (6103, 6128), False, 'import torch\n'), ((6588, 6610), 'torch.is_tensor', 'torch.is_tensor', (['input'], {}), '(input)\n', (6603, 6610), False, 'import torch\n'), ((6972, 7005), 'torch.tensor', 'torch.tensor', (['[brightness_factor]'], {}), '([brightness_factor])\n', (6984, 7005), False, 'import torch\n'), ((7141, 7183), 'torch.unsqueeze', 'torch.unsqueeze', (['brightness_factor'], {'dim': '(-1)'}), '(brightness_factor, dim=-1)\n', (7156, 7183), False, 'import torch\n')] |
from data.data_loader_dad import (
NASA_Anomaly,
WADI
)
from exp.exp_basic import Exp_Basic
from models.model import Informer
from utils.tools import EarlyStopping, adjust_learning_rate
from utils.metrics import metric
from sklearn.metrics import classification_report
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
import os
import time
import warnings
warnings.filterwarnings('ignore')
class Exp_Informer_DAD(Exp_Basic):
def __init__(self, args):
super(Exp_Informer_DAD, self).__init__(args)
def _build_model(self):
model_dict = {
'informer':Informer,
}
if self.args.model=='informer':
model = model_dict[self.args.model](
self.args.enc_in,
self.args.dec_in,
self.args.c_out,
self.args.seq_len,
self.args.label_len,
self.args.pred_len,
self.args.factor,
self.args.d_model,
self.args.n_heads,
self.args.e_layers,
self.args.d_layers,
self.args.d_ff,
self.args.dropout,
self.args.attn,
self.args.embed,
self.args.data[:-1],
self.args.activation,
self.device
)
return model.double()
def _get_data(self, flag):
args = self.args
data_dict = {
'SMAP':NASA_Anomaly,
'MSL':NASA_Anomaly,
'WADI':WADI,
}
Data = data_dict[self.args.data]
if flag == 'test':
shuffle_flag = False; drop_last = True; batch_size = args.batch_size
else:
shuffle_flag = True; drop_last = True; batch_size = args.batch_size
data_set = Data(
root_path=args.root_path,
data_path=args.data_path,
flag=flag,
size=[args.seq_len, args.label_len, args.pred_len],
features=args.features,
target=args.target
)
print(flag, len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last)
return data_set, data_loader
def _select_optimizer(self):
model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
return model_optim
def _select_criterion(self):
criterion = nn.MSELoss()
return criterion
def vali(self, vali_data, vali_loader, criterion):
self.model.eval()
total_loss = []
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader):
batch_x = batch_x.double().to(self.device)
batch_y = batch_y.double()
batch_x_mark = batch_x_mark.double().to(self.device)
batch_y_mark = batch_y_mark.double().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device)
# encoder - decoder
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device)
pred = outputs.detach().cpu()
true = batch_y.detach().cpu()
loss = criterion(pred, true)
total_loss.append(loss)
total_loss = np.average(total_loss)
self.model.train()
return total_loss
def train(self, setting):
train_data, train_loader = self._get_data(flag = 'train')
vali_data, vali_loader = self._get_data(flag = 'val')
test_data, test_loader = self._get_data(flag = 'test')
path = './checkpoints/'+setting
if not os.path.exists(path):
os.makedirs(path)
time_now = time.time()
train_steps = len(train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
model_optim = self._select_optimizer()
criterion = self._select_criterion()
for epoch in range(self.args.train_epochs):
iter_count = 0
train_loss = []
self.model.train()
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader):
iter_count += 1
model_optim.zero_grad()
batch_x = batch_x.double().to(self.device)
batch_y = batch_y.double()
batch_x_mark = batch_x_mark.double().to(self.device)
batch_y_mark = batch_y_mark.double().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device)
# encoder - decoder
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device)
loss = criterion(outputs, batch_y)
train_loss.append(loss.item())
if (i+1) % 100==0:
print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
speed = (time.time()-time_now)/iter_count
left_time = speed*((self.args.train_epochs - epoch)*train_steps - i)
print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
iter_count = 0
time_now = time.time()
loss.backward()
model_optim.step()
train_loss = np.average(train_loss)
vali_loss = self.vali(vali_data, vali_loader, criterion)
test_loss = self.vali(test_data, test_loader, criterion)
print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
epoch + 1, train_steps, train_loss, vali_loss, test_loss))
early_stopping(vali_loss, self.model, path)
if early_stopping.early_stop:
print("Early stopping")
break
adjust_learning_rate(model_optim, epoch+1, self.args)
best_model_path = path+'/'+'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
return self.model
def test(self, setting):
test_data, test_loader = self._get_data(flag='test')
self.model.eval()
preds = []
trues = []
labels = []
with torch.no_grad():
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader):
batch_x = batch_x.double().to(self.device)
batch_y = batch_y.double()
batch_x_mark = batch_x_mark.double().to(self.device)
batch_y_mark = batch_y_mark.double().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device)
# encoder - decoder
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device)
pred = outputs.detach().cpu().numpy()#.squeeze()
true = batch_y.detach().cpu().numpy()#.squeeze()
batch_label = batch_label.long().detach().numpy()
preds.append(pred)
trues.append(true)
labels.append(batch_label)
preds = np.array(preds)
trues = np.array(trues)
labels = np.array(labels)
print('test shape:', preds.shape, trues.shape)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
labels = labels.reshape(-1, labels.shape[-1])
print('test shape:', preds.shape, trues.shape)
# result save
folder_path = './results/' + setting +'/'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
mae, mse, rmse, mape, mspe = metric(preds, trues)
print('mse:{}, mae:{}'.format(mse, mae))
np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
np.save(folder_path+'pred.npy', preds)
np.save(folder_path+'true.npy', trues)
np.save(folder_path+'label.npy', labels)
return | [
"os.path.exists",
"os.makedirs",
"numpy.average",
"torch.load",
"torch.nn.MSELoss",
"utils.tools.EarlyStopping",
"numpy.array",
"utils.tools.adjust_learning_rate",
"torch.cat",
"torch.utils.data.DataLoader",
"utils.metrics.metric",
"torch.no_grad",
"torch.zeros_like",
"time.time",
"warnings.filterwarnings",
"numpy.save"
] | [((438, 471), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (461, 471), False, 'import warnings\n'), ((2215, 2335), 'torch.utils.data.DataLoader', 'DataLoader', (['data_set'], {'batch_size': 'batch_size', 'shuffle': 'shuffle_flag', 'num_workers': 'args.num_workers', 'drop_last': 'drop_last'}), '(data_set, batch_size=batch_size, shuffle=shuffle_flag,\n num_workers=args.num_workers, drop_last=drop_last)\n', (2225, 2335), False, 'from torch.utils.data import DataLoader\n'), ((2637, 2649), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2647, 2649), True, 'import torch.nn as nn\n'), ((3705, 3727), 'numpy.average', 'np.average', (['total_loss'], {}), '(total_loss)\n', (3715, 3727), True, 'import numpy as np\n'), ((4139, 4150), 'time.time', 'time.time', ([], {}), '()\n', (4148, 4150), False, 'import time\n'), ((4225, 4281), 'utils.tools.EarlyStopping', 'EarlyStopping', ([], {'patience': 'self.args.patience', 'verbose': '(True)'}), '(patience=self.args.patience, verbose=True)\n', (4238, 4281), False, 'from utils.tools import EarlyStopping, adjust_learning_rate\n'), ((8199, 8214), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (8207, 8214), True, 'import numpy as np\n'), ((8231, 8246), 'numpy.array', 'np.array', (['trues'], {}), '(trues)\n', (8239, 8246), True, 'import numpy as np\n'), ((8264, 8280), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (8272, 8280), True, 'import numpy as np\n'), ((8773, 8793), 'utils.metrics.metric', 'metric', (['preds', 'trues'], {}), '(preds, trues)\n', (8779, 8793), False, 'from utils.metrics import metric\n'), ((8935, 8975), 'numpy.save', 'np.save', (["(folder_path + 'pred.npy')", 'preds'], {}), "(folder_path + 'pred.npy', preds)\n", (8942, 8975), True, 'import numpy as np\n'), ((8982, 9022), 'numpy.save', 'np.save', (["(folder_path + 'true.npy')", 'trues'], {}), "(folder_path + 'true.npy', trues)\n", (8989, 9022), True, 'import numpy as np\n'), ((9029, 9071), 'numpy.save', 'np.save', (["(folder_path + 'label.npy')", 'labels'], {}), "(folder_path + 'label.npy', labels)\n", (9036, 9071), True, 'import numpy as np\n'), ((4067, 4087), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4081, 4087), False, 'import os\n'), ((4101, 4118), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (4112, 4118), False, 'import os\n'), ((6113, 6135), 'numpy.average', 'np.average', (['train_loss'], {}), '(train_loss)\n', (6123, 6135), True, 'import numpy as np\n'), ((6634, 6689), 'utils.tools.adjust_learning_rate', 'adjust_learning_rate', (['model_optim', '(epoch + 1)', 'self.args'], {}), '(model_optim, epoch + 1, self.args)\n', (6654, 6689), False, 'from utils.tools import EarlyStopping, adjust_learning_rate\n'), ((6788, 6815), 'torch.load', 'torch.load', (['best_model_path'], {}), '(best_model_path)\n', (6798, 6815), False, 'import torch\n'), ((7067, 7082), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7080, 7082), False, 'import torch\n'), ((8669, 8696), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (8683, 8696), False, 'import os\n'), ((8710, 8734), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (8721, 8734), False, 'import os\n'), ((8887, 8925), 'numpy.array', 'np.array', (['[mae, mse, rmse, mape, mspe]'], {}), '([mae, mse, rmse, mape, mspe])\n', (8895, 8925), True, 'import numpy as np\n'), ((3156, 3209), 'torch.zeros_like', 'torch.zeros_like', (['batch_y[:, -self.args.pred_len:, :]'], {}), '(batch_y[:, 
-self.args.pred_len:, :])\n', (3172, 3209), False, 'import torch\n'), ((5991, 6002), 'time.time', 'time.time', ([], {}), '()\n', (6000, 6002), False, 'import time\n'), ((5049, 5102), 'torch.zeros_like', 'torch.zeros_like', (['batch_y[:, -self.args.pred_len:, :]'], {}), '(batch_y[:, -self.args.pred_len:, :])\n', (5065, 5102), False, 'import torch\n'), ((7485, 7538), 'torch.zeros_like', 'torch.zeros_like', (['batch_y[:, -self.args.pred_len:, :]'], {}), '(batch_y[:, -self.args.pred_len:, :])\n', (7501, 7538), False, 'import torch\n'), ((3239, 3303), 'torch.cat', 'torch.cat', (['[batch_y[:, :self.args.label_len, :], dec_inp]'], {'dim': '(1)'}), '([batch_y[:, :self.args.label_len, :], dec_inp], dim=1)\n', (3248, 3303), False, 'import torch\n'), ((5707, 5718), 'time.time', 'time.time', ([], {}), '()\n', (5716, 5718), False, 'import time\n'), ((5136, 5200), 'torch.cat', 'torch.cat', (['[batch_y[:, :self.args.label_len, :], dec_inp]'], {'dim': '(1)'}), '([batch_y[:, :self.args.label_len, :], dec_inp], dim=1)\n', (5145, 5200), False, 'import torch\n'), ((7572, 7636), 'torch.cat', 'torch.cat', (['[batch_y[:, :self.args.label_len, :], dec_inp]'], {'dim': '(1)'}), '([batch_y[:, :self.args.label_len, :], dec_inp], dim=1)\n', (7581, 7636), False, 'import torch\n')] |
from django.db import migrations, models
from django.conf import settings
from opaque_keys.edx.django.models import CourseKeyField
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CohortMembership',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('course_id', CourseKeyField(max_length=255)),
],
),
migrations.CreateModel(
name='CourseCohort',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random', 'Random'), ('manual', 'Manual')])),
],
),
migrations.CreateModel(
name='CourseCohortsSettings',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_cohorted', models.BooleanField(default=False)),
('course_id', CourseKeyField(help_text='Which course are these settings associated with?', unique=True, max_length=255, db_index=True)),
('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions', blank=True)),
('always_cohort_inline_discussions', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='CourseUserGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='What is the name of this group? Must be unique within a course.', max_length=255)),
('course_id', CourseKeyField(help_text='Which course is this group associated with?', max_length=255, db_index=True)),
('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])),
('users', models.ManyToManyField(help_text='Who is in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)),
],
),
migrations.CreateModel(
name='CourseUserGroupPartitionGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('partition_id', models.IntegerField(help_text='contains the id of a cohorted partition in this course')),
('group_id', models.IntegerField(help_text='contains the id of a specific group within the cohorted partition')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)),
],
),
migrations.AddField(
model_name='coursecohort',
name='course_user_group',
field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='cohortmembership',
name='course_user_group',
field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='cohortmembership',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
),
migrations.AlterUniqueTogether(
name='courseusergroup',
unique_together={('name', 'course_id')},
),
migrations.AlterUniqueTogether(
name='cohortmembership',
unique_together={('user', 'course_id')},
),
]
| [
"django.db.migrations.AlterUniqueTogether",
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"opaque_keys.edx.django.models.CourseKeyField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((202, 259), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (233, 259), False, 'from django.db import migrations, models\n'), ((3807, 3907), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""courseusergroup"""', 'unique_together': "{('name', 'course_id')}"}), "(name='courseusergroup', unique_together={(\n 'name', 'course_id')})\n", (3837, 3907), False, 'from django.db import migrations, models\n'), ((3947, 4048), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""cohortmembership"""', 'unique_together': "{('user', 'course_id')}"}), "(name='cohortmembership', unique_together={(\n 'user', 'course_id')})\n", (3977, 4048), False, 'from django.db import migrations, models\n'), ((3261, 3371), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'related_name': '"""cohort"""', 'to': '"""course_groups.CourseUserGroup"""', 'on_delete': 'models.CASCADE'}), "(related_name='cohort', to=\n 'course_groups.CourseUserGroup', on_delete=models.CASCADE)\n", (3281, 3371), False, 'from django.db import migrations, models\n'), ((3507, 3586), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': '"""course_groups.CourseUserGroup"""', 'on_delete': 'models.CASCADE'}), "(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)\n", (3524, 3586), False, 'from django.db import migrations, models\n'), ((3714, 3786), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': 'settings.AUTH_USER_MODEL', 'on_delete': 'models.CASCADE'}), '(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n', (3731, 3786), False, 'from django.db import migrations, models\n'), ((400, 493), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (416, 493), False, 'from django.db import migrations, models\n'), ((522, 552), 'opaque_keys.edx.django.models.CourseKeyField', 'CourseKeyField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (536, 552), False, 'from opaque_keys.edx.django.models import CourseKeyField\n'), ((690, 783), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (706, 783), False, 'from django.db import migrations, models\n'), ((818, 925), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""manual"""', 'max_length': '(20)', 'choices': "[('random', 'Random'), ('manual', 'Manual')]"}), "(default='manual', max_length=20, choices=[('random',\n 'Random'), ('manual', 'Manual')])\n", (834, 925), False, 'from django.db import migrations, models\n'), ((1068, 1161), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (1084, 1161), False, 'from django.db import migrations, models\n'), ((1192, 1226), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1211, 1226), False, 'from django.db import migrations, models\n'), ((1259, 1383), 
'opaque_keys.edx.django.models.CourseKeyField', 'CourseKeyField', ([], {'help_text': '"""Which course are these settings associated with?"""', 'unique': '(True)', 'max_length': '(255)', 'db_index': '(True)'}), "(help_text='Which course are these settings associated with?',\n unique=True, max_length=255, db_index=True)\n", (1273, 1383), False, 'from opaque_keys.edx.django.models import CourseKeyField\n'), ((1424, 1497), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'db_column': '"""cohorted_discussions"""', 'blank': '(True)'}), "(null=True, db_column='cohorted_discussions', blank=True)\n", (1440, 1497), False, 'from django.db import migrations, models\n'), ((1553, 1586), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1572, 1586), False, 'from django.db import migrations, models\n'), ((1727, 1820), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (1743, 1820), False, 'from django.db import migrations, models\n'), ((1844, 1963), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""What is the name of this group? Must be unique within a course."""', 'max_length': '(255)'}), "(help_text=\n 'What is the name of this group? Must be unique within a course.',\n max_length=255)\n", (1860, 1963), False, 'from django.db import migrations, models\n'), ((1987, 2093), 'opaque_keys.edx.django.models.CourseKeyField', 'CourseKeyField', ([], {'help_text': '"""Which course is this group associated with?"""', 'max_length': '(255)', 'db_index': '(True)'}), "(help_text='Which course is this group associated with?',\n max_length=255, db_index=True)\n", (2001, 2093), False, 'from opaque_keys.edx.django.models import CourseKeyField\n'), ((2123, 2186), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'choices': "[('cohort', 'Cohort')]"}), "(max_length=20, choices=[('cohort', 'Cohort')])\n", (2139, 2186), False, 'from django.db import migrations, models\n'), ((2215, 2351), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'help_text': '"""Who is in this group?"""', 'related_name': '"""course_groups"""', 'to': 'settings.AUTH_USER_MODEL', 'db_index': '(True)'}), "(help_text='Who is in this group?', related_name=\n 'course_groups', to=settings.AUTH_USER_MODEL, db_index=True)\n", (2237, 2351), False, 'from django.db import migrations, models\n'), ((2501, 2594), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (2517, 2594), False, 'from django.db import migrations, models\n'), ((2626, 2718), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'help_text': '"""contains the id of a cohorted partition in this course"""'}), "(help_text=\n 'contains the id of a cohorted partition in this course')\n", (2645, 2718), False, 'from django.db import migrations, models\n'), ((2745, 2848), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'help_text': '"""contains the id of a specific group within the cohorted partition"""'}), "(help_text=\n 'contains the id of a specific group within the cohorted partition')\n", (2764, 2848), False, 'from django.db import migrations, models\n'), 
((2877, 2916), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2897, 2916), False, 'from django.db import migrations, models\n'), ((2950, 2985), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (2970, 2985), False, 'from django.db import migrations, models\n'), ((3026, 3113), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'to': '"""course_groups.CourseUserGroup"""', 'on_delete': 'models.CASCADE'}), "(to='course_groups.CourseUserGroup', on_delete=models.\n CASCADE)\n", (3046, 3113), False, 'from django.db import migrations, models\n')] |
from cloud.permission import Permission, NeedPermission
from cloud.message import error
# Define the input output format of the function.
# This information is used when creating the *SDK*.
info = {
'input_format': {
'session_id': 'str',
'field': 'str',
'value?': 'str',
},
'output_format': {
'user_id?': 'str',
},
'description': 'Set my information'
}
@NeedPermission(Permission.Run.Auth.set_me)
def do(data, resource):
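    # Update a single field of the calling user's record: 'field' (and optional
    # 'value') come from the request params; protected identity/auth fields are
    # rejected below before the record is written back.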
body = {}
params = data['params']
user = data['user']
user_id = user['id']
field = params.get('field')
value = params.get('value', None)
user = resource.db_get_item(user_id)
# For security
if field in ['id', 'email', 'password_hash', 'salt', 'groups', 'login_method']:
body['error'] = error.FORBIDDEN_MODIFICATION
return body
else:
user[field] = value
resource.db_update_item(user_id, user)
body['user_id'] = user_id
return body
| [
"cloud.permission.NeedPermission"
] | [((410, 452), 'cloud.permission.NeedPermission', 'NeedPermission', (['Permission.Run.Auth.set_me'], {}), '(Permission.Run.Auth.set_me)\n', (424, 452), False, 'from cloud.permission import Permission, NeedPermission\n')] |
import names
import os
import datetime
from random import random
def generate_gedcom_file():
"""generate some gedcom file"""
db = {}
db['n_individuals'] = 0
db['max_individuals'] = 8000
db['n_families'] = 0
db['yougest'] = None
gedcom_content = """
0 HEAD
1 SOUR Gramps
2 VERS 3.3.0
2 NAME Gramps
1 DATE {}
2 TIME 15:35:24
1 SUBM @SUBM@
1 COPR Copyright (c) 2020 <NAME>,,,.
1 GEDC
2 VERS 5.5
1 CHAR UTF-8
1 LANG German
""".format(datetime.date.today())
def generate_individual(db, birth_year, sex=None, last_name=None):
if not sex:
sex = 'F' if random() < 0.5 else 'M'
first_name = names.get_first_name(
gender='male' if sex == 'M' else 'female')
if random() < 0.3:
first_name += ' ' + \
names.get_first_name(gender='male' if sex == 'M' else 'female')
if not last_name:
last_name = names.get_last_name()
birth_place = 'Paris' if random() < 0.5 else 'Rome'
death_place = 'Zorge' if random() < 0.5 else 'Bruegge'
db['n_individuals'] += 1
individual_id = '@I{}@'.format(db["n_individuals"])
death_year = birth_year + 40 + int(random()*20)
db[individual_id] = {
'birth': birth_year,
'death': death_year,
'sex': sex,
'last_name': last_name
}
birth_date = '1 JUN {}'.format(birth_year)
        death_date = '1 JUN {}'.format(death_year)
if not db['yougest']:
db['yougest'] = individual_id
elif db[db['yougest']]['birth'] < birth_year:
db['yougest'] = individual_id
db[individual_id]['string'] = """0 {individual_id} INDI
1 NAME {first_name} /{last_name}/
1 SEX {sex}
1 BIRT
2 DATE {birth_date}
2 PLAC {birth_place}
1 DEAT
2 DATE {death_date}
2 PLAC {death_place}
""".format(**locals())
return individual_id
def generate_family(db, husband_id, wife_id, children_ids, marriage_year, marriage_place=None):
if not marriage_place:
marriage_place = 'London' if random() < 0.5 else 'Tokio'
db['n_families'] += 1
marriage_date = '1 MAY {}'.format(marriage_year)
family_id = "@F{}@".format(db['n_families'])
db[family_id] = {'string': """0 {family_id} FAM
1 HUSB {husband_id}
1 WIFE {wife_id}
1 MARR
2 DATE {marriage_date}
2 PLAC {marriage_place}
""".format(
**locals()
)}
for child_id in children_ids:
db[family_id]['string'] += "1 CHIL {}\n".format(child_id)
return family_id
def find_by_birth_date(db, from_year, to_year, sex, exclude=[]):
ids = []
for individual_id, data in db.items():
if not individual_id.startswith('@I'):
continue
if 'famc' in data:
if data['birth'] > from_year and data['birth'] < to_year:
if sex == data['sex']:
if individual_id not in exclude:
ids.append(individual_id)
if ids:
return ids[int(random()*len(ids))]
return None
def generate_recursive_family(db, start_year=1000, generations=2, husband_id=None, wife_id=None, siblings=[], max_children=5):
if not husband_id:
if random() < 0.2:
exclude = siblings.copy()
if wife_id:
exclude += [wife_id]
husband_id = find_by_birth_date(
db, start_year, start_year + 10, sex='M', exclude=exclude)
if not husband_id:
husband_id = generate_individual(
db, start_year + int(random()*5), sex='M')
else:
print('reused {}'.format(husband_id))
if not wife_id:
if random() < 10.9:
exclude = siblings.copy() + [husband_id]
wife_id = find_by_birth_date(
db, start_year, start_year + 10, sex='F', exclude=exclude)
if not wife_id:
wife_id = generate_individual(
db, start_year + int(random()*5), sex='F')
else:
print('reused {}'.format(wife_id))
n_children = int((1+random()*(max_children-1)) *
(1 - db['n_individuals'] / db['max_individuals']))
marriage_year = start_year + 20 + int(random()*5)
children_ids = []
for i in range(n_children):
children_ids.append(generate_individual(
db, birth_year=marriage_year + 1 + int(random()*10), last_name=db[husband_id]['last_name']))
family_id = generate_family(
db, husband_id, wife_id, children_ids, marriage_year)
for i in range(n_children):
db[children_ids[i]]['string'] += "1 FAMC "+family_id + '\n'
db[children_ids[i]]['famc'] = family_id
if generations > 0:
generate_recursive_family(
db,
db[children_ids[i]]['birth'],
generations - 1,
children_ids[i] if db[children_ids[i]
]['sex'] == 'M' else None,
children_ids[i] if db[children_ids[i]
]['sex'] == 'F' else None,
children_ids)
db[husband_id]['string'] += "1 FAMS "+family_id + '\n'
db[wife_id]['string'] += "1 FAMS "+family_id + '\n'
generate_recursive_family(db, generations=8, max_children=4)
for k, v in db.items():
if k.startswith('@I'):
gedcom_content += v['string']
for k, v in db.items():
if k.startswith('@F'):
gedcom_content += v['string']
gedcom_content += '0 TRLR\n'
open(os.path.join(os.path.dirname(__file__), '..', 'tests',
'autogenerated.ged'), 'w').write(gedcom_content)
# generate_gedcom_file()
def generate_individual_images():
from PIL import Image, ImageDraw, ImageFont
def generate_one_image(filename, text, font_size=22, pos=(15, 40), size=(100, 100), color=(160, 160, 160)):
img = Image.new('RGB', size, color=color)
d = ImageDraw.Draw(img)
font = ImageFont.truetype(r'arial.ttf', font_size)
d.text(pos, text, fill=(0, 0, 0), font=font)
img.save(filename)
for i in range(20):
generate_one_image(
'tests/images/individual_I6_image_age_{}.png'.format(
1+i*4
), 'Age {}'.format(
1+i*4,
))
generate_individual_images()
| [
"names.get_last_name",
"PIL.Image.new",
"PIL.ImageFont.truetype",
"os.path.dirname",
"PIL.ImageDraw.Draw",
"names.get_first_name",
"random.random",
"datetime.date.today"
] | [((464, 485), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (483, 485), False, 'import datetime\n'), ((649, 712), 'names.get_first_name', 'names.get_first_name', ([], {'gender': "('male' if sex == 'M' else 'female')"}), "(gender='male' if sex == 'M' else 'female')\n", (669, 712), False, 'import names\n'), ((6146, 6181), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size'], {'color': 'color'}), "('RGB', size, color=color)\n", (6155, 6181), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((6195, 6214), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (6209, 6214), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((6230, 6272), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""arial.ttf"""', 'font_size'], {}), "('arial.ttf', font_size)\n", (6248, 6272), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((737, 745), 'random.random', 'random', ([], {}), '()\n', (743, 745), False, 'from random import random\n'), ((917, 938), 'names.get_last_name', 'names.get_last_name', ([], {}), '()\n', (936, 938), False, 'import names\n'), ((803, 866), 'names.get_first_name', 'names.get_first_name', ([], {'gender': "('male' if sex == 'M' else 'female')"}), "(gender='male' if sex == 'M' else 'female')\n", (823, 866), False, 'import names\n'), ((972, 980), 'random.random', 'random', ([], {}), '()\n', (978, 980), False, 'from random import random\n'), ((1032, 1040), 'random.random', 'random', ([], {}), '()\n', (1038, 1040), False, 'from random import random\n'), ((3282, 3290), 'random.random', 'random', ([], {}), '()\n', (3288, 3290), False, 'from random import random\n'), ((3792, 3800), 'random.random', 'random', ([], {}), '()\n', (3798, 3800), False, 'from random import random\n'), ((604, 612), 'random.random', 'random', ([], {}), '()\n', (610, 612), False, 'from random import random\n'), ((1198, 1206), 'random.random', 'random', ([], {}), '()\n', (1204, 1206), False, 'from random import random\n'), ((2078, 2086), 'random.random', 'random', ([], {}), '()\n', (2084, 2086), False, 'from random import random\n'), ((4377, 4385), 'random.random', 'random', ([], {}), '()\n', (4383, 4385), False, 'from random import random\n'), ((5797, 5822), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5812, 5822), False, 'import os\n'), ((3068, 3076), 'random.random', 'random', ([], {}), '()\n', (3074, 3076), False, 'from random import random\n'), ((4226, 4234), 'random.random', 'random', ([], {}), '()\n', (4232, 4234), False, 'from random import random\n'), ((3659, 3667), 'random.random', 'random', ([], {}), '()\n', (3665, 3667), False, 'from random import random\n'), ((4107, 4115), 'random.random', 'random', ([], {}), '()\n', (4113, 4115), False, 'from random import random\n'), ((4559, 4567), 'random.random', 'random', ([], {}), '()\n', (4565, 4567), False, 'from random import random\n')] |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
from sqlalchemy import sql
from sqlalchemy import orm
from sqlalchemy.orm.exc import NoResultFound
from .. import Base
# http://www.iana.org/assignments/media-types/media-types.xhtml
class MimeMajor(Base):
"""Mime major"""
def __init__(self, name):
super().__init__()
self.name = name
class Mime(Base):
def __init__(self, name, template, major):
super().__init__()
self.name = name
self.template = template
self.major = major
@property
def full(self):
return '{0}/{1}'.format(self.major.name, self.name)
@staticmethod
def q_major_minor(dbsession, major, minor):
cond = sql.and_(
MimeMajor.name == major,
Mime.name == minor
)
result = dbsession.execute(
sql.select(Mime).join(Mime.major).options(
orm.contains_eager(Mime.major)
).filter(cond)
).scalar_one_or_none()
return result
###########
# Filters #
###########
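    # Build a filter condition from a "major/minor" string; a minor of "*" matches every
    # subtype, e.g. Mime.filter_mime("image/*") only constrains MimeMajor.name == "image".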
@classmethod
def filter_mime(cls, value):
(major, minor) = value.split('/')
cond = sql.and_()
cond.append(MimeMajor.name == major)
if minor and minor != '*':
cond.append(Mime.name == minor)
return cond
| [
"sqlalchemy.sql.and_",
"sqlalchemy.orm.contains_eager",
"sqlalchemy.sql.select"
] | [((722, 775), 'sqlalchemy.sql.and_', 'sql.and_', (['(MimeMajor.name == major)', '(Mime.name == minor)'], {}), '(MimeMajor.name == major, Mime.name == minor)\n', (730, 775), False, 'from sqlalchemy import sql\n'), ((1187, 1197), 'sqlalchemy.sql.and_', 'sql.and_', ([], {}), '()\n', (1195, 1197), False, 'from sqlalchemy import sql\n'), ((918, 948), 'sqlalchemy.orm.contains_eager', 'orm.contains_eager', (['Mime.major'], {}), '(Mime.major)\n', (936, 948), False, 'from sqlalchemy import orm\n'), ((859, 875), 'sqlalchemy.sql.select', 'sql.select', (['Mime'], {}), '(Mime)\n', (869, 875), False, 'from sqlalchemy import sql\n')] |
# encoding: utf-8
from goods.models import Goods
from django.views.generic.base import View
class GoodsListView(View):
def get(self, request):
"""
        Serve the goods list page through a plain Django class-based view.
"""
json_list = []
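        # Three alternative serialization strategies are shown below: hand-built dicts
        # (commented out), django.forms.models.model_to_dict, and django.core.serializers.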
goods = Goods.objects.all()[:10]
# for good in goods:
# json_dict = {}
# json_dict["name"] = good.name
# json_dict["category"] = good.category.name
# json_dict["market_price"] = good.market_price
# json_dict["add_time"] = good.add_time
# json_list.append(json_dict)
# from django.http import HttpResponse
# import json
# return HttpResponse(json.dumps(json_list),content_type="application/json")
from django.forms.models import model_to_dict
for good in goods:
json_dict = model_to_dict(good)
json_list.append(json_dict)
import json
from django.core import serializers
json_data = serializers.serialize('json', goods)
json_data = json.loads(json_data)
from django.http import HttpResponse, JsonResponse
        # JsonResponse simply adds the json.dumps call and the content_type header for us
# return HttpResponse(json.dumps(json_data), content_type="application/json")
        # with the json.loads call above commented out, the statement below also works
# return HttpResponse(json_data, content_type="application/json")
return JsonResponse(json_data, safe=False)
| [
"json.loads",
"django.http.JsonResponse",
"goods.models.Goods.objects.all",
"django.forms.models.model_to_dict",
"django.core.serializers.serialize"
] | [((986, 1022), 'django.core.serializers.serialize', 'serializers.serialize', (['"""json"""', 'goods'], {}), "('json', goods)\n", (1007, 1022), False, 'from django.core import serializers\n'), ((1043, 1064), 'json.loads', 'json.loads', (['json_data'], {}), '(json_data)\n', (1053, 1064), False, 'import json\n'), ((1376, 1411), 'django.http.JsonResponse', 'JsonResponse', (['json_data'], {'safe': '(False)'}), '(json_data, safe=False)\n', (1388, 1411), False, 'from django.http import HttpResponse, JsonResponse\n'), ((241, 260), 'goods.models.Goods.objects.all', 'Goods.objects.all', ([], {}), '()\n', (258, 260), False, 'from goods.models import Goods\n'), ((841, 860), 'django.forms.models.model_to_dict', 'model_to_dict', (['good'], {}), '(good)\n', (854, 860), False, 'from django.forms.models import model_to_dict\n')] |
"""Organize the calculation of statistics for each series in this DataFrame."""
import warnings
from datetime import datetime
from typing import Optional
import pandas as pd
from tqdm.auto import tqdm
from visions import VisionsTypeset
from pandas_profiling.config import Settings
from pandas_profiling.model.correlations import calculate_correlation
from pandas_profiling.model.duplicates import get_duplicates
from pandas_profiling.model.sample import Sample, get_sample
from pandas_profiling.model.summarizer import BaseSummarizer
from pandas_profiling.model.summary import (
get_messages,
get_missing_diagrams,
get_scatter_matrix,
get_series_descriptions,
get_table_stats,
)
from pandas_profiling.version import __version__
def describe(
config: Settings,
df: pd.DataFrame,
summarizer: BaseSummarizer,
typeset: VisionsTypeset,
sample: Optional[dict] = None,
) -> dict:
"""Calculate the statistics for each series in this DataFrame.
Args:
        config: report Settings object
        df: DataFrame.
        summarizer: summarizer object
        typeset: visions typeset
        sample: optional, dict with custom sample
Returns:
This function returns a dictionary containing:
- table: overall statistics.
- variables: descriptions per series.
- correlations: correlation matrices.
- missing: missing value diagrams.
- messages: direct special attention to these patterns in your data.
- package: package details.
"""
if df is None:
raise ValueError("Can not describe a `lazy` ProfileReport without a DataFrame.")
if not isinstance(df, pd.DataFrame):
warnings.warn("df is not of type pandas.DataFrame")
disable_progress_bar = not config.progress_bar
date_start = datetime.utcnow()
correlation_names = [
correlation_name
for correlation_name in [
"pearson",
"spearman",
"kendall",
"phi_k",
"cramers",
]
if config.correlations[correlation_name].calculate
]
number_of_tasks = 8 + len(df.columns) + len(correlation_names)
with tqdm(
total=number_of_tasks, desc="Summarize dataset", disable=disable_progress_bar
) as pbar:
series_description = get_series_descriptions(
config, df, summarizer, typeset, pbar
)
pbar.set_postfix_str("Get variable types")
variables = {
column: description["type"]
for column, description in series_description.items()
}
supported_columns = [
column
for column, type_name in variables.items()
if type_name != "Unsupported"
]
interval_columns = [
column for column, type_name in variables.items() if type_name == "Numeric"
]
pbar.update()
# Get correlations
correlations = {}
for correlation_name in correlation_names:
pbar.set_postfix_str(f"Calculate {correlation_name} correlation")
correlations[correlation_name] = calculate_correlation(
config, df, correlation_name, series_description
)
pbar.update()
# make sure correlations is not None
correlations = {
key: value for key, value in correlations.items() if value is not None
}
# Scatter matrix
pbar.set_postfix_str("Get scatter matrix")
scatter_matrix = get_scatter_matrix(config, df, interval_columns)
pbar.update()
# Table statistics
pbar.set_postfix_str("Get table statistics")
table_stats = get_table_stats(config, df, series_description)
pbar.update()
# missing diagrams
pbar.set_postfix_str("Get missing diagrams")
missing = get_missing_diagrams(config, df, table_stats)
pbar.update()
# Sample
pbar.set_postfix_str("Take sample")
if sample is None:
samples = get_sample(config, df)
else:
if "name" not in sample:
sample["name"] = None
if "caption" not in sample:
sample["caption"] = None
samples = [
Sample(
id="custom",
data=sample["data"],
name=sample["name"],
caption=sample["caption"],
)
]
pbar.update()
# Duplicates
pbar.set_postfix_str("Locating duplicates")
metrics, duplicates = get_duplicates(config, df, supported_columns)
table_stats.update(metrics)
pbar.update()
# Messages
pbar.set_postfix_str("Get messages/warnings")
messages = get_messages(config, table_stats, series_description, correlations)
pbar.update()
pbar.set_postfix_str("Get reproduction details")
package = {
"pandas_profiling_version": __version__,
"pandas_profiling_config": config.json(),
}
pbar.update()
pbar.set_postfix_str("Completed")
date_end = datetime.utcnow()
analysis = {
"title": config.title,
"date_start": date_start,
"date_end": date_end,
"duration": date_end - date_start,
}
return {
# Analysis metadata
"analysis": analysis,
# Overall dataset description
"table": table_stats,
# Per variable descriptions
"variables": series_description,
# Bivariate relations
"scatter": scatter_matrix,
# Correlation matrices
"correlations": correlations,
# Missing values
"missing": missing,
# Warnings
"messages": messages,
# Package
"package": package,
# Sample
"sample": samples,
# Duplicates
"duplicates": duplicates,
}
| [
"pandas_profiling.model.summary.get_missing_diagrams",
"pandas_profiling.model.summary.get_messages",
"datetime.datetime.utcnow",
"pandas_profiling.model.summary.get_series_descriptions",
"pandas_profiling.model.summary.get_table_stats",
"pandas_profiling.model.duplicates.get_duplicates",
"pandas_profiling.model.sample.Sample",
"pandas_profiling.model.correlations.calculate_correlation",
"tqdm.auto.tqdm",
"warnings.warn",
"pandas_profiling.model.sample.get_sample",
"pandas_profiling.model.summary.get_scatter_matrix"
] | [((1774, 1791), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1789, 1791), False, 'from datetime import datetime\n'), ((5130, 5147), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5145, 5147), False, 'from datetime import datetime\n'), ((1652, 1703), 'warnings.warn', 'warnings.warn', (['"""df is not of type pandas.DataFrame"""'], {}), "('df is not of type pandas.DataFrame')\n", (1665, 1703), False, 'import warnings\n'), ((2145, 2233), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'number_of_tasks', 'desc': '"""Summarize dataset"""', 'disable': 'disable_progress_bar'}), "(total=number_of_tasks, desc='Summarize dataset', disable=\n disable_progress_bar)\n", (2149, 2233), False, 'from tqdm.auto import tqdm\n'), ((2281, 2343), 'pandas_profiling.model.summary.get_series_descriptions', 'get_series_descriptions', (['config', 'df', 'summarizer', 'typeset', 'pbar'], {}), '(config, df, summarizer, typeset, pbar)\n', (2304, 2343), False, 'from pandas_profiling.model.summary import get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats\n'), ((3483, 3531), 'pandas_profiling.model.summary.get_scatter_matrix', 'get_scatter_matrix', (['config', 'df', 'interval_columns'], {}), '(config, df, interval_columns)\n', (3501, 3531), False, 'from pandas_profiling.model.summary import get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats\n'), ((3657, 3704), 'pandas_profiling.model.summary.get_table_stats', 'get_table_stats', (['config', 'df', 'series_description'], {}), '(config, df, series_description)\n', (3672, 3704), False, 'from pandas_profiling.model.summary import get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats\n'), ((3826, 3871), 'pandas_profiling.model.summary.get_missing_diagrams', 'get_missing_diagrams', (['config', 'df', 'table_stats'], {}), '(config, df, table_stats)\n', (3846, 3871), False, 'from pandas_profiling.model.summary import get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats\n'), ((4567, 4612), 'pandas_profiling.model.duplicates.get_duplicates', 'get_duplicates', (['config', 'df', 'supported_columns'], {}), '(config, df, supported_columns)\n', (4581, 4612), False, 'from pandas_profiling.model.duplicates import get_duplicates\n'), ((4764, 4831), 'pandas_profiling.model.summary.get_messages', 'get_messages', (['config', 'table_stats', 'series_description', 'correlations'], {}), '(config, table_stats, series_description, correlations)\n', (4776, 4831), False, 'from pandas_profiling.model.summary import get_messages, get_missing_diagrams, get_scatter_matrix, get_series_descriptions, get_table_stats\n'), ((3089, 3160), 'pandas_profiling.model.correlations.calculate_correlation', 'calculate_correlation', (['config', 'df', 'correlation_name', 'series_description'], {}), '(config, df, correlation_name, series_description)\n', (3110, 3160), False, 'from pandas_profiling.model.correlations import calculate_correlation\n'), ((4005, 4027), 'pandas_profiling.model.sample.get_sample', 'get_sample', (['config', 'df'], {}), '(config, df)\n', (4015, 4027), False, 'from pandas_profiling.model.sample import Sample, get_sample\n'), ((4239, 4332), 'pandas_profiling.model.sample.Sample', 'Sample', ([], {'id': '"""custom"""', 'data': "sample['data']", 'name': "sample['name']", 'caption': "sample['caption']"}), "(id='custom', data=sample['data'], name=sample['name'], caption=\n sample['caption'])\n", 
(4245, 4332), False, 'from pandas_profiling.model.sample import Sample, get_sample\n')] |
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import os
import os.path
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
NAME = "future"
PACKAGES = ["future",
"future.builtins",
"future.types",
"future.standard_library",
"future.backports",
"future.backports.email",
"future.backports.email.mime",
"future.backports.html",
"future.backports.http",
"future.backports.test",
"future.backports.urllib",
"future.backports.xmlrpc",
"future.moves",
"future.moves.dbm",
"future.moves.html",
"future.moves.http",
"future.moves.test",
"future.moves.tkinter",
"future.moves.urllib",
"future.moves.xmlrpc",
"future.tests", # for future.tests.base
# "future.tests.test_email",
"future.utils",
"past",
"past.builtins",
"past.types",
"past.utils",
# "past.tests",
"past.translation",
"libfuturize",
"libfuturize.fixes",
"libpasteurize",
"libpasteurize.fixes",
]
# PEP 3108 stdlib moves:
if sys.version_info[:2] < (3, 0):
PACKAGES += [
"builtins",
"configparser",
"copyreg",
"html",
"http",
"queue",
"reprlib",
"socketserver",
"tkinter",
"winreg",
"xmlrpc",
"_dummy_thread",
"_markupbase",
"_thread",
]
PACKAGE_DATA = {'': [
'README.rst',
'LICENSE.txt',
'futurize.py',
'pasteurize.py',
'discover_tests.py',
'check_rst.sh',
'TESTING.txt',
],
'tests': ['*.py'],
}
REQUIRES = []
TEST_REQUIRES = []
if sys.version_info[:2] == (2, 6):
REQUIRES += ['importlib', 'argparse']
TEST_REQUIRES += ['unittest2']
import src.future
VERSION = src.future.__version__
DESCRIPTION = "Clean single-source support for Python 3 and 2"
LONG_DESC = src.future.__doc__
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
URL = "https://python-future.org"
LICENSE = "MIT"
KEYWORDS = "future past python3 migration futurize backport six 2to3 modernize pasteurize 3to2"
CLASSIFIERS = [
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"License :: OSI Approved",
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
]
setup_kwds = {}
# * Important *
# If a stale Python 2 build folder would break the user's Python 3 installation
# (e.g. after running "python2 setup.py build" and then "python3 setup.py install"),
# we refuse to continue and ask for the build folder to be removed manually.
try:
# If the user happens to run:
# python2 setup.py build
# python3 setup.py install
# then folders like "configparser" will be in build/lib.
# If so, we CANNOT let the user install this, because
# this may break his/her Python 3 install, depending on the folder order in
# sys.path. (Running "import configparser" etc. may pick up our Py2
# substitute packages, instead of the intended system stdlib modules.)
SYSTEM_MODULES = set([
'_dummy_thread',
'_markupbase',
'_thread',
'builtins',
'configparser',
'copyreg',
'html',
'http',
'queue',
'reprlib',
'socketserver',
'tkinter',
'winreg',
'xmlrpc'
])
if sys.version_info[0] >= 3:
# Do any of the above folders exist in build/lib?
files = os.listdir(os.path.join('build', 'lib'))
if len(set(files) & set(SYSTEM_MODULES)) > 0:
print('ERROR: Your build folder is in an inconsistent state for '
'a Python 3.x install. Please remove it manually and run '
'setup.py again.', file=sys.stderr)
sys.exit(1)
except OSError:
pass
setup(name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=LONG_DESC,
license=LICENSE,
keywords=KEYWORDS,
entry_points={
'console_scripts': [
'futurize = libfuturize.main:main',
'pasteurize = libpasteurize.main:main'
]
},
package_dir={'': 'src'},
packages=PACKAGES,
package_data=PACKAGE_DATA,
include_package_data=True,
install_requires=REQUIRES,
classifiers=CLASSIFIERS,
      test_suite="discover_tests",
tests_require=TEST_REQUIRES,
**setup_kwds
)
| [
"os.path.join",
"os.system",
"sys.exit",
"distutils.core.setup"
] | [((4823, 5361), 'distutils.core.setup', 'setup', ([], {'name': 'NAME', 'version': 'VERSION', 'author': 'AUTHOR', 'author_email': 'AUTHOR_EMAIL', 'url': 'URL', 'description': 'DESCRIPTION', 'long_description': 'LONG_DESC', 'license': 'LICENSE', 'keywords': 'KEYWORDS', 'entry_points': "{'console_scripts': ['futurize = libfuturize.main:main',\n 'pasteurize = libpasteurize.main:main']}", 'package_dir': "{'': 'src'}", 'packages': 'PACKAGES', 'package_data': 'PACKAGE_DATA', 'include_package_data': '(True)', 'install_requires': 'REQUIRES', 'classifiers': 'CLASSIFIERS', 'test_suite': '"""discover_tests"""', 'tests_require': 'TEST_REQUIRES'}), "(name=NAME, version=VERSION, author=AUTHOR, author_email=AUTHOR_EMAIL,\n url=URL, description=DESCRIPTION, long_description=LONG_DESC, license=\n LICENSE, keywords=KEYWORDS, entry_points={'console_scripts': [\n 'futurize = libfuturize.main:main',\n 'pasteurize = libpasteurize.main:main']}, package_dir={'': 'src'},\n packages=PACKAGES, package_data=PACKAGE_DATA, include_package_data=True,\n install_requires=REQUIRES, classifiers=CLASSIFIERS, test_suite=\n 'discover_tests', tests_require=TEST_REQUIRES, **setup_kwds)\n", (4828, 5361), False, 'from distutils.core import setup\n'), ((248, 289), 'os.system', 'os.system', (['"""python setup.py sdist upload"""'], {}), "('python setup.py sdist upload')\n", (257, 289), False, 'import os\n'), ((294, 304), 'sys.exit', 'sys.exit', ([], {}), '()\n', (302, 304), False, 'import sys\n'), ((4480, 4508), 'os.path.join', 'os.path.join', (['"""build"""', '"""lib"""'], {}), "('build', 'lib')\n", (4492, 4508), False, 'import os\n'), ((4785, 4796), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4793, 4796), False, 'import sys\n')] |
import requests
from bs4 import BeautifulSoup
import urllib.request
import os
import random
import time
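# Scrape a single Stack Overflow question page with a randomly chosen desktop User-Agent and
# collect the title, view count, answer count, asked time, tags, last-active time, question
# body and all answer bodies into `item`.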
def html(url):
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "]
user_agent = random.choice(user_agents)
headers = {
'User-Agent': user_agent,
'Accept-Encoding': 'gzip'}
req = requests.get(url=url, headers=headers)
html_doc = req.text
soup = BeautifulSoup(html_doc, "html.parser")
times = soup.select("time")
views = soup.select("p.label-key > b")
active_str = str(views[2])
active = active_str[active_str.find("title=\"") + 7:active_str.find("Z")]
answers = soup.select("#answers-header > div > h2 >span")
question_content = soup.select("div.post-text")
tags = soup.select("#question > div.post-layout > div.postcell.post-layout--right > "
"div.post-taglist.grid.gs4.gsy.fd-column > div >a")
title = soup.select("h1 >a")
tags_str = ""
item = []
for tag in tags:
tags_str += tag.get_text() + ","
    answer_contents = []
    for i in range(1, len(question_content)):
        answer_contents.append(question_content[i])
for i in range(len(times)):
if len(times[i].get_text()) > 1:
asked_time = times[i].get("datetime").replace("T", " ")
    # item order: title, view count, answer count, asked time, tags, last-active time, question body, answer bodies
    item.append(title[0].get_text())
item.append(views[1].get_text())
item.append(answers[0].get_text())
item.append(asked_time)
item.append(tags_str)
item.append(active)
item.append(question_content[0])
    item.append(answer_contents)
print(item)
# updatetosql(item)
def updatetosql(item):
    answers_text = "[split]".join(item[7])
updatesql = "UPDATE `t_stackoverflow_question` " \
"SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' " \
"WHERE (`question_id`='%s') " \
                % (item[4], item[1], item[2], item[3], item[5], item[6], answers_text, item[0],)
pass
if __name__ == '__main__':
html("https://stackoverflow.com/questions/50119673/nginx-fast-cgi-cache-on-error-page-404")
| [
"bs4.BeautifulSoup",
"random.choice",
"requests.get"
] | [((923, 949), 'random.choice', 'random.choice', (['user_agents'], {}), '(user_agents)\n', (936, 949), False, 'import random\n'), ((1045, 1083), 'requests.get', 'requests.get', ([], {'url': 'url', 'headers': 'headers'}), '(url=url, headers=headers)\n', (1057, 1083), False, 'import requests\n'), ((1119, 1157), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_doc', '"""html.parser"""'], {}), "(html_doc, 'html.parser')\n", (1132, 1157), False, 'from bs4 import BeautifulSoup\n')] |
import os
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from sklearn.model_selection import KFold, train_test_split
def load_data(path):
train = pd.read_json(os.path.join(path, "./train.json"))
test = pd.read_json(os.path.join(path, "./test.json"))
return (train, test)
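# Each radar sample becomes a 75x75x3 "image": band_1, band_2 and a constant plane derived
# from the incidence angle, with every channel standardised by the given means and stds.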
def preprocess(df,
means=(-22.159262, -24.953745, 40.021883465782651),
stds=(5.33146, 4.5463958, 4.0815391476694414)):
X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75)
for band in df["band_1"]])
X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75)
for band in df["band_2"]])
    angl = df['inc_angle'].map(lambda x: np.cos(x * np.pi / 180) if x != 'na' else means[2])
angl = np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32)
for angel in angl])
X_band_1 = (X_band_1 - means[0]) / stds[0]
X_band_2 = (X_band_2 - means[1]) / stds[1]
angl = (angl - means[2]) / stds[2]
images = np.concatenate([X_band_1[:, :, :, np.newaxis],
X_band_2[:, :, :, np.newaxis],
angl[:, :, :, np.newaxis]],
axis=-1)
return images
def prepare_data_cv(path):
train, test = load_data(path)
X_train, y_train = (preprocess(train),
to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1)))
kfold_data = []
kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE)
for train_indices, val_indices in kf.split(y_train):
X_train_cv = X_train[train_indices]
y_train_cv = y_train[train_indices]
X_val = X_train[val_indices]
y_val = y_train[val_indices]
kfold_data.append((X_train_cv, y_train_cv, X_val, y_val))
X_test = preprocess(test)
return (kfold_data, X_test)
def prepare_data(path):
train, test = load_data(path)
X_train, y_train = (preprocess(train),
to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1)))
X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(X_train,
y_train,
random_state=0xCAFFE,
train_size=0.8)
X_test = preprocess(test)
return ([(X_train_cv, y_train_cv, X_valid, y_valid)], X_test)
| [
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.array",
"numpy.cos",
"numpy.concatenate",
"numpy.full",
"sklearn.model_selection.KFold"
] | [((1090, 1209), 'numpy.concatenate', 'np.concatenate', (['[X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis], angl[:, :, :,\n np.newaxis]]'], {'axis': '(-1)'}), '([X_band_1[:, :, :, np.newaxis], X_band_2[:, :, :, np.newaxis\n ], angl[:, :, :, np.newaxis]], axis=-1)\n', (1104, 1209), True, 'import numpy as np\n'), ((1533, 1585), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(831486)'}), '(n_splits=5, shuffle=True, random_state=831486)\n', (1538, 1585), False, 'from sklearn.model_selection import KFold, train_test_split\n'), ((2178, 2249), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_train', 'y_train'], {'random_state': '(831486)', 'train_size': '(0.8)'}), '(X_train, y_train, random_state=831486, train_size=0.8)\n', (2194, 2249), False, 'from sklearn.model_selection import KFold, train_test_split\n'), ((198, 232), 'os.path.join', 'os.path.join', (['path', '"""./train.json"""'], {}), "(path, './train.json')\n", (210, 232), False, 'import os\n'), ((258, 291), 'os.path.join', 'os.path.join', (['path', '"""./test.json"""'], {}), "(path, './test.json')\n", (270, 291), False, 'import os\n'), ((767, 790), 'numpy.cos', 'np.cos', (['(x * np.pi / 180)'], {}), '(x * np.pi / 180)\n', (773, 790), True, 'import numpy as np\n'), ((840, 881), 'numpy.full', 'np.full', ([], {'shape': '(75, 75)', 'fill_value': 'angel'}), '(shape=(75, 75), fill_value=angel)\n', (847, 881), True, 'import numpy as np\n'), ((495, 509), 'numpy.array', 'np.array', (['band'], {}), '(band)\n', (503, 509), True, 'import numpy as np\n'), ((623, 637), 'numpy.array', 'np.array', (['band'], {}), '(band)\n', (631, 637), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import sys
import click
import rhea
from polyaxon_cli.cli.getters.experiment import (
get_experiment_job_or_local,
get_project_experiment_or_local
)
from polyaxon_cli.cli.upload import upload
from polyaxon_cli.client import PolyaxonClient
from polyaxon_cli.client.exceptions import PolyaxonHTTPError, PolyaxonShouldExitError
from polyaxon_cli.logger import clean_outputs
from polyaxon_cli.managers.experiment import ExperimentManager
from polyaxon_cli.managers.experiment_job import ExperimentJobManager
from polyaxon_cli.utils import cache
from polyaxon_cli.utils.formatting import (
Printer,
dict_tabulate,
get_meta_response,
get_resources,
list_dicts_to_tabulate
)
from polyaxon_cli.utils.log_handler import get_logs_handler
from polyaxon_cli.utils.validation import validate_tags
from polyaxon_client.exceptions import PolyaxonClientException
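# Pretty-print one experiment's description, resources, declarations, last metrics and core info.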
def get_experiment_details(experiment): # pylint:disable=redefined-outer-name
if experiment.description:
Printer.print_header("Experiment description:")
click.echo('{}\n'.format(experiment.description))
if experiment.resources:
get_resources(experiment.resources.to_dict(), header="Experiment resources:")
if experiment.declarations:
Printer.print_header("Experiment declarations:")
dict_tabulate(experiment.declarations)
if experiment.last_metric:
Printer.print_header("Experiment last metrics:")
dict_tabulate(experiment.last_metric)
response = experiment.to_light_dict(
humanize_values=True,
exclude_attrs=[
'uuid', 'config', 'project', 'experiments', 'description',
'declarations', 'last_metric', 'resources', 'jobs', 'run_env'
])
Printer.print_header("Experiment info:")
dict_tabulate(Printer.add_status_color(response))
@click.group()
@click.option('--project', '-p', type=str, help="The project name, e.g. 'mnist' or 'adam/mnist'.")
@click.option('--experiment', '-xp', type=int, help="The experiment id number.")
@click.pass_context
@clean_outputs
def experiment(ctx, project, experiment): # pylint:disable=redefined-outer-name
"""Commands for experiments."""
ctx.obj = ctx.obj or {}
ctx.obj['project'] = project
ctx.obj['experiment'] = experiment
@experiment.command()
@click.option('--job', '-j', type=int, help="The job id.")
@click.pass_context
@clean_outputs
def get(ctx, job):
"""Get experiment or experiment job.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples for getting an experiment:
\b
```bash
$ polyaxon experiment get # if experiment is cached
```
\b
```bash
$ polyaxon experiment --experiment=1 get
```
\b
```bash
$ polyaxon experiment -xp 1 --project=cats-vs-dogs get
```
\b
```bash
$ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get
```
Examples for getting an experiment job:
\b
```bash
$ polyaxon experiment get -j 1 # if experiment is cached
```
\b
```bash
$ polyaxon experiment --experiment=1 get --job=10
```
\b
```bash
$ polyaxon experiment -xp 1 --project=cats-vs-dogs get -j 2
```
\b
```bash
$ polyaxon experiment -xp 1 -p alain/cats-vs-dogs get -j 2
```
"""
def get_experiment():
try:
response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment)
cache.cache(config_manager=ExperimentManager, response=response)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not load experiment `{}` info.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
get_experiment_details(response)
def get_experiment_job():
try:
response = PolyaxonClient().experiment_job.get_job(user,
project_name,
_experiment,
_job)
cache.cache(config_manager=ExperimentJobManager, response=response)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.resources:
get_resources(response.resources.to_dict(), header="Job resources:")
response = Printer.add_status_color(response.to_light_dict(
humanize_values=True,
exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources']
))
Printer.print_header("Job info:")
dict_tabulate(response)
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if job:
_job = get_experiment_job_or_local(job)
get_experiment_job()
else:
get_experiment()
@experiment.command()
@click.pass_context
@clean_outputs
def delete(ctx):
"""Delete experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Example:
\b
```bash
$ polyaxon experiment delete
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
    if not click.confirm("Are you sure you want to delete experiment `{}`".format(_experiment)):
        click.echo('Exiting without deleting experiment.')
sys.exit(1)
try:
response = PolyaxonClient().experiment.delete_experiment(
user, project_name, _experiment)
# Purge caching
ExperimentManager.purge()
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not delete experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.status_code == 204:
        Printer.print_success("Experiment `{}` was deleted successfully".format(_experiment))
@experiment.command()
@click.option('--name', type=str,
help='Name of the experiment, must be unique within the project, could be none.')
@click.option('--description', type=str, help='Description of the experiment.')
@click.option('--tags', type=str, help='Tags of the experiment, comma separated values.')
@click.pass_context
@clean_outputs
def update(ctx, name, description, tags):
"""Update experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment -xp 2 update --description="new description for my experiments"
```
\b
```bash
$ polyaxon experiment -xp 2 update --tags="foo, bar" --name="unique-name"
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
update_dict = {}
if name:
update_dict['name'] = name
if description:
update_dict['description'] = description
tags = validate_tags(tags)
if tags:
update_dict['tags'] = tags
if not update_dict:
Printer.print_warning('No argument was provided to update the experiment.')
sys.exit(0)
try:
response = PolyaxonClient().experiment.update_experiment(
user, project_name, _experiment, update_dict)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not update experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Experiment updated.")
get_experiment_details(response)
@experiment.command()
@click.option('--yes', '-y', is_flag=True, default=False,
help="Automatic yes to prompts. "
"Assume \"yes\" as answer to all prompts and run non-interactively.")
@click.pass_context
@clean_outputs
def stop(ctx, yes):
"""Stop experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment stop
```
\b
```bash
$ polyaxon experiment -xp 2 stop
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
    if not yes and not click.confirm("Are you sure you want to stop "
"experiment `{}`".format(_experiment)):
        click.echo('Exiting without stopping experiment.')
sys.exit(0)
try:
PolyaxonClient().experiment.stop(user, project_name, _experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not stop experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Experiment is being stopped.")
@experiment.command()
@click.option('--copy', '-c', is_flag=True, default=False,
help="To copy the experiment before restarting.")
@click.option('--file', '-f', multiple=True, type=click.Path(exists=True),
help="The polyaxon files to update with.")
@click.option('-u', is_flag=True, default=False,
help="To upload the repo before restarting.")
@click.pass_context
@clean_outputs
def restart(ctx, copy, file, u): # pylint:disable=redefined-builtin
"""Restart experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment --experiment=1 restart
```
"""
config = None
update_code = None
if file:
config = rhea.read(file)
# Check if we need to upload
if u:
ctx.invoke(upload, sync=False)
update_code = True
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
try:
if copy:
response = PolyaxonClient().experiment.copy(
user, project_name, _experiment, config=config, update_code=update_code)
Printer.print_success('Experiment was copied with id {}'.format(response.id))
else:
response = PolyaxonClient().experiment.restart(
user, project_name, _experiment, config=config, update_code=update_code)
Printer.print_success('Experiment was restarted with id {}'.format(response.id))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not restart experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
@experiment.command()
@click.option('--file', '-f', multiple=True, type=click.Path(exists=True),
help="The polyaxon files to update with.")
@click.option('-u', is_flag=True, default=False,
help="To upload the repo before resuming.")
@click.pass_context
@clean_outputs
def resume(ctx, file, u): # pylint:disable=redefined-builtin
"""Resume experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment --experiment=1 resume
```
"""
config = None
update_code = None
if file:
config = rhea.read(file)
# Check if we need to upload
if u:
ctx.invoke(upload, sync=False)
update_code = True
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
try:
response = PolyaxonClient().experiment.resume(
user, project_name, _experiment, config=config, update_code=update_code)
Printer.print_success('Experiment was resumed with id {}'.format(response.id))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not resume experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
@experiment.command()
@click.option('--page', type=int, help="To paginate through the list of jobs.")
@click.pass_context
@clean_outputs
def jobs(ctx, page):
"""List jobs for experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment --experiment=1 jobs
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
page = page or 1
try:
response = PolyaxonClient().experiment.list_jobs(
user, project_name, _experiment, page=page)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
meta = get_meta_response(response)
if meta:
Printer.print_header('Jobs for experiment `{}`.'.format(_experiment))
Printer.print_header('Navigation:')
dict_tabulate(meta)
else:
Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment))
objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))
for o in response['results']]
objects = list_dicts_to_tabulate(objects)
if objects:
Printer.print_header("Jobs:")
objects.pop('experiment', None)
dict_tabulate(objects, is_list_dict=True)
@experiment.command()
@click.option('--job', '-j', type=int, help="The job id.")
@click.option('--page', type=int, help="To paginate through the list of statuses.")
@click.pass_context
@clean_outputs
def statuses(ctx, job, page):
"""Get experiment or experiment job statuses.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples getting experiment statuses:
\b
```bash
$ polyaxon experiment statuses
```
\b
```bash
$ polyaxon experiment -xp 1 statuses
```
Examples getting experiment job statuses:
\b
```bash
$ polyaxon experiment statuses -j 3
```
\b
```bash
$ polyaxon experiment -xp 1 statuses --job 1
```
"""
def get_experiment_statuses():
try:
response = PolyaxonClient().experiment.get_statuses(
user, project_name, _experiment, page=page)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
            Printer.print_error('Could not get status for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
meta = get_meta_response(response)
if meta:
Printer.print_header('Statuses for experiment `{}`.'.format(_experiment))
Printer.print_header('Navigation:')
dict_tabulate(meta)
else:
Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment))
objects = list_dicts_to_tabulate(
[Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')
for o in response['results']])
if objects:
Printer.print_header("Statuses:")
objects.pop('experiment', None)
dict_tabulate(objects, is_list_dict=True)
def get_experiment_job_statuses():
try:
response = PolyaxonClient().experiment_job.get_statuses(user,
project_name,
_experiment,
_job,
page=page)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get status for job `{}`.'.format(job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
meta = get_meta_response(response)
if meta:
Printer.print_header('Statuses for Job `{}`.'.format(_job))
Printer.print_header('Navigation:')
dict_tabulate(meta)
else:
Printer.print_header('No statuses found for job `{}`.'.format(_job))
objects = list_dicts_to_tabulate(
[Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')
for o in response['results']])
if objects:
Printer.print_header("Statuses:")
objects.pop('job', None)
dict_tabulate(objects, is_list_dict=True)
page = page or 1
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if job:
_job = get_experiment_job_or_local(job)
get_experiment_job_statuses()
else:
get_experiment_statuses()
@experiment.command()
@click.option('--job', '-j', type=int, help="The job id.")
@click.option('--gpu', '-g', is_flag=True, help="List experiment GPU resources.")
@click.pass_context
@clean_outputs
def resources(ctx, job, gpu):
"""Get experiment or experiment job resources.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples for getting experiment resources:
\b
```bash
$ polyaxon experiment -xp 19 resources
```
For GPU resources
\b
```bash
$ polyaxon experiment -xp 19 resources --gpu
```
Examples for getting experiment job resources:
\b
```bash
$ polyaxon experiment -xp 19 resources -j 1
```
For GPU resources
\b
```bash
$ polyaxon experiment -xp 19 resources -j 1 --gpu
```
"""
def get_experiment_resources():
try:
message_handler = Printer.gpu_resources if gpu else Printer.resources
PolyaxonClient().experiment.resources(
user, project_name, _experiment, message_handler=message_handler)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get resources for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def get_experiment_job_resources():
try:
message_handler = Printer.gpu_resources if gpu else Printer.resources
PolyaxonClient().experiment_job.resources(user,
project_name,
_experiment,
_job,
message_handler=message_handler)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get resources for job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if job:
_job = get_experiment_job_or_local(job)
get_experiment_job_resources()
else:
get_experiment_resources()
@experiment.command()
@click.option('--job', '-j', type=int, help="The job id.")
@click.option('--past', '-p', is_flag=True, help="Show the past logs.")
@click.option('--follow', '-f', is_flag=True, default=False,
help="Stream logs after showing past logs.")
@click.option('--hide_time', is_flag=True, default=False,
help="Whether or not to hide timestamps from the log stream.")
@click.pass_context
@clean_outputs
def logs(ctx, job, past, follow, hide_time):
"""Get experiment or experiment job logs.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples for getting experiment logs:
\b
```bash
$ polyaxon experiment logs
```
\b
```bash
$ polyaxon experiment -xp 10 -p mnist logs
```
Examples for getting experiment job logs:
\b
```bash
$ polyaxon experiment -xp 1 -j 1 logs
```
"""
def get_experiment_logs():
if past:
try:
response = PolyaxonClient().experiment.logs(
user, project_name, _experiment, stream=False)
get_logs_handler(handle_job_info=True,
show_timestamp=not hide_time,
stream=False)(response.content.decode().split('\n'))
print()
if not follow:
return
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
if not follow:
Printer.print_error(
'Could not get logs for experiment `{}`.'.format(_experiment))
Printer.print_error(
'Error message `{}`.'.format(e))
sys.exit(1)
try:
PolyaxonClient().experiment.logs(
user,
project_name,
_experiment,
message_handler=get_logs_handler(handle_job_info=True,
show_timestamp=not hide_time))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get logs for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
def get_experiment_job_logs():
if past:
try:
response = PolyaxonClient().experiment_job.logs(
user,
project_name,
_experiment,
_job,
stream=False)
get_logs_handler(handle_job_info=True,
show_timestamp=not hide_time,
stream=False)(response.content.decode().split('\n'))
print()
if not follow:
return
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
if not follow:
Printer.print_error(
'Could not get logs for experiment `{}`.'.format(_experiment))
Printer.print_error(
'Error message `{}`.'.format(e))
sys.exit(1)
try:
PolyaxonClient().experiment_job.logs(
user,
project_name,
_experiment,
_job,
message_handler=get_logs_handler(handle_job_info=True,
show_timestamp=not hide_time))
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get logs for job `{}`.'.format(_job))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
if job:
_job = get_experiment_job_or_local(job)
get_experiment_job_logs()
else:
get_experiment_logs()
@experiment.command()
@click.pass_context
@clean_outputs
def outputs(ctx):
"""Download outputs for experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment -xp 1 outputs
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
try:
PolyaxonClient().experiment.download_outputs(user, project_name, _experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not download outputs for experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success('Files downloaded.')
@experiment.command()
@click.pass_context
@clean_outputs
def bookmark(ctx):
"""Bookmark experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment bookmark
```
\b
```bash
$ polyaxon experiment -xp 2 bookmark
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
try:
PolyaxonClient().experiment.bookmark(user, project_name, _experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not bookmark experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Experiment is bookmarked.")
@experiment.command()
@click.pass_context
@clean_outputs
def unbookmark(ctx):
"""Unbookmark experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon experiment unbookmark
```
\b
```bash
$ polyaxon experiment -xp 2 unbookmark
```
"""
user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),
ctx.obj.get('experiment'))
try:
PolyaxonClient().experiment.unbookmark(user, project_name, _experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not unbookmark experiment `{}`.'.format(_experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
Printer.print_success("Experiment is unbookmarked.")
| [
"polyaxon_cli.utils.formatting.Printer.print_warning",
"polyaxon_cli.utils.log_handler.get_logs_handler",
"click.echo",
"polyaxon_cli.utils.formatting.dict_tabulate",
"sys.exit",
"click.group",
"click.option",
"polyaxon_cli.utils.formatting.list_dicts_to_tabulate",
"polyaxon_cli.client.PolyaxonClient",
"rhea.read",
"polyaxon_cli.utils.formatting.Printer.add_status_color",
"polyaxon_cli.cli.getters.experiment.get_experiment_job_or_local",
"polyaxon_cli.utils.formatting.Printer.print_success",
"polyaxon_cli.utils.cache.cache",
"click.Path",
"polyaxon_cli.utils.formatting.get_meta_response",
"polyaxon_cli.managers.experiment.ExperimentManager.purge",
"polyaxon_cli.utils.validation.validate_tags",
"polyaxon_cli.utils.formatting.Printer.print_header"
] | [((1936, 1949), 'click.group', 'click.group', ([], {}), '()\n', (1947, 1949), False, 'import click\n'), ((1951, 2053), 'click.option', 'click.option', (['"""--project"""', '"""-p"""'], {'type': 'str', 'help': '"""The project name, e.g. \'mnist\' or \'adam/mnist\'."""'}), '(\'--project\', \'-p\', type=str, help=\n "The project name, e.g. \'mnist\' or \'adam/mnist\'.")\n', (1963, 2053), False, 'import click\n'), ((2050, 2129), 'click.option', 'click.option', (['"""--experiment"""', '"""-xp"""'], {'type': 'int', 'help': '"""The experiment id number."""'}), "('--experiment', '-xp', type=int, help='The experiment id number.')\n", (2062, 2129), False, 'import click\n'), ((2407, 2464), 'click.option', 'click.option', (['"""--job"""', '"""-j"""'], {'type': 'int', 'help': '"""The job id."""'}), "('--job', '-j', type=int, help='The job id.')\n", (2419, 2464), False, 'import click\n'), ((6492, 6616), 'click.option', 'click.option', (['"""--name"""'], {'type': 'str', 'help': '"""Name of the experiment, must be unique within the project, could be none."""'}), "('--name', type=str, help=\n 'Name of the experiment, must be unique within the project, could be none.'\n )\n", (6504, 6616), False, 'import click\n'), ((6622, 6700), 'click.option', 'click.option', (['"""--description"""'], {'type': 'str', 'help': '"""Description of the experiment."""'}), "('--description', type=str, help='Description of the experiment.')\n", (6634, 6700), False, 'import click\n'), ((6702, 6795), 'click.option', 'click.option', (['"""--tags"""'], {'type': 'str', 'help': '"""Tags of the experiment, comma separated values."""'}), "('--tags', type=str, help=\n 'Tags of the experiment, comma separated values.')\n", (6714, 6795), False, 'import click\n'), ((8235, 8400), 'click.option', 'click.option', (['"""--yes"""', '"""-y"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Automatic yes to prompts. Assume "yes" as answer to all prompts and run non-interactively."""'}), '(\'--yes\', \'-y\', is_flag=True, default=False, help=\n \'Automatic yes to prompts. 
Assume "yes" as answer to all prompts and run non-interactively.\'\n )\n', (8247, 8400), False, 'import click\n'), ((9543, 9655), 'click.option', 'click.option', (['"""--copy"""', '"""-c"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""To copy the experiment before restarting."""'}), "('--copy', '-c', is_flag=True, default=False, help=\n 'To copy the experiment before restarting.')\n", (9555, 9655), False, 'import click\n'), ((9798, 9896), 'click.option', 'click.option', (['"""-u"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""To upload the repo before restarting."""'}), "('-u', is_flag=True, default=False, help=\n 'To upload the repo before restarting.')\n", (9810, 9896), False, 'import click\n'), ((11510, 11606), 'click.option', 'click.option', (['"""-u"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""To upload the repo before resuming."""'}), "('-u', is_flag=True, default=False, help=\n 'To upload the repo before resuming.')\n", (11522, 11606), False, 'import click\n'), ((12796, 12874), 'click.option', 'click.option', (['"""--page"""'], {'type': 'int', 'help': '"""To paginate through the list of jobs."""'}), "('--page', type=int, help='To paginate through the list of jobs.')\n", (12808, 12874), False, 'import click\n'), ((14350, 14407), 'click.option', 'click.option', (['"""--job"""', '"""-j"""'], {'type': 'int', 'help': '"""The job id."""'}), "('--job', '-j', type=int, help='The job id.')\n", (14362, 14407), False, 'import click\n'), ((14409, 14496), 'click.option', 'click.option', (['"""--page"""'], {'type': 'int', 'help': '"""To paginate through the list of statuses."""'}), "('--page', type=int, help=\n 'To paginate through the list of statuses.')\n", (14421, 14496), False, 'import click\n'), ((17906, 17963), 'click.option', 'click.option', (['"""--job"""', '"""-j"""'], {'type': 'int', 'help': '"""The job id."""'}), "('--job', '-j', type=int, help='The job id.')\n", (17918, 17963), False, 'import click\n'), ((17965, 18050), 'click.option', 'click.option', (['"""--gpu"""', '"""-g"""'], {'is_flag': '(True)', 'help': '"""List experiment GPU resources."""'}), "('--gpu', '-g', is_flag=True, help='List experiment GPU resources.'\n )\n", (17977, 18050), False, 'import click\n'), ((20328, 20385), 'click.option', 'click.option', (['"""--job"""', '"""-j"""'], {'type': 'int', 'help': '"""The job id."""'}), "('--job', '-j', type=int, help='The job id.')\n", (20340, 20385), False, 'import click\n'), ((20387, 20457), 'click.option', 'click.option', (['"""--past"""', '"""-p"""'], {'is_flag': '(True)', 'help': '"""Show the past logs."""'}), "('--past', '-p', is_flag=True, help='Show the past logs.')\n", (20399, 20457), False, 'import click\n'), ((20459, 20568), 'click.option', 'click.option', (['"""--follow"""', '"""-f"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Stream logs after showing past logs."""'}), "('--follow', '-f', is_flag=True, default=False, help=\n 'Stream logs after showing past logs.')\n", (20471, 20568), False, 'import click\n'), ((20579, 20703), 'click.option', 'click.option', (['"""--hide_time"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Whether or not to hide timestamps from the log stream."""'}), "('--hide_time', is_flag=True, default=False, help=\n 'Whether or not to hide timestamps from the log stream.')\n", (20591, 20703), False, 'import click\n'), ((1838, 1878), 'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Experiment info:"""'], {}), "('Experiment info:')\n", 
(1858, 1878), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((7539, 7558), 'polyaxon_cli.utils.validation.validate_tags', 'validate_tags', (['tags'], {}), '(tags)\n', (7552, 7558), False, 'from polyaxon_cli.utils.validation import validate_tags\n'), ((8128, 8172), 'polyaxon_cli.utils.formatting.Printer.print_success', 'Printer.print_success', (['"""Experiment updated."""'], {}), "('Experiment updated.')\n", (8149, 8172), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((9464, 9517), 'polyaxon_cli.utils.formatting.Printer.print_success', 'Printer.print_success', (['"""Experiment is being stopped."""'], {}), "('Experiment is being stopped.')\n", (9485, 9517), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((13722, 13749), 'polyaxon_cli.utils.formatting.get_meta_response', 'get_meta_response', (['response'], {}), '(response)\n', (13739, 13749), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((14149, 14180), 'polyaxon_cli.utils.formatting.list_dicts_to_tabulate', 'list_dicts_to_tabulate', (['objects'], {}), '(objects)\n', (14171, 14180), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((25313, 25355), 'polyaxon_cli.utils.formatting.Printer.print_success', 'Printer.print_success', (['"""Files downloaded."""'], {}), "('Files downloaded.')\n", (25334, 25355), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((26210, 26260), 'polyaxon_cli.utils.formatting.Printer.print_success', 'Printer.print_success', (['"""Experiment is bookmarked."""'], {}), "('Experiment is bookmarked.')\n", (26231, 26260), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((27127, 27179), 'polyaxon_cli.utils.formatting.Printer.print_success', 'Printer.print_success', (['"""Experiment is unbookmarked."""'], {}), "('Experiment is unbookmarked.')\n", (27148, 27179), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((1087, 1134), 'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Experiment description:"""'], {}), "('Experiment description:')\n", (1107, 1134), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((1350, 1398), 'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Experiment declarations:"""'], {}), "('Experiment declarations:')\n", (1370, 1398), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((1407, 1445), 'polyaxon_cli.utils.formatting.dict_tabulate', 'dict_tabulate', (['experiment.declarations'], {}), '(experiment.declarations)\n', (1420, 1445), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((1486, 1534), 'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Experiment 
last metrics:"""'], {}), "('Experiment last metrics:')\n", (1506, 1534), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((1543, 1580), 'polyaxon_cli.utils.formatting.dict_tabulate', 'dict_tabulate', (['experiment.last_metric'], {}), '(experiment.last_metric)\n', (1556, 1580), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((1897, 1931), 'polyaxon_cli.utils.formatting.Printer.add_status_color', 'Printer.add_status_color', (['response'], {}), '(response)\n', (1921, 1931), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((4918, 4951), 'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Job info:"""'], {}), "('Job info:')\n", (4938, 4951), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((4960, 4983), 'polyaxon_cli.utils.formatting.dict_tabulate', 'dict_tabulate', (['response'], {}), '(response)\n', (4973, 4983), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((5204, 5236), 'polyaxon_cli.cli.getters.experiment.get_experiment_job_or_local', 'get_experiment_job_or_local', (['job'], {}), '(job)\n', (5231, 5236), False, 'from polyaxon_cli.cli.getters.experiment import get_experiment_job_or_local, get_project_experiment_or_local\n'), ((5833, 5884), 'click.echo', 'click.echo', (['"""Existing without deleting experiment."""'], {}), "('Existing without deleting experiment.')\n", (5843, 5884), False, 'import click\n'), ((5893, 5904), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5901, 5904), False, 'import sys\n'), ((6058, 6083), 'polyaxon_cli.managers.experiment.ExperimentManager.purge', 'ExperimentManager.purge', ([], {}), '()\n', (6081, 6083), False, 'from polyaxon_cli.managers.experiment import ExperimentManager\n'), ((7640, 7715), 'polyaxon_cli.utils.formatting.Printer.print_warning', 'Printer.print_warning', (['"""No argument was provided to update the experiment."""'], {}), "('No argument was provided to update the experiment.')\n", (7661, 7715), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((7724, 7735), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (7732, 7735), False, 'import sys\n'), ((9052, 9103), 'click.echo', 'click.echo', (['"""Existing without stopping experiment."""'], {}), "('Existing without stopping experiment.')\n", (9062, 9103), False, 'import click\n'), ((9112, 9123), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (9120, 9123), False, 'import sys\n'), ((10263, 10278), 'rhea.read', 'rhea.read', (['file'], {}), '(file)\n', (10272, 10278), False, 'import rhea\n'), ((9715, 9738), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (9725, 9738), False, 'import click\n'), ((11964, 11979), 'rhea.read', 'rhea.read', (['file'], {}), '(file)\n', (11973, 11979), False, 'import rhea\n'), ((11427, 11450), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (11437, 11450), False, 'import click\n'), ((13849, 13884), 'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Navigation:"""'], {}), "('Navigation:')\n", (13869, 13884), False, 'from 
polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((13893, 13912), 'polyaxon_cli.utils.formatting.dict_tabulate', 'dict_tabulate', (['meta'], {}), '(meta)\n', (13906, 13912), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((14205, 14234), 'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Jobs:"""'], {}), "('Jobs:')\n", (14225, 14234), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((14283, 14324), 'polyaxon_cli.utils.formatting.dict_tabulate', 'dict_tabulate', (['objects'], {'is_list_dict': '(True)'}), '(objects, is_list_dict=True)\n', (14296, 14324), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((15500, 15527), 'polyaxon_cli.utils.formatting.get_meta_response', 'get_meta_response', (['response'], {}), '(response)\n', (15517, 15527), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((16890, 16917), 'polyaxon_cli.utils.formatting.get_meta_response', 'get_meta_response', (['response'], {}), '(response)\n', (16907, 16917), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((17766, 17798), 'polyaxon_cli.cli.getters.experiment.get_experiment_job_or_local', 'get_experiment_job_or_local', (['job'], {}), '(job)\n', (17793, 17798), False, 'from polyaxon_cli.cli.getters.experiment import get_experiment_job_or_local, get_project_experiment_or_local\n'), ((20186, 20218), 'polyaxon_cli.cli.getters.experiment.get_experiment_job_or_local', 'get_experiment_job_or_local', (['job'], {}), '(job)\n', (20213, 20218), False, 'from polyaxon_cli.cli.getters.experiment import get_experiment_job_or_local, get_project_experiment_or_local\n'), ((24386, 24418), 'polyaxon_cli.cli.getters.experiment.get_experiment_job_or_local', 'get_experiment_job_or_local', (['job'], {}), '(job)\n', (24413, 24418), False, 'from polyaxon_cli.cli.getters.experiment import get_experiment_job_or_local, get_project_experiment_or_local\n'), ((3546, 3610), 'polyaxon_cli.utils.cache.cache', 'cache.cache', ([], {'config_manager': 'ExperimentManager', 'response': 'response'}), '(config_manager=ExperimentManager, response=response)\n', (3557, 3610), False, 'from polyaxon_cli.utils import cache\n'), ((4272, 4339), 'polyaxon_cli.utils.cache.cache', 'cache.cache', ([], {'config_manager': 'ExperimentJobManager', 'response': 'response'}), '(config_manager=ExperimentJobManager, response=response)\n', (4283, 4339), False, 'from polyaxon_cli.utils import cache\n'), ((6325, 6336), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6333, 6336), False, 'import sys\n'), ((8111, 8122), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8119, 8122), False, 'import sys\n'), ((9447, 9458), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (9455, 9458), False, 'import sys\n'), ((11341, 11352), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (11349, 11352), False, 'import sys\n'), ((12759, 12770), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (12767, 12770), False, 'import sys\n'), ((13698, 13709), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (13706, 13709), False, 'import sys\n'), ((15643, 15678), 
'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Navigation:"""'], {}), "('Navigation:')\n", (15663, 15678), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((15691, 15710), 'polyaxon_cli.utils.formatting.dict_tabulate', 'dict_tabulate', (['meta'], {}), '(meta)\n', (15704, 15710), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((16037, 16070), 'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Statuses:"""'], {}), "('Statuses:')\n", (16057, 16070), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((16127, 16168), 'polyaxon_cli.utils.formatting.dict_tabulate', 'dict_tabulate', (['objects'], {'is_list_dict': '(True)'}), '(objects, is_list_dict=True)\n', (16140, 16168), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((17019, 17054), 'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Navigation:"""'], {}), "('Navigation:')\n", (17039, 17054), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((17067, 17086), 'polyaxon_cli.utils.formatting.dict_tabulate', 'dict_tabulate', (['meta'], {}), '(meta)\n', (17080, 17086), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((17399, 17432), 'polyaxon_cli.utils.formatting.Printer.print_header', 'Printer.print_header', (['"""Statuses:"""'], {}), "('Statuses:')\n", (17419, 17432), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((17482, 17523), 'polyaxon_cli.utils.formatting.dict_tabulate', 'dict_tabulate', (['objects'], {'is_list_dict': '(True)'}), '(objects, is_list_dict=True)\n', (17495, 17523), False, 'from polyaxon_cli.utils.formatting import Printer, dict_tabulate, get_meta_response, get_resources, list_dicts_to_tabulate\n'), ((25297, 25308), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (25305, 25308), False, 'import sys\n'), ((26193, 26204), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (26201, 26204), False, 'import sys\n'), ((27110, 27121), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (27118, 27121), False, 'import sys\n'), ((3871, 3882), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3879, 3882), False, 'import sys\n'), ((4580, 4591), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4588, 4591), False, 'import sys\n'), ((15472, 15483), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (15480, 15483), False, 'import sys\n'), ((16862, 16873), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (16870, 16873), False, 'import sys\n'), ((19210, 19221), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (19218, 19221), False, 'import sys\n'), ((19954, 19965), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (19962, 19965), False, 'import sys\n'), ((22616, 22627), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (22624, 22627), False, 'import sys\n'), ((24154, 24165), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (24162, 24165), False, 'import sys\n'), ((5934, 5950), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (5948, 5950), 
False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((7765, 7781), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (7779, 7781), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((9142, 9158), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (9156, 9158), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((12310, 12326), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (12324, 12326), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((13356, 13372), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (13370, 13372), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((21406, 21493), 'polyaxon_cli.utils.log_handler.get_logs_handler', 'get_logs_handler', ([], {'handle_job_info': '(True)', 'show_timestamp': '(not hide_time)', 'stream': '(False)'}), '(handle_job_info=True, show_timestamp=not hide_time, stream\n =False)\n', (21422, 21493), False, 'from polyaxon_cli.utils.log_handler import get_logs_handler\n'), ((22234, 22302), 'polyaxon_cli.utils.log_handler.get_logs_handler', 'get_logs_handler', ([], {'handle_job_info': '(True)', 'show_timestamp': '(not hide_time)'}), '(handle_job_info=True, show_timestamp=not hide_time)\n', (22250, 22302), False, 'from polyaxon_cli.utils.log_handler import get_logs_handler\n'), ((22932, 23019), 'polyaxon_cli.utils.log_handler.get_logs_handler', 'get_logs_handler', ([], {'handle_job_info': '(True)', 'show_timestamp': '(not hide_time)', 'stream': '(False)'}), '(handle_job_info=True, show_timestamp=not hide_time, stream\n =False)\n', (22948, 23019), False, 'from polyaxon_cli.utils.log_handler import get_logs_handler\n'), ((23786, 23854), 'polyaxon_cli.utils.log_handler.get_logs_handler', 'get_logs_handler', ([], {'handle_job_info': '(True)', 'show_timestamp': '(not hide_time)'}), '(handle_job_info=True, show_timestamp=not hide_time)\n', (23802, 23854), False, 'from polyaxon_cli.utils.log_handler import get_logs_handler\n'), ((24964, 24980), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (24978, 24980), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((25880, 25896), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (25894, 25896), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((26793, 26809), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (26807, 26809), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((3458, 3474), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (3472, 3474), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((3992, 4008), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (4006, 4008), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((10630, 10646), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (10644, 10646), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((10880, 10896), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (10894, 10896), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((15109, 15125), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (15123, 15125), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((16245, 16261), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (16259, 16261), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((18821, 18837), 
'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (18835, 18837), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((19370, 19386), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (19384, 19386), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((22049, 22060), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (22057, 22060), False, 'import sys\n'), ((22087, 22103), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (22101, 22103), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((23575, 23586), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (23583, 23586), False, 'import sys\n'), ((23613, 23629), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (23627, 23629), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((21289, 21305), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (21303, 21305), False, 'from polyaxon_cli.client import PolyaxonClient\n'), ((22725, 22741), 'polyaxon_cli.client.PolyaxonClient', 'PolyaxonClient', ([], {}), '()\n', (22739, 22741), False, 'from polyaxon_cli.client import PolyaxonClient\n')] |
from bs4 import BeautifulSoup
import requests
import json
import datetime
import codecs
import re
featHolder = {}
featHolder['name'] = 'Pathfinder 2.0 Ancestry feat list'
featHolder['date'] = datetime.date.today().strftime("%B %d, %Y")
def get_details(link):
res = requests.get(link)
res.raise_for_status()
soup = BeautifulSoup(res.text, 'lxml')
feat = soup.find_all("div", {'class':'main'})
detailraw = soup.find("meta", {'name':'description'})['content'] #First we grab the content from the meta tag
detailsplit = re.split('<(.*?)>', detailraw) #Now we split it into groups of strings seperated by < >, to pull out any links
detail = ''.join(detailsplit[::2]) #Finally, we join every other group together (passing over the link groups) into one string
#print(detail)
return detail
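# Illustrative note (not part of the original script): re.split with a capturing group
# keeps the captured markup at the odd indices, so joining every other element ([::2])
# drops the tags while keeping the visible text, e.g.
#   re.split('<(.*?)>', 'Gain a <i>reaction</i> each turn')
#   -> ['Gain a ', 'i', 'reaction', '/i', ' each turn']
#   joined back together -> 'Gain a reaction each turn'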
def get_feats(link):
feats = []
res = requests.get(link)
res.raise_for_status()
soup = BeautifulSoup(res.text, 'lxml')
table = soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="ctl00_MainContent_TableElement")
rows = table.findAll(lambda tag: tag.name=='tr')
t = 0
for row in rows:
t += 1
#print(row)
#print("-----------------------------------")
feat = {}
entries = row.find_all(lambda tag: tag.name=='td')
if entries is not None:
if len(entries) > 0:
name = entries[0].find("a").next_sibling.text #We do next_sibling here because the source puts PFS links first, which we want to skip over.
link = entries[0].find("a").next_sibling.a['href']
#for entry in entries:
# print(entry)
# print("row---------------")
level = entries[1].text
traits = entries[2].text
prereq = entries[3].text
source = entries[4].text
feat['name'] = name
feat['level'] = level
feat['traits'] = traits.split(", ")
feat['link'] = "https://2e.aonprd.com/" +link
feat['prereq'] = prereq
feat['benefits'] = source
details = get_details(feat['link'])
feat['text'] = details
feats.append(feat)
#if t > 5:
#break
return feats
listOfPages = codecs.open("ancestryFeats.csv", encoding='utf-8')
for line in listOfPages:
featMD = line.split(",")
print("Getting feats for :", featMD[0],"This url:", featMD[2])
featHolder[featMD[1]] = get_feats(featMD[2].strip('\n'))
json_data = json.dumps(featHolder, indent=4)
#print(json_data)
filename = "ancestry-feats-pf2.json"
f = open(filename, "w")
f.write(json_data)
f.close()
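# Rough shape of the resulting ancestry-feats-pf2.json (illustrative sketch based on the code above):
# {
#   "name": "Pathfinder 2.0 Ancestry feat list",
#   "date": "<Month DD, YYYY>",
#   "<ancestry label from ancestryFeats.csv>": [
#     {"name": ..., "level": ..., "traits": [...], "link": ..., "prereq": ..., "benefits": ..., "text": ...},
#     ...
#   ]
# }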
| [
"re.split",
"json.dumps",
"datetime.date.today",
"requests.get",
"bs4.BeautifulSoup",
"codecs.open"
] | [((2384, 2434), 'codecs.open', 'codecs.open', (['"""ancestryFeats.csv"""'], {'encoding': '"""utf-8"""'}), "('ancestryFeats.csv', encoding='utf-8')\n", (2395, 2434), False, 'import codecs\n'), ((2632, 2664), 'json.dumps', 'json.dumps', (['featHolder'], {'indent': '(4)'}), '(featHolder, indent=4)\n', (2642, 2664), False, 'import json\n'), ((273, 291), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (285, 291), False, 'import requests\n'), ((330, 361), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""lxml"""'], {}), "(res.text, 'lxml')\n", (343, 361), False, 'from bs4 import BeautifulSoup\n'), ((544, 574), 're.split', 're.split', (['"""<(.*?)>"""', 'detailraw'], {}), "('<(.*?)>', detailraw)\n", (552, 574), False, 'import re\n'), ((880, 898), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (892, 898), False, 'import requests\n'), ((937, 968), 'bs4.BeautifulSoup', 'BeautifulSoup', (['res.text', '"""lxml"""'], {}), "(res.text, 'lxml')\n", (950, 968), False, 'from bs4 import BeautifulSoup\n'), ((193, 214), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (212, 214), False, 'import datetime\n')] |
import unittest
import scrape
class TestScrapeFunctions(unittest.TestCase):
def test_build_url(self):
url = scrape.build_url("indeed",
"/jobs?q=Data+Scientist&l=Texas&start=10",
join_next=True)
expected = ("https://www.indeed.com/"
"jobs?q=Data+Scientist&l=Texas&start=10")
url2 = scrape.build_url("indeed", job="Data Scientist", state="Texas")
expected2 = ("https://www.indeed.com/"
"jobs?q=Data%20Scientist&l=Texas&start=0")
self.assertEqual(url, expected)
self.assertEqual(url2, expected2)
def test_fetch_page(self):
fpl = scrape.fetch_page_listings
job_data = fpl("indeed",
job="Data Scientist",
state="Texas")
self.assertNotEqual(len(job_data), 0)
self.assertIsInstance(job_data, tuple)
self.assertIsInstance(job_data[0][0], dict)
self.assertIsInstance(job_data[1], str)
job_data = fpl("indeed",
next_page="/jobs?q=Data+Scientist"
"&l=Texas&start=10")
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"scrape.build_url"
] | [((1212, 1227), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1225, 1227), False, 'import unittest\n'), ((123, 212), 'scrape.build_url', 'scrape.build_url', (['"""indeed"""', '"""/jobs?q=Data+Scientist&l=Texas&start=10"""'], {'join_next': '(True)'}), "('indeed', '/jobs?q=Data+Scientist&l=Texas&start=10',\n join_next=True)\n", (139, 212), False, 'import scrape\n'), ((395, 458), 'scrape.build_url', 'scrape.build_url', (['"""indeed"""'], {'job': '"""Data Scientist"""', 'state': '"""Texas"""'}), "('indeed', job='Data Scientist', state='Texas')\n", (411, 458), False, 'import scrape\n')] |
import random
# Create a deck of cards
deck = [x for x in range(52)]
# Create suits and ranks lists
suits = ["Spades", "Hearts", "Diamonds", "Clubs"]
ranks = ["Ace", "2", "3", "4", "5", "6", "7", "8", "9",
"10", "Jack", "Queen", "King"]
# Shuffle the cards
random.shuffle(deck)
# Display the first four cards
for i in range(4):
suit = suits[deck[i] // 13]
rank = ranks[deck[i] % 13]
print("Card number", deck[i], "is the", rank, "of", suit)
| [
"random.shuffle"
] | [((269, 289), 'random.shuffle', 'random.shuffle', (['deck'], {}), '(deck)\n', (283, 289), False, 'import random\n')] |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.automl.v1beta1 AutoMl API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
import google.api_core.operations_v1
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.automl_v1beta1.gapic import auto_ml_client_config
from google.cloud.automl_v1beta1.gapic import enums
from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport
from google.cloud.automl_v1beta1.proto import data_items_pb2
from google.cloud.automl_v1beta1.proto import dataset_pb2
from google.cloud.automl_v1beta1.proto import io_pb2
from google.cloud.automl_v1beta1.proto import model_evaluation_pb2
from google.cloud.automl_v1beta1.proto import model_pb2
from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2
from google.cloud.automl_v1beta1.proto import prediction_service_pb2
from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc
from google.cloud.automl_v1beta1.proto import service_pb2
from google.cloud.automl_v1beta1.proto import service_pb2_grpc
from google.longrunning import operations_pb2 as longrunning_operations_pb2
from google.protobuf import empty_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version
class AutoMlClient(object):
"""
AutoML Server API.
The resource names are assigned by the server. The server never reuses
names that it has created after the resources with those names are
deleted.
An ID of a resource is the last element of the item's resource name. For
``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}``,
then the id for the item is ``{dataset_id}``.
"""
SERVICE_ADDRESS = "automl.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.automl.v1beta1.AutoMl"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AutoMlClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def location_path(cls, project, location):
"""Return a fully-qualified location string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}",
project=project,
location=location,
)
@classmethod
def dataset_path(cls, project, location, dataset):
"""Return a fully-qualified dataset string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/datasets/{dataset}",
project=project,
location=location,
dataset=dataset,
)
@classmethod
def model_path(cls, project, location, model):
"""Return a fully-qualified model string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/models/{model}",
project=project,
location=location,
model=model,
)
@classmethod
def model_evaluation_path(cls, project, location, model, model_evaluation):
"""Return a fully-qualified model_evaluation string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}",
project=project,
location=location,
model=model,
model_evaluation=model_evaluation,
)
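    # Illustrative expansions (not part of the generated client): the helpers above only
    # fill in the path templates, e.g.
    #   AutoMlClient.dataset_path("my-project", "us-central1", "ds123")
    #   -> "projects/my-project/locations/us-central1/datasets/ds123"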
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.AutoMlGrpcTransport,
Callable[[~.Credentials, type], ~.AutoMlGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = auto_ml_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=auto_ml_grpc_transport.AutoMlGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
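    # Typical construction (illustrative sketch, not part of the generated code):
    #   client = AutoMlClient()  # credentials resolved from the environment (ADC)
    #   client = AutoMlClient.from_service_account_file("key.json")  # explicit key file
    # Either way, absent an explicit transport, the default AutoMlGrpcTransport is built
    # against SERVICE_ADDRESS.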
# Service calls
def create_dataset(
self,
parent,
dataset,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `dataset`:
>>> dataset = {}
>>>
>>> response = client.create_dataset(parent, dataset)
Args:
parent (str): The resource name of the project to create the dataset for.
dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.Dataset`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_dataset" not in self._inner_api_calls:
self._inner_api_calls[
"create_dataset"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_dataset,
default_retry=self._method_configs["CreateDataset"].retry,
default_timeout=self._method_configs["CreateDataset"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset)
return self._inner_api_calls["create_dataset"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_dataset(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a dataset.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> response = client.get_dataset(name)
Args:
name (str): The resource name of the dataset to retrieve.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_dataset" not in self._inner_api_calls:
self._inner_api_calls[
"get_dataset"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_dataset,
default_retry=self._method_configs["GetDataset"].retry,
default_timeout=self._method_configs["GetDataset"].timeout,
client_info=self._client_info,
)
request = service_pb2.GetDatasetRequest(name=name)
return self._inner_api_calls["get_dataset"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_datasets(
self,
parent,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists datasets in a project.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # Iterate over all results
>>> for element in client.list_datasets(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_datasets(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): The resource name of the project from which to list datasets.
filter_ (str): An expression for filtering the results of the request.
- ``dataset_metadata`` - for existence of the case.
An example of using the filter is:
- ``translation_dataset_metadata:*`` --> The dataset has
translation\_dataset\_metadata.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.automl_v1beta1.types.Dataset` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_datasets" not in self._inner_api_calls:
self._inner_api_calls[
"list_datasets"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_datasets,
default_retry=self._method_configs["ListDatasets"].retry,
default_timeout=self._method_configs["ListDatasets"].timeout,
client_info=self._client_info,
)
request = service_pb2.ListDatasetsRequest(
parent=parent, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_datasets"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="datasets",
request_token_field="page_token",
response_token_field="next_page_token",
)
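        # Sketch of the paging behaviour: GRPCIterator re-issues the request lazily, copying
        # each response's `next_page_token` into the next request's `page_token`, and yields
        # elements from the repeated `datasets` field one page at a time.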
return iterator
def delete_dataset(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a dataset and all of its contents. Returns empty response in the
``response`` field when it completes, and ``delete_details`` in the
``metadata`` field.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> response = client.delete_dataset(name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): The resource name of the dataset to delete.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_dataset" not in self._inner_api_calls:
self._inner_api_calls[
"delete_dataset"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_dataset,
default_retry=self._method_configs["DeleteDataset"].retry,
default_timeout=self._method_configs["DeleteDataset"].timeout,
client_info=self._client_info,
)
request = service_pb2.DeleteDatasetRequest(name=name)
operation = self._inner_api_calls["delete_dataset"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
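    # Note on the long-running pattern above (illustrative): `operation.from_gapic` wraps the
    # raw longrunning Operation in a future; `result()` blocks until it completes and decodes
    # the response as `google.protobuf.Empty`, while `metadata()` decodes the progress as
    # `OperationMetadata`. The other LRO methods below reuse the same wrapping.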
def import_data(
self,
name,
input_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Imports data into a dataset. Returns an empty response in the
``response`` field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> # TODO: Initialize `input_config`:
>>> input_config = {}
>>>
>>> response = client.import_data(name, input_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. Dataset name. Dataset must already exist. All imported
annotations and examples will be added.
input_config (Union[dict, ~google.cloud.automl_v1beta1.types.InputConfig]): Required. The desired input location.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.InputConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "import_data" not in self._inner_api_calls:
self._inner_api_calls[
"import_data"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.import_data,
default_retry=self._method_configs["ImportData"].retry,
default_timeout=self._method_configs["ImportData"].timeout,
client_info=self._client_info,
)
request = service_pb2.ImportDataRequest(name=name, input_config=input_config)
operation = self._inner_api_calls["import_data"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def export_data(
self,
name,
output_config,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Exports dataset's data to a Google Cloud Storage bucket. Returns an
empty response in the ``response`` field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]')
>>>
>>> # TODO: Initialize `output_config`:
>>> output_config = {}
>>>
>>> response = client.export_data(name, output_config)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Required. The resource name of the dataset.
output_config (Union[dict, ~google.cloud.automl_v1beta1.types.OutputConfig]): Required. The desired output location.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.OutputConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "export_data" not in self._inner_api_calls:
self._inner_api_calls[
"export_data"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.export_data,
default_retry=self._method_configs["ExportData"].retry,
default_timeout=self._method_configs["ExportData"].timeout,
client_info=self._client_info,
)
request = service_pb2.ExportDataRequest(name=name, output_config=output_config)
operation = self._inner_api_calls["export_data"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def create_model(
self,
parent,
model,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a model. Returns a Model in the ``response`` field when it
completes. When you create a model, several model evaluations are
created for it: a global evaluation, and one evaluation for each
annotation spec.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # TODO: Initialize `model`:
>>> model = {}
>>>
>>> response = client.create_model(parent, model)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Resource name of the parent project where the model is being created.
model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.automl_v1beta1.types.Model`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_model" not in self._inner_api_calls:
self._inner_api_calls[
"create_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_model,
default_retry=self._method_configs["CreateModel"].retry,
default_timeout=self._method_configs["CreateModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.CreateModelRequest(parent=parent, model=model)
operation = self._inner_api_calls["create_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
model_pb2.Model,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def get_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a model.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.get_model(name)
Args:
name (str): Resource name of the model.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Model` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_model" not in self._inner_api_calls:
self._inner_api_calls[
"get_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_model,
default_retry=self._method_configs["GetModel"].retry,
default_timeout=self._method_configs["GetModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.GetModelRequest(name=name)
return self._inner_api_calls["get_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_models(
self,
parent,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists models.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.location_path('[PROJECT]', '[LOCATION]')
>>>
>>> # Iterate over all results
>>> for element in client.list_models(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_models(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Resource name of the project, from which to list the models.
filter_ (str): An expression for filtering the results of the request.
- ``model_metadata`` - for existence of the case.
- ``dataset_id`` - for = or !=.
Some examples of using the filter are:
- ``image_classification_model_metadata:*`` --> The model has
image\_classification\_model\_metadata.
- ``dataset_id=5`` --> The model was created from a sibling dataset
with ID 5.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_models" not in self._inner_api_calls:
self._inner_api_calls[
"list_models"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_models,
default_retry=self._method_configs["ListModels"].retry,
default_timeout=self._method_configs["ListModels"].timeout,
client_info=self._client_info,
)
request = service_pb2.ListModelsRequest(
parent=parent, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_models"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="model",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def delete_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes a model. If a model is already deployed, this only deletes the
model in AutoML BE, and does not change the status of the deployed model
in the production environment. Returns ``google.protobuf.Empty`` in the
``response`` field when it completes, and ``delete_details`` in the
``metadata`` field.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.delete_model(name)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
name (str): Resource name of the model being deleted.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_model" not in self._inner_api_calls:
self._inner_api_calls[
"delete_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_model,
default_retry=self._method_configs["DeleteModel"].retry,
default_timeout=self._method_configs["DeleteModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.DeleteModelRequest(name=name)
operation = self._inner_api_calls["delete_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
empty_pb2.Empty,
metadata_type=proto_operations_pb2.OperationMetadata,
)
def deploy_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deploys model. Returns a ``DeployModelResponse`` in the ``response``
field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.deploy_model(name)
Args:
name (str): Resource name of the model to deploy.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "deploy_model" not in self._inner_api_calls:
self._inner_api_calls[
"deploy_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.deploy_model,
default_retry=self._method_configs["DeployModel"].retry,
default_timeout=self._method_configs["DeployModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.DeployModelRequest(name=name)
return self._inner_api_calls["deploy_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def undeploy_model(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Undeploys model. Returns an ``UndeployModelResponse`` in the
``response`` field when it completes.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> response = client.undeploy_model(name)
Args:
name (str): Resource name of the model to undeploy.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.Operation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "undeploy_model" not in self._inner_api_calls:
self._inner_api_calls[
"undeploy_model"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.undeploy_model,
default_retry=self._method_configs["UndeployModel"].retry,
default_timeout=self._method_configs["UndeployModel"].timeout,
client_info=self._client_info,
)
request = service_pb2.UndeployModelRequest(name=name)
return self._inner_api_calls["undeploy_model"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_model_evaluation(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets a model evaluation.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]')
>>>
>>> response = client.get_model_evaluation(name)
Args:
name (str): Resource name for the model evaluation.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_model_evaluation" not in self._inner_api_calls:
self._inner_api_calls[
"get_model_evaluation"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_model_evaluation,
default_retry=self._method_configs["GetModelEvaluation"].retry,
default_timeout=self._method_configs["GetModelEvaluation"].timeout,
client_info=self._client_info,
)
request = service_pb2.GetModelEvaluationRequest(name=name)
return self._inner_api_calls["get_model_evaluation"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_model_evaluations(
self,
parent,
filter_=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists model evaluations.
Example:
>>> from google.cloud import automl_v1beta1
>>>
>>> client = automl_v1beta1.AutoMlClient()
>>>
>>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
>>>
>>> # Iterate over all results
>>> for element in client.list_model_evaluations(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_model_evaluations(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Resource name of the model to list the model evaluations for.
If modelId is set as "-", this will list model evaluations from across all
models of the parent location.
filter_ (str): An expression for filtering the results of the request.
- ``annotation_spec_id`` - for =, != or existence. See example below
for the last.
Some examples of using the filter are:
- ``annotation_spec_id!=4`` --> The model evaluation was done for
annotation spec with ID different than 4.
- ``NOT annotation_spec_id:*`` --> The model evaluation was done for
aggregate of all annotation specs.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_model_evaluations" not in self._inner_api_calls:
self._inner_api_calls[
"list_model_evaluations"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_model_evaluations,
default_retry=self._method_configs["ListModelEvaluations"].retry,
default_timeout=self._method_configs["ListModelEvaluations"].timeout,
client_info=self._client_info,
)
request = service_pb2.ListModelEvaluationsRequest(
parent=parent, filter=filter_, page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_model_evaluations"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="model_evaluation",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
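    # Illustrative usage sketch (not part of the generated client): the
    # ``filter_`` syntax documented above can be combined with the returned
    # pager, e.g.
    #
    #   >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
    #   >>> for evaluation in client.list_model_evaluations(parent, filter_='annotation_spec_id!=4'):
    #   ...     print(evaluation)
    #
    # ``'annotation_spec_id!=4'`` is one of the example filters listed in the
    # docstring; any other documented expression works the same way.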
| [
"google.cloud.automl_v1beta1.proto.service_pb2.UndeployModelRequest",
"google.cloud.automl_v1beta1.proto.service_pb2.GetModelEvaluationRequest",
"google.cloud.automl_v1beta1.proto.service_pb2.ListDatasetsRequest",
"google.oauth2.service_account.Credentials.from_service_account_file",
"google.cloud.automl_v1beta1.proto.service_pb2.CreateDatasetRequest",
"google.cloud.automl_v1beta1.proto.service_pb2.CreateModelRequest",
"google.cloud.automl_v1beta1.proto.service_pb2.ListModelsRequest",
"google.cloud.automl_v1beta1.proto.service_pb2.GetDatasetRequest",
"google.cloud.automl_v1beta1.proto.service_pb2.ExportDataRequest",
"warnings.warn",
"google.cloud.automl_v1beta1.proto.service_pb2.DeleteDatasetRequest",
"google.cloud.automl_v1beta1.proto.service_pb2.DeployModelRequest",
"pkg_resources.get_distribution",
"google.cloud.automl_v1beta1.proto.service_pb2.GetModelRequest",
"google.cloud.automl_v1beta1.proto.service_pb2.ImportDataRequest",
"google.cloud.automl_v1beta1.gapic.transports.auto_ml_grpc_transport.AutoMlGrpcTransport",
"functools.partial",
"google.cloud.automl_v1beta1.proto.service_pb2.DeleteModelRequest",
"google.cloud.automl_v1beta1.proto.service_pb2.ListModelEvaluationsRequest"
] | [((2057, 2110), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['"""google-cloud-automl"""'], {}), "('google-cloud-automl')\n", (2087, 2110), False, 'import pkg_resources\n'), ((3380, 3443), 'google.oauth2.service_account.Credentials.from_service_account_file', 'service_account.Credentials.from_service_account_file', (['filename'], {}), '(filename)\n', (3433, 3443), False, 'from google.oauth2 import service_account\n'), ((11996, 12060), 'google.cloud.automl_v1beta1.proto.service_pb2.CreateDatasetRequest', 'service_pb2.CreateDatasetRequest', ([], {'parent': 'parent', 'dataset': 'dataset'}), '(parent=parent, dataset=dataset)\n', (12028, 12060), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((14322, 14362), 'google.cloud.automl_v1beta1.proto.service_pb2.GetDatasetRequest', 'service_pb2.GetDatasetRequest', ([], {'name': 'name'}), '(name=name)\n', (14351, 14362), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((18051, 18139), 'google.cloud.automl_v1beta1.proto.service_pb2.ListDatasetsRequest', 'service_pb2.ListDatasetsRequest', ([], {'parent': 'parent', 'filter': 'filter_', 'page_size': 'page_size'}), '(parent=parent, filter=filter_, page_size=\n page_size)\n', (18082, 18139), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((21286, 21329), 'google.cloud.automl_v1beta1.proto.service_pb2.DeleteDatasetRequest', 'service_pb2.DeleteDatasetRequest', ([], {'name': 'name'}), '(name=name)\n', (21318, 21329), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((24750, 24817), 'google.cloud.automl_v1beta1.proto.service_pb2.ImportDataRequest', 'service_pb2.ImportDataRequest', ([], {'name': 'name', 'input_config': 'input_config'}), '(name=name, input_config=input_config)\n', (24779, 24817), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((28194, 28263), 'google.cloud.automl_v1beta1.proto.service_pb2.ExportDataRequest', 'service_pb2.ExportDataRequest', ([], {'name': 'name', 'output_config': 'output_config'}), '(name=name, output_config=output_config)\n', (28223, 28263), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((31700, 31758), 'google.cloud.automl_v1beta1.proto.service_pb2.CreateModelRequest', 'service_pb2.CreateModelRequest', ([], {'parent': 'parent', 'model': 'model'}), '(parent=parent, model=model)\n', (31730, 31758), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((34211, 34249), 'google.cloud.automl_v1beta1.proto.service_pb2.GetModelRequest', 'service_pb2.GetModelRequest', ([], {'name': 'name'}), '(name=name)\n', (34238, 34249), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((38081, 38167), 'google.cloud.automl_v1beta1.proto.service_pb2.ListModelsRequest', 'service_pb2.ListModelsRequest', ([], {'parent': 'parent', 'filter': 'filter_', 'page_size': 'page_size'}), '(parent=parent, filter=filter_, page_size=\n page_size)\n', (38110, 38167), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((41448, 41489), 'google.cloud.automl_v1beta1.proto.service_pb2.DeleteModelRequest', 'service_pb2.DeleteModelRequest', ([], {'name': 'name'}), '(name=name)\n', (41478, 41489), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((44065, 44106), 'google.cloud.automl_v1beta1.proto.service_pb2.DeployModelRequest', 'service_pb2.DeployModelRequest', ([], {'name': 'name'}), '(name=name)\n', (44095, 44106), False, 'from google.cloud.automl_v1beta1.proto import 
service_pb2\n'), ((46470, 46513), 'google.cloud.automl_v1beta1.proto.service_pb2.UndeployModelRequest', 'service_pb2.UndeployModelRequest', ([], {'name': 'name'}), '(name=name)\n', (46502, 46513), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((48876, 48924), 'google.cloud.automl_v1beta1.proto.service_pb2.GetModelEvaluationRequest', 'service_pb2.GetModelEvaluationRequest', ([], {'name': 'name'}), '(name=name)\n', (48913, 48924), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((53055, 53150), 'google.cloud.automl_v1beta1.proto.service_pb2.ListModelEvaluationsRequest', 'service_pb2.ListModelEvaluationsRequest', ([], {'parent': 'parent', 'filter': 'filter_', 'page_size': 'page_size'}), '(parent=parent, filter=filter_,\n page_size=page_size)\n', (53094, 53150), False, 'from google.cloud.automl_v1beta1.proto import service_pb2\n'), ((7146, 7251), 'warnings.warn', 'warnings.warn', (['"""The `client_config` argument is deprecated."""', 'PendingDeprecationWarning'], {'stacklevel': '(2)'}), "('The `client_config` argument is deprecated.',\n PendingDeprecationWarning, stacklevel=2)\n", (7159, 7251), False, 'import warnings\n'), ((7415, 7539), 'warnings.warn', 'warnings.warn', (['"""The `channel` argument is deprecated; use `transport` instead."""', 'PendingDeprecationWarning'], {'stacklevel': '(2)'}), "('The `channel` argument is deprecated; use `transport` instead.',\n PendingDeprecationWarning, stacklevel=2)\n", (7428, 7539), False, 'import warnings\n'), ((8352, 8470), 'google.cloud.automl_v1beta1.gapic.transports.auto_ml_grpc_transport.AutoMlGrpcTransport', 'auto_ml_grpc_transport.AutoMlGrpcTransport', ([], {'address': 'self.SERVICE_ADDRESS', 'channel': 'channel', 'credentials': 'credentials'}), '(address=self.SERVICE_ADDRESS,\n channel=channel, credentials=credentials)\n', (8394, 8470), False, 'from google.cloud.automl_v1beta1.gapic.transports import auto_ml_grpc_transport\n'), ((18264, 18374), 'functools.partial', 'functools.partial', (["self._inner_api_calls['list_datasets']"], {'retry': 'retry', 'timeout': 'timeout', 'metadata': 'metadata'}), "(self._inner_api_calls['list_datasets'], retry=retry,\n timeout=timeout, metadata=metadata)\n", (18281, 18374), False, 'import functools\n'), ((38292, 38400), 'functools.partial', 'functools.partial', (["self._inner_api_calls['list_models']"], {'retry': 'retry', 'timeout': 'timeout', 'metadata': 'metadata'}), "(self._inner_api_calls['list_models'], retry=retry,\n timeout=timeout, metadata=metadata)\n", (38309, 38400), False, 'import functools\n'), ((53276, 53396), 'functools.partial', 'functools.partial', (["self._inner_api_calls['list_model_evaluations']"], {'retry': 'retry', 'timeout': 'timeout', 'metadata': 'metadata'}), "(self._inner_api_calls['list_model_evaluations'], retry=\n retry, timeout=timeout, metadata=metadata)\n", (53293, 53396), False, 'import functools\n')] |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
from datetime import timedelta, datetime
from random import randint
from odoo import api, fields, models, tools, SUPERUSER_ID, _
from odoo.exceptions import UserError, AccessError, ValidationError, RedirectWarning
from odoo.tools.misc import format_date, get_lang
from odoo.osv.expression import OR
from .project_task_recurrence import DAYS, WEEKS
class ProjectTaskType(models.Model):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence, id'
def _get_default_project_ids(self):
default_project_id = self.env.context.get('default_project_id')
return [default_project_id] if default_project_id else None
active = fields.Boolean('Active', default=True)
name = fields.Char(string='Stage Name', required=True, translate=True)
description = fields.Text(translate=True)
sequence = fields.Integer(default=1)
project_ids = fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', string='Projects',
default=_get_default_project_ids)
legend_blocked = fields.Char(
'Red Kanban Label', default=lambda s: _('Blocked'), translate=True, required=True,
help='Override the default value displayed for the blocked state for kanban selection, when the task or issue is in that stage.')
legend_done = fields.Char(
'Green Kanban Label', default=lambda s: _('Ready'), translate=True, required=True,
help='Override the default value displayed for the done state for kanban selection, when the task or issue is in that stage.')
legend_normal = fields.Char(
'Grey Kanban Label', default=lambda s: _('In Progress'), translate=True, required=True,
help='Override the default value displayed for the normal state for kanban selection, when the task or issue is in that stage.')
mail_template_id = fields.Many2one(
'mail.template',
string='Email Template',
domain=[('model', '=', 'project.task')],
help="If set an email will be sent to the customer when the task or issue reaches this step.")
fold = fields.Boolean(string='Folded in Kanban',
help='This stage is folded in the kanban view when there are no records in that stage to display.')
rating_template_id = fields.Many2one(
'mail.template',
string='Rating Email Template',
domain=[('model', '=', 'project.task')],
help="If set and if the project's rating configuration is 'Rating when changing stage', then an email will be sent to the customer when the task reaches this step.")
auto_validation_kanban_state = fields.Boolean('Automatic kanban status', default=False,
help="Automatically modify the kanban state when the customer replies to the feedback for this stage.\n"
" * A good feedback from the customer will update the kanban state to 'ready for the new stage' (green bullet).\n"
" * A medium or a bad feedback will set the kanban state to 'blocked' (red bullet).\n")
is_closed = fields.Boolean('Closing Stage', help="Tasks in this stage are considered as closed.")
disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning')
def unlink_wizard(self, stage_view=False):
self = self.with_context(active_test=False)
# retrieves all the projects with a least 1 task in that stage
# a task can be in a stage even if the project is not assigned to the stage
readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'], ['project_id'])
project_ids = list(set([project['project_id'][0] for project in readgroup] + self.project_ids.ids))
wizard = self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({
'project_ids': project_ids,
'stage_ids': self.ids
})
context = dict(self.env.context)
context['stage_view'] = stage_view
return {
'name': _('Delete Stage'),
'view_mode': 'form',
'res_model': 'project.task.type.delete.wizard',
'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')],
'type': 'ir.actions.act_window',
'res_id': wizard.id,
'target': 'new',
'context': context,
}
def write(self, vals):
if 'active' in vals and not vals['active']:
self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False})
return super(ProjectTaskType, self).write(vals)
@api.depends('project_ids', 'project_ids.rating_active')
def _compute_disabled_rating_warning(self):
for stage in self:
disabled_projects = stage.project_ids.filtered(lambda p: not p.rating_active)
if disabled_projects:
stage.disabled_rating_warning = '\n'.join('- %s' % p.name for p in disabled_projects)
else:
stage.disabled_rating_warning = False
class Project(models.Model):
_name = "project.project"
_description = "Project"
_inherit = ['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin']
_order = "sequence, name, id"
_rating_satisfaction_days = False # takes all existing ratings
_check_company_auto = True
def _compute_attached_docs_count(self):
Attachment = self.env['ir.attachment']
for project in self:
project.doc_count = Attachment.search_count([
'|',
'&',
('res_model', '=', 'project.project'), ('res_id', '=', project.id),
'&',
('res_model', '=', 'project.task'), ('res_id', 'in', project.task_ids.ids)
])
def _compute_task_count(self):
task_data = self.env['project.task'].read_group([('project_id', 'in', self.ids), '|', '&', ('stage_id.is_closed', '=', False), ('stage_id.fold', '=', False), ('stage_id', '=', False)], ['project_id'], ['project_id'])
result = dict((data['project_id'][0], data['project_id_count']) for data in task_data)
for project in self:
project.task_count = result.get(project.id, 0)
def attachment_tree_view(self):
action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment')
action['domain'] = str([
'|',
'&',
('res_model', '=', 'project.project'),
('res_id', 'in', self.ids),
'&',
('res_model', '=', 'project.task'),
('res_id', 'in', self.task_ids.ids)
])
action['context'] = "{'default_res_model': '%s','default_res_id': %d}" % (self._name, self.id)
return action
def _compute_is_favorite(self):
for project in self:
project.is_favorite = self.env.user in project.favorite_user_ids
def _inverse_is_favorite(self):
favorite_projects = not_fav_projects = self.env['project.project'].sudo()
for project in self:
if self.env.user in project.favorite_user_ids:
favorite_projects |= project
else:
not_fav_projects |= project
# Project User has no write access for project.
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]})
favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]})
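    # Note added for clarity: the x2many command tuples used above follow the
    # standard ORM convention: (4, id) links an existing record to the
    # relation, (3, id) removes the link without deleting the record.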
def _get_default_favorite_user_ids(self):
return [(6, 0, [self.env.uid])]
name = fields.Char("Name", index=True, required=True, tracking=True)
description = fields.Html()
active = fields.Boolean(default=True,
help="If the active field is set to False, it will allow you to hide the project without removing it.")
sequence = fields.Integer(default=10, help="Gives the sequence order when displaying a list of Projects.")
partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True, domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
partner_email = fields.Char(
compute='_compute_partner_email', inverse='_inverse_partner_email',
string='Email', readonly=False, store=True, copy=False)
partner_phone = fields.Char(
compute='_compute_partner_phone', inverse='_inverse_partner_phone',
string="Phone", readonly=False, store=True, copy=False)
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.company)
currency_id = fields.Many2one('res.currency', related="company_id.currency_id", string="Currency", readonly=True)
analytic_account_id = fields.Many2one('account.analytic.account', string="Analytic Account", copy=False, ondelete='set null',
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]", check_company=True,
help="Analytic account to which this project is linked for financial management. "
"Use an analytic account to record cost and revenue on your project.")
favorite_user_ids = fields.Many2many(
'res.users', 'project_favorite_user_rel', 'project_id', 'user_id',
default=_get_default_favorite_user_ids,
string='Members')
is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project on dashboard',
help="Whether this project should be displayed on your dashboard.")
label_tasks = fields.Char(string='Use Tasks as', default='Tasks', help="Label used for the tasks of the project.", translate=True)
tasks = fields.One2many('project.task', 'project_id', string="Task Activities")
resource_calendar_id = fields.Many2one(
'resource.calendar', string='Working Time',
related='company_id.resource_calendar_id')
type_ids = fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', string='Tasks Stages')
task_count = fields.Integer(compute='_compute_task_count', string="Task Count")
task_ids = fields.One2many('project.task', 'project_id', string='Tasks',
domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)])
color = fields.Integer(string='Color Index')
user_id = fields.Many2one('res.users', string='Project Manager', default=lambda self: self.env.user, tracking=True)
alias_enabled = fields.Boolean(string='Use email alias', compute='_compute_alias_enabled', readonly=False)
alias_id = fields.Many2one('mail.alias', string='Alias', ondelete="restrict", required=True,
help="Internal email associated with this project. Incoming emails are automatically synchronized "
"with Tasks (or optionally Issues if the Issue Tracker module is installed).")
privacy_visibility = fields.Selection([
('followers', 'Invited internal users'),
('employees', 'All internal users'),
('portal', 'Invited portal users and all internal users'),
],
string='Visibility', required=True,
default='portal',
help="Defines the visibility of the tasks of the project:\n"
"- Invited internal users: employees may only see the followed project and tasks.\n"
"- All internal users: employees may see all project and tasks.\n"
"- Invited portal and all internal users: employees may see everything."
" Portal users may see project and tasks followed by\n"
" them or by someone of their company.")
allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user')
allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel',
string="Allowed Internal Users", default=lambda self: self.env.user, domain=[('share', '=', False)])
allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel', string="Allowed Portal Users", domain=[('share', '=', True)])
doc_count = fields.Integer(compute='_compute_attached_docs_count', string="Number of documents attached")
date_start = fields.Date(string='Start Date')
date = fields.Date(string='Expiration Date', index=True, tracking=True)
subtask_project_id = fields.Many2one('project.project', string='Sub-task Project', ondelete="restrict",
help="Project in which sub-tasks of the current project will be created. It can be the current project itself.")
allow_subtasks = fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project'))
allow_recurring_tasks = fields.Boolean('Recurring Tasks', default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks'))
# rating fields
rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True)
rating_active = fields.Boolean('Customer Ratings', default=lambda self: self.env.user.has_group('project.group_project_rating'))
rating_status = fields.Selection(
[('stage', 'Rating when changing stage'),
('periodic', 'Periodical Rating')
], 'Customer Ratings Status', default="stage", required=True,
help="How to get customer feedback?\n"
"- Rating when changing stage: an email will be sent when a task is pulled in another stage.\n"
"- Periodical Rating: email will be sent periodically.\n\n"
"Don't forget to set up the mail templates on the stages for which you want to get the customer's feedbacks.")
rating_status_period = fields.Selection([
('daily', 'Daily'),
('weekly', 'Weekly'),
('bimonthly', 'Twice a Month'),
('monthly', 'Once a Month'),
('quarterly', 'Quarterly'),
('yearly', 'Yearly')], 'Rating Frequency', required=True, default='monthly')
_sql_constraints = [
('project_date_greater', 'check(date >= date_start)', 'Error! project start-date must be lower than project end-date.')
]
@api.depends('partner_id.email')
def _compute_partner_email(self):
for project in self:
if project.partner_id and project.partner_id.email != project.partner_email:
project.partner_email = project.partner_id.email
def _inverse_partner_email(self):
for project in self:
if project.partner_id and project.partner_email != project.partner_id.email:
project.partner_id.email = project.partner_email
@api.depends('partner_id.phone')
def _compute_partner_phone(self):
for project in self:
if project.partner_id and project.partner_phone != project.partner_id.phone:
project.partner_phone = project.partner_id.phone
def _inverse_partner_phone(self):
for project in self:
if project.partner_id and project.partner_phone != project.partner_id.phone:
project.partner_id.phone = project.partner_phone
@api.onchange('alias_enabled')
def _onchange_alias_name(self):
if not self.alias_enabled:
self.alias_name = False
def _compute_alias_enabled(self):
for project in self:
project.alias_enabled = project.alias_domain and project.alias_id.alias_name
@api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids')
def _compute_allowed_users(self):
for project in self:
users = project.allowed_internal_user_ids | project.allowed_portal_user_ids
project.allowed_user_ids = users
def _inverse_allowed_user(self):
for project in self:
allowed_users = project.allowed_user_ids
project.allowed_portal_user_ids = allowed_users.filtered('share')
project.allowed_internal_user_ids = allowed_users - project.allowed_portal_user_ids
def _compute_access_url(self):
super(Project, self)._compute_access_url()
for project in self:
project.access_url = '/my/project/%s' % project.id
def _compute_access_warning(self):
super(Project, self)._compute_access_warning()
for project in self.filtered(lambda x: x.privacy_visibility != 'portal'):
project.access_warning = _(
"The project cannot be shared with the recipient(s) because the privacy of the project is too restricted. Set the privacy to 'Visible by following customers' in order to make it accessible by the recipient(s).")
@api.depends('rating_status', 'rating_status_period')
def _compute_rating_request_deadline(self):
periods = {'daily': 1, 'weekly': 7, 'bimonthly': 15, 'monthly': 30, 'quarterly': 90, 'yearly': 365}
for project in self:
project.rating_request_deadline = fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0))
@api.model
    def _map_tasks_default_values(self, task, project):
        """ Get the default values for the copied task on project duplication """
return {
'stage_id': task.stage_id.id,
'name': task.name,
'company_id': project.company_id.id,
}
def map_tasks(self, new_project_id):
""" copy and map tasks from old to new project """
project = self.browse(new_project_id)
tasks = self.env['project.task']
# We want to copy archived task, but do not propagate an active_test context key
task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids
old_to_new_tasks = {}
for task in self.env['project.task'].browse(task_ids):
# preserve task name and stage, normally altered during copy
            defaults = self._map_tasks_default_values(task, project)
if task.parent_id:
# set the parent to the duplicated task
defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False)
new_task = task.copy(defaults)
old_to_new_tasks[task.id] = new_task.id
tasks += new_task
return project.write({'tasks': [(6, 0, tasks.ids)]})
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
if default is None:
default = {}
if not default.get('name'):
default['name'] = _("%s (copy)") % (self.name)
project = super(Project, self).copy(default)
if self.subtask_project_id == self:
project.subtask_project_id = project
for follower in self.message_follower_ids:
project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids)
if 'tasks' not in default:
self.map_tasks(project.id)
return project
@api.model
def create(self, vals):
# Prevent double project creation
self = self.with_context(mail_create_nosubscribe=True)
project = super(Project, self).create(vals)
if not vals.get('subtask_project_id'):
project.subtask_project_id = project.id
if project.privacy_visibility == 'portal' and project.partner_id.user_ids:
project.allowed_user_ids |= project.partner_id.user_ids
return project
def write(self, vals):
allowed_users_changed = 'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids' in vals
if allowed_users_changed:
allowed_users = {project: project.allowed_user_ids for project in self}
        # compute is_favorite directly to sidestep the project's write access check
if 'is_favorite' in vals:
vals.pop('is_favorite')
self._fields['is_favorite'].determine_inverse(self)
res = super(Project, self).write(vals) if vals else True
if allowed_users_changed:
for project in self:
permission_removed = allowed_users.get(project) - project.allowed_user_ids
allowed_portal_users_removed = permission_removed.filtered('share')
project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids)
for task in project.task_ids:
task.allowed_user_ids -= permission_removed
if 'allow_recurring_tasks' in vals and not vals.get('allow_recurring_tasks'):
self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False})
if 'active' in vals:
# archiving/unarchiving a project does it on its tasks, too
self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']})
if vals.get('partner_id') or vals.get('privacy_visibility'):
for project in self.filtered(lambda project: project.privacy_visibility == 'portal'):
project.allowed_user_ids |= project.partner_id.user_ids
return res
def action_unlink(self):
wizard = self.env['project.delete.wizard'].create({
'project_ids': self.ids
})
return {
'name': _('Confirmation'),
'view_mode': 'form',
'res_model': 'project.delete.wizard',
'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')],
'type': 'ir.actions.act_window',
'res_id': wizard.id,
'target': 'new',
'context': self.env.context,
}
def unlink(self):
# Check project is empty
for project in self.with_context(active_test=False):
if project.tasks:
raise UserError(_('You cannot delete a project containing tasks. You can either archive it or first delete all of its tasks.'))
# Delete the empty related analytic account
analytic_accounts_to_delete = self.env['account.analytic.account']
for project in self:
if project.analytic_account_id and not project.analytic_account_id.line_ids:
analytic_accounts_to_delete |= project.analytic_account_id
result = super(Project, self).unlink()
analytic_accounts_to_delete.unlink()
return result
def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None):
"""
        Subscribe to all existing active tasks when subscribing to a project,
        and add any subscribed portal users to the allowed portal users.
"""
res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids)
project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None
task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids if project_subtypes else None
if not subtype_ids or task_subtypes:
self.mapped('tasks').message_subscribe(
partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes)
if partner_ids:
all_users = self.env['res.partner'].browse(partner_ids).user_ids
portal_users = all_users.filtered('share')
internal_users = all_users - portal_users
self.allowed_portal_user_ids |= portal_users
self.allowed_internal_user_ids |= internal_users
return res
def message_unsubscribe(self, partner_ids=None, channel_ids=None):
""" Unsubscribe from all tasks when unsubscribing from a project """
self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids)
return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids)
def _alias_get_creation_values(self):
values = super(Project, self)._alias_get_creation_values()
values['alias_model_id'] = self.env['ir.model']._get('project.task').id
if self.id:
values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or "{}")
defaults['project_id'] = self.id
return values
# ---------------------------------------------------
# Actions
# ---------------------------------------------------
def toggle_favorite(self):
favorite_projects = not_fav_projects = self.env['project.project'].sudo()
for project in self:
if self.env.user in project.favorite_user_ids:
favorite_projects |= project
else:
not_fav_projects |= project
# Project User has no write access for project.
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]})
favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]})
def action_view_tasks(self):
action = self.with_context(active_id=self.id, active_ids=self.ids) \
.env.ref('project.act_project_project_2_project_task_all') \
.sudo().read()[0]
action['display_name'] = self.name
return action
def action_view_account_analytic_line(self):
""" return the action to see all the analytic lines of the project's analytic account """
action = self.env["ir.actions.actions"]._for_xml_id("analytic.account_analytic_line_action")
action['context'] = {'default_account_id': self.analytic_account_id.id}
action['domain'] = [('account_id', '=', self.analytic_account_id.id)]
return action
def action_view_all_rating(self):
""" return the action to see all the rating of the project and activate default filters"""
action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating')
action['name'] = _('Ratings of %s') % (self.name,)
action_context = ast.literal_eval(action['context']) if action['context'] else {}
action_context.update(self._context)
action_context['search_default_parent_res_name'] = self.name
action_context.pop('group_by', None)
return dict(action, context=action_context)
# ---------------------------------------------------
# Business Methods
# ---------------------------------------------------
@api.model
def _create_analytic_account_from_values(self, values):
analytic_account = self.env['account.analytic.account'].create({
'name': values.get('name', _('Unknown Analytic Account')),
'company_id': values.get('company_id') or self.env.company.id,
'partner_id': values.get('partner_id'),
'active': True,
})
return analytic_account
def _create_analytic_account(self):
for project in self:
analytic_account = self.env['account.analytic.account'].create({
'name': project.name,
'company_id': project.company_id.id,
'partner_id': project.partner_id.id,
'active': True,
})
project.write({'analytic_account_id': analytic_account.id})
# ---------------------------------------------------
# Rating business
# ---------------------------------------------------
# This method should be called once a day by the scheduler
@api.model
def _send_rating_all(self):
projects = self.search([
('rating_active', '=', True),
('rating_status', '=', 'periodic'),
('rating_request_deadline', '<=', fields.Datetime.now())
])
for project in projects:
project.task_ids._send_task_rating_mail()
project._compute_rating_request_deadline()
self.env.cr.commit()
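    # Illustrative sketch (not shipped with this module): the daily hook above
    # is normally triggered by an ``ir.cron`` record; a hypothetical definition
    # from an Odoo shell could look like:
    #
    #   env['ir.cron'].create({
    #       'name': 'Project: send periodic ratings',  # hypothetical label
    #       'model_id': env['ir.model']._get('project.project').id,
    #       'state': 'code',
    #       'code': 'model._send_rating_all()',
    #       'interval_number': 1,
    #       'interval_type': 'days',
    #   })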
class Task(models.Model):
_name = "project.task"
_description = "Task"
_date_name = "date_assign"
_inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin']
_mail_post_access = 'read'
_order = "priority desc, sequence, id desc"
_check_company_auto = True
def _get_default_stage_id(self):
""" Gives default stage_id """
project_id = self.env.context.get('default_project_id')
if not project_id:
return False
return self.stage_find(project_id, [('fold', '=', False), ('is_closed', '=', False)])
@api.model
def _default_company_id(self):
if self._context.get('default_project_id'):
return self.env['project.project'].browse(self._context['default_project_id']).company_id
return self.env.company
@api.model
def _read_group_stage_ids(self, stages, domain, order):
search_domain = [('id', 'in', stages.ids)]
if 'default_project_id' in self.env.context:
search_domain = ['|', ('project_ids', '=', self.env.context['default_project_id'])] + search_domain
stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID)
return stages.browse(stage_ids)
active = fields.Boolean(default=True)
name = fields.Char(string='Title', tracking=True, required=True, index=True)
description = fields.Html(string='Description')
priority = fields.Selection([
('0', 'Normal'),
('1', 'Important'),
], default='0', index=True, string="Priority")
sequence = fields.Integer(string='Sequence', index=True, default=10,
help="Gives the sequence order when displaying a list of tasks.")
stage_id = fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id',
store=True, readonly=False, ondelete='restrict', tracking=True, index=True,
default=_get_default_stage_id, group_expand='_read_group_stage_ids',
domain="[('project_ids', '=', project_id)]", copy=False)
tag_ids = fields.Many2many('project.tags', string='Tags')
kanban_state = fields.Selection([
('normal', 'In Progress'),
('done', 'Ready'),
('blocked', 'Blocked')], string='Kanban State',
copy=False, default='normal', required=True)
kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label', tracking=True)
create_date = fields.Datetime("Created On", readonly=True, index=True)
write_date = fields.Datetime("Last Updated On", readonly=True, index=True)
date_end = fields.Datetime(string='Ending Date', index=True, copy=False)
date_assign = fields.Datetime(string='Assigning Date', index=True, copy=False, readonly=True)
date_deadline = fields.Date(string='Deadline', index=True, copy=False, tracking=True)
date_last_stage_update = fields.Datetime(string='Last Stage Update',
index=True,
copy=False,
readonly=True)
project_id = fields.Many2one('project.project', string='Project',
compute='_compute_project_id', store=True, readonly=False,
index=True, tracking=True, check_company=True, change_default=True)
planned_hours = fields.Float("Initially Planned Hours", help='Time planned to achieve this task (including its sub-tasks).', tracking=True)
subtask_planned_hours = fields.Float("Sub-tasks Planned Hours", compute='_compute_subtask_planned_hours', help="Sum of the time planned of all the sub-tasks linked to this task. Usually less or equal to the initially time planned of this task.")
user_id = fields.Many2one('res.users',
string='Assigned to',
default=lambda self: self.env.uid,
index=True, tracking=True)
partner_id = fields.Many2one('res.partner',
string='Customer',
compute='_compute_partner_id', store=True, readonly=False,
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True)
commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id')
partner_email = fields.Char(
compute='_compute_partner_email', inverse='_inverse_partner_email',
string='Email', readonly=False, store=True, copy=False)
partner_phone = fields.Char(
compute='_compute_partner_phone', inverse='_inverse_partner_phone',
string="Phone", readonly=False, store=True, copy=False)
ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message')
partner_city = fields.Char(related='partner_id.city', readonly=False)
manager_id = fields.Many2one('res.users', string='Project Manager', related='project_id.user_id', readonly=True)
company_id = fields.Many2one(
'res.company', string='Company', compute='_compute_company_id', store=True, readonly=False,
required=True, copy=True, default=_default_company_id)
color = fields.Integer(string='Color Index')
user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False)
attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids', string="Main Attachments",
help="Attachment that don't come from message.")
    # In the domain of displayed_image_id, we couldn't use attachment_ids because a one2many is represented as a list of commands, so we used res_model & res_id instead
displayed_image_id = fields.Many2one('ir.attachment', domain="[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]", string='Cover Image')
legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True, related_sudo=False)
legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True, related_sudo=False)
legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True, related_sudo=False)
is_closed = fields.Boolean(related="stage_id.is_closed", string="Closing Stage", readonly=True, related_sudo=False)
parent_id = fields.Many2one('project.task', string='Parent Task', index=True)
child_ids = fields.One2many('project.task', 'parent_id', string="Sub-tasks", context={'active_test': False})
subtask_project_id = fields.Many2one('project.project', related="project_id.subtask_project_id", string='Sub-task Project', readonly=True)
allow_subtasks = fields.Boolean(string="Allow Sub-tasks", related="project_id.allow_subtasks", readonly=True)
subtask_count = fields.Integer("Sub-task count", compute='_compute_subtask_count')
email_from = fields.Char(string='Email From', help="These people will receive email.", index=True,
        compute='_compute_email_from', store=True, readonly=False)
allowed_user_ids = fields.Many2many('res.users', string="Visible to", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False)
project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string="Project Visibility")
# Computed field about working time elapsed between record creation and assignation/closing.
working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours to assign', store=True, group_operator="avg")
working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours to close', store=True, group_operator="avg")
working_days_open = fields.Float(compute='_compute_elapsed', string='Working days to assign', store=True, group_operator="avg")
working_days_close = fields.Float(compute='_compute_elapsed', string='Working days to close', store=True, group_operator="avg")
# customer portal: include comment and incoming emails in communication history
website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', 'in', ['email', 'comment'])])
# recurrence fields
allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks')
recurring_task = fields.Boolean(string="Recurrent")
recurring_count = fields.Integer(string="Tasks in Recurrence", compute='_compute_recurring_count')
recurrence_id = fields.Many2one('project.task.recurrence', copy=False)
recurrence_update = fields.Selection([
('this', 'This task'),
('subsequent', 'This and following tasks'),
('all', 'All tasks'),
], default='this', store=False)
    recurrence_message = fields.Char(string='Next Recurrences', compute='_compute_recurrence_message')
repeat_interval = fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat', readonly=False)
repeat_unit = fields.Selection([
('day', 'Days'),
('week', 'Weeks'),
('month', 'Months'),
('year', 'Years'),
], default='week', compute='_compute_repeat', readonly=False)
repeat_type = fields.Selection([
('forever', 'Forever'),
('until', 'End Date'),
('after', 'Number of Repetitions'),
], default="forever", string="Until", compute='_compute_repeat', readonly=False)
repeat_until = fields.Date(string="End Date", compute='_compute_repeat', readonly=False)
repeat_number = fields.Integer(string="Repetitions", default=1, compute='_compute_repeat', readonly=False)
repeat_on_month = fields.Selection([
('date', 'Date of the Month'),
('day', 'Day of the Month'),
], default='date', compute='_compute_repeat', readonly=False)
repeat_on_year = fields.Selection([
('date', 'Date of the Year'),
('day', 'Day of the Year'),
], default='date', compute='_compute_repeat', readonly=False)
mon = fields.Boolean(string="Mon", compute='_compute_repeat', readonly=False)
tue = fields.Boolean(string="Tue", compute='_compute_repeat', readonly=False)
wed = fields.Boolean(string="Wed", compute='_compute_repeat', readonly=False)
thu = fields.Boolean(string="Thu", compute='_compute_repeat', readonly=False)
fri = fields.Boolean(string="Fri", compute='_compute_repeat', readonly=False)
sat = fields.Boolean(string="Sat", compute='_compute_repeat', readonly=False)
sun = fields.Boolean(string="Sun", compute='_compute_repeat', readonly=False)
repeat_day = fields.Selection([
(str(i), str(i)) for i in range(1, 32)
], compute='_compute_repeat', readonly=False)
repeat_week = fields.Selection([
('first', 'First'),
('second', 'Second'),
('third', 'Third'),
('last', 'Last'),
], default='first', compute='_compute_repeat', readonly=False)
repeat_weekday = fields.Selection([
('mon', 'Monday'),
('tue', 'Tuesday'),
('wed', 'Wednesday'),
('thu', 'Thursday'),
('fri', 'Friday'),
('sat', 'Saturday'),
('sun', 'Sunday'),
], string='Day Of The Week', compute='_compute_repeat', readonly=False)
repeat_month = fields.Selection([
('january', 'January'),
('february', 'February'),
('march', 'March'),
('april', 'April'),
('may', 'May'),
('june', 'June'),
('july', 'July'),
('august', 'August'),
('september', 'September'),
('october', 'October'),
('november', 'November'),
('december', 'December'),
], compute='_compute_repeat', readonly=False)
repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility')
repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility')
repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility')
repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility')
@api.model
def _get_recurrence_fields(self):
return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number',
'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat',
'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday']
@api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year')
def _compute_repeat_visibility(self):
for task in self:
            task.repeat_show_day = task.recurring_task and ((task.repeat_unit == 'month' and task.repeat_on_month == 'date') or (task.repeat_unit == 'year' and task.repeat_on_year == 'date'))
            task.repeat_show_week = task.recurring_task and ((task.repeat_unit == 'month' and task.repeat_on_month == 'day') or (task.repeat_unit == 'year' and task.repeat_on_year == 'day'))
task.repeat_show_dow = task.recurring_task and task.repeat_unit == 'week'
task.repeat_show_month = task.recurring_task and task.repeat_unit == 'year'
@api.depends('recurring_task')
def _compute_repeat(self):
rec_fields = self._get_recurrence_fields()
defaults = self.default_get(rec_fields)
for task in self:
for f in rec_fields:
if task.recurrence_id:
task[f] = task.recurrence_id[f]
else:
if task.recurring_task:
task[f] = defaults.get(f)
else:
task[f] = False
def _get_weekdays(self, n=1):
self.ensure_one()
if self.repeat_unit == 'week':
return [fn(n) for day, fn in DAYS.items() if self[day]]
return [DAYS.get(self.repeat_weekday)(n)]
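    # Illustrative note (assumes DAYS maps the boolean field names to dateutil
    # weekday constructors, e.g. 'mon' -> MO): for a weekly recurrence with
    # ``mon`` and ``fri`` ticked, ``task._get_weekdays()`` would return
    # something like [MO(+1), FR(+1)].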
@api.depends(
'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until',
'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri',
'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday')
def _compute_recurrence_message(self):
self.recurrence_message = False
for task in self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()):
date = fields.Date.today()
number_occurrences = min(5, task.repeat_number if task.repeat_type == 'after' else 5)
delta = task.repeat_interval if task.repeat_unit == 'day' else 1
recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates(
date + timedelta(days=delta),
task.repeat_interval,
task.repeat_unit,
task.repeat_type,
task.repeat_until,
task.repeat_on_month,
task.repeat_on_year,
task._get_weekdays(WEEKS.get(task.repeat_week)),
task.repeat_day,
task.repeat_week,
task.repeat_month,
count=number_occurrences)
date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format
task.recurrence_message = '<ul>'
for date in recurring_dates[:5]:
task.recurrence_message += '<li>%s</li>' % date.strftime(date_format)
if task.repeat_type == 'after' and task.repeat_number > 5 or task.repeat_type == 'forever' or len(recurring_dates) > 5:
task.recurrence_message += '<li>...</li>'
task.recurrence_message += '</ul>'
if task.repeat_type == 'until':
task.recurrence_message += _('<p><em>Number of tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)}
def _is_recurrence_valid(self):
self.ensure_one()
return self.repeat_interval > 0 and\
(not self.repeat_show_dow or self._get_weekdays()) and\
(self.repeat_type != 'after' or self.repeat_number) and\
(self.repeat_type != 'until' or self.repeat_until and self.repeat_until > fields.Date.today())
@api.depends('recurrence_id')
def _compute_recurring_count(self):
self.recurring_count = 0
recurring_tasks = self.filtered(lambda l: l.recurrence_id)
count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id')
tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c in count}
for task in recurring_tasks:
task.recurring_count = tasks_count.get(task.recurrence_id.id, 0)
@api.depends('partner_id.email')
def _compute_partner_email(self):
for task in self:
if task.partner_id and task.partner_id.email != task.partner_email:
task.partner_email = task.partner_id.email
def _inverse_partner_email(self):
for task in self:
if task.partner_id and task.partner_email != task.partner_id.email:
task.partner_id.email = task.partner_email
@api.depends('partner_id.phone')
def _compute_partner_phone(self):
for task in self:
if task.partner_id and task.partner_phone != task.partner_id.phone:
task.partner_phone = task.partner_id.phone
def _inverse_partner_phone(self):
for task in self:
if task.partner_id and task.partner_phone != task.partner_id.phone:
task.partner_id.phone = task.partner_phone
@api.depends('partner_email', 'partner_phone', 'partner_id')
def _compute_ribbon_message(self):
for task in self:
will_write_email = task.partner_id and task.partner_email != task.partner_id.email
will_write_phone = task.partner_id and task.partner_phone != task.partner_id.phone
if will_write_email and will_write_phone:
task.ribbon_message = _('By saving this change, the customer email and phone number will also be updated.')
elif will_write_email:
task.ribbon_message = _('By saving this change, the customer email will also be updated.')
elif will_write_phone:
task.ribbon_message = _('By saving this change, the customer phone number will also be updated.')
else:
task.ribbon_message = False
@api.constrains('parent_id')
def _check_parent_id(self):
if not self._check_recursion():
raise ValidationError(_('Error! You cannot create recursive hierarchy of tasks.'))
@api.constrains('allowed_user_ids')
def _check_no_portal_allowed(self):
for task in self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'):
portal_users = task.allowed_user_ids.filtered('share')
if portal_users:
user_names = ', '.join(portal_users[:10].mapped('name'))
raise ValidationError(_("The project visibility setting doesn't allow portal users to see the project's tasks. (%s)", user_names))
def _compute_attachment_ids(self):
for task in self:
attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids
message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from mail_thread
task.attachment_ids = [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))]
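    # Note added for clarity: the (6, 0, ids) command above replaces the whole
    # attachment_ids set with the computed list in a single write.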
@api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility')
def _compute_allowed_user_ids(self):
for task in self:
portal_users = task.allowed_user_ids.filtered('share')
internal_users = task.allowed_user_ids - portal_users
if task.project_id.privacy_visibility == 'followers':
task.allowed_user_ids |= task.project_id.allowed_internal_user_ids
task.allowed_user_ids -= portal_users
elif task.project_id.privacy_visibility == 'portal':
task.allowed_user_ids |= task.project_id.allowed_portal_user_ids
if task.project_id.privacy_visibility != 'portal':
task.allowed_user_ids -= portal_users
elif task.project_id.privacy_visibility != 'followers':
task.allowed_user_ids -= internal_users
@api.depends('create_date', 'date_end', 'date_assign')
def _compute_elapsed(self):
task_linked_to_calendar = self.filtered(
lambda task: task.project_id.resource_calendar_id and task.create_date
)
for task in task_linked_to_calendar:
dt_create_date = fields.Datetime.from_string(task.create_date)
if task.date_assign:
dt_date_assign = fields.Datetime.from_string(task.date_assign)
duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True)
task.working_hours_open = duration_data['hours']
task.working_days_open = duration_data['days']
else:
task.working_hours_open = 0.0
task.working_days_open = 0.0
if task.date_end:
dt_date_end = fields.Datetime.from_string(task.date_end)
duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True)
task.working_hours_close = duration_data['hours']
task.working_days_close = duration_data['days']
else:
task.working_hours_close = 0.0
task.working_days_close = 0.0
(self - task_linked_to_calendar).update(dict.fromkeys(
['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0))
@api.depends('stage_id', 'kanban_state')
def _compute_kanban_state_label(self):
for task in self:
if task.kanban_state == 'normal':
task.kanban_state_label = task.legend_normal
elif task.kanban_state == 'blocked':
task.kanban_state_label = task.legend_blocked
else:
task.kanban_state_label = task.legend_done
def _compute_access_url(self):
super(Task, self)._compute_access_url()
for task in self:
task.access_url = '/my/task/%s' % task.id
def _compute_access_warning(self):
super(Task, self)._compute_access_warning()
for task in self.filtered(lambda x: x.project_id.privacy_visibility != 'portal'):
task.access_warning = _(
"The task cannot be shared with the recipient(s) because the privacy of the project is too restricted. Set the privacy of the project to 'Visible by following customers' in order to make it accessible by the recipient(s).")
@api.depends('child_ids.planned_hours')
def _compute_subtask_planned_hours(self):
for task in self:
task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours for child_task in task.child_ids)
@api.depends('child_ids')
def _compute_subtask_count(self):
for task in self:
task.subtask_count = len(task._get_all_subtasks())
@api.onchange('company_id')
def _onchange_task_company(self):
if self.project_id.company_id != self.company_id:
self.project_id = False
@api.depends('project_id.company_id')
def _compute_company_id(self):
for task in self.filtered(lambda task: task.project_id):
task.company_id = task.project_id.company_id
@api.depends('project_id')
def _compute_stage_id(self):
for task in self:
if task.project_id:
if task.project_id not in task.stage_id.project_ids:
task.stage_id = task.stage_find(task.project_id.id, [
('fold', '=', False), ('is_closed', '=', False)])
else:
task.stage_id = False
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
if default is None:
default = {}
if not default.get('name'):
default['name'] = _("%s (copy)", self.name)
if self.recurrence_id:
default['recurrence_id'] = self.recurrence_id.copy().id
return super(Task, self).copy(default)
@api.constrains('parent_id')
def _check_parent_id(self):
for task in self:
if not task._check_recursion():
raise ValidationError(_('Error! You cannot create recursive hierarchy of task(s).'))
@api.model
def get_empty_list_help(self, help):
tname = _("task")
project_id = self.env.context.get('default_project_id', False)
if project_id:
name = self.env['project.project'].browse(project_id).label_tasks
            if name:
                tname = name.lower()
self = self.with_context(
empty_list_help_id=self.env.context.get('default_project_id'),
empty_list_help_model='project.project',
empty_list_help_document_name=tname,
)
return super(Task, self).get_empty_list_help(help)
def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None):
"""
Add the users subscribed to allowed portal users
"""
res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids)
if partner_ids:
new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share')
tasks = self.filtered(lambda task: task.project_id.privacy_visibility == 'portal')
tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user in new_allowed_users]})
return res
# ----------------------------------------
# Case management
# ----------------------------------------
def stage_find(self, section_id, domain=[], order='sequence'):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
section_ids.extend(self.mapped('project_id').ids)
search_domain = []
if section_ids:
search_domain = [('|')] * (len(section_ids) - 1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
return self.env['project.task.type'].search(search_domain, order=order, limit=1).id
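
    # NOTE (editorial, illustrative usage): stage_find() returns the id of the first stage
    # (ordered by sequence) linked to the given project(s) and matching the extra domain, e.g.
    #   task.stage_find(task.project_id.id, [('fold', '=', False), ('is_closed', '=', False)])
    # as used in _compute_stage_id above.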
# ------------------------------------------------
# CRUD overrides
# ------------------------------------------------
@api.model
def default_get(self, default_fields):
vals = super(Task, self).default_get(default_fields)
days = list(DAYS.keys())
week_start = fields.Datetime.today().weekday()
if all(d in default_fields for d in days):
vals[days[week_start]] = True
if 'repeat_day' in default_fields:
vals['repeat_day'] = str(fields.Datetime.today().day)
if 'repeat_month' in default_fields:
vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0]
if 'repeat_until' in default_fields:
vals['repeat_until'] = fields.Date.today() + timedelta(days=7)
if 'repeat_weekday' in default_fields:
vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0]
return vals
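
    # NOTE (editorial comment, not part of the original source): the recurrence defaults above
    # flag today's weekday, use today's day of month and month for monthly/yearly repetition,
    # and propose an end date one week from today.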
@api.model_create_multi
def create(self, vals_list):
default_stage = dict()
for vals in vals_list:
project_id = vals.get('project_id') or self.env.context.get('default_project_id')
if project_id and not "company_id" in vals:
vals["company_id"] = self.env["project.project"].browse(
project_id
).company_id.id or self.env.company.id
if project_id and "stage_id" not in vals:
# 1) Allows keeping the batch creation of tasks
# 2) Ensure the defaults are correct (and computed once by project),
# by using default get (instead of _get_default_stage_id or _stage_find),
if project_id not in default_stage:
default_stage[project_id] = self.with_context(
default_project_id=project_id
).default_get(['stage_id']).get('stage_id')
vals["stage_id"] = default_stage[project_id]
# user_id change: update date_assign
if vals.get('user_id'):
vals['date_assign'] = fields.Datetime.now()
# Stage change: Update date_end if folded stage and date_last_stage_update
if vals.get('stage_id'):
vals.update(self.update_date_end(vals['stage_id']))
vals['date_last_stage_update'] = fields.Datetime.now()
# recurrence
rec_fields = vals.keys() & self._get_recurrence_fields()
if rec_fields and vals.get('recurring_task') is True:
rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields}
rec_values['next_recurrence_date'] = fields.Datetime.today()
recurrence = self.env['project.task.recurrence'].create(rec_values)
vals['recurrence_id'] = recurrence.id
tasks = super().create(vals_list)
for task in tasks:
if task.project_id.privacy_visibility == 'portal':
task._portal_ensure_token()
return tasks
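
    # NOTE (editorial comment, not part of the original source): create() above resolves the
    # stage default once per project (cached in default_stage), stamps date_assign /
    # date_last_stage_update / date_end when relevant, creates a project.task.recurrence record
    # when recurring_task is set, and generates a portal access token for tasks belonging to
    # portal-visible projects.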
def write(self, vals):
now = fields.Datetime.now()
if 'parent_id' in vals and vals['parent_id'] in self.ids:
raise UserError(_("Sorry. You can't set a task as its parent task."))
if 'active' in vals and not vals.get('active') and any(self.mapped('recurrence_id')):
# TODO: show a dialog to stop the recurrence
raise UserError(_('You cannot archive recurring tasks. Please, disable the recurrence first.'))
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals.update(self.update_date_end(vals['stage_id']))
vals['date_last_stage_update'] = now
# reset kanban state when changing stage
if 'kanban_state' not in vals:
vals['kanban_state'] = 'normal'
# user_id change: update date_assign
if vals.get('user_id') and 'date_assign' not in vals:
vals['date_assign'] = now
# recurrence fields
rec_fields = vals.keys() & self._get_recurrence_fields()
if rec_fields:
rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields}
for task in self:
if task.recurrence_id:
task.recurrence_id.write(rec_values)
elif vals.get('recurring_task'):
rec_values['next_recurrence_date'] = fields.Datetime.today()
recurrence = self.env['project.task.recurrence'].create(rec_values)
task.recurrence_id = recurrence.id
if 'recurring_task' in vals and not vals.get('recurring_task'):
self.recurrence_id.unlink()
tasks = self
recurrence_update = vals.pop('recurrence_update', 'this')
if recurrence_update != 'this':
recurrence_domain = []
if recurrence_update == 'subsequent':
for task in self:
recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]])
else:
recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)]
tasks |= self.env['project.task'].search(recurrence_domain)
result = super(Task, tasks).write(vals)
# rating on stage
if 'stage_id' in vals and vals.get('stage_id'):
self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True)
return result
def update_date_end(self, stage_id):
project_task_type = self.env['project.task.type'].browse(stage_id)
if project_task_type.fold or project_task_type.is_closed:
return {'date_end': fields.Datetime.now()}
return {'date_end': False}
def unlink(self):
if any(self.mapped('recurrence_id')):
# TODO: show a dialog to stop the recurrence
raise UserError(_('You cannot delete recurring tasks. Please, disable the recurrence first.'))
return super().unlink()
# ---------------------------------------------------
# Subtasks
# ---------------------------------------------------
@api.depends('parent_id.partner_id', 'project_id.partner_id')
def _compute_partner_id(self):
"""
If a task has no partner_id, use the project partner_id if any, or else the parent task partner_id.
Once the task partner_id has been set:
1) if the project partner_id changes, the task partner_id is automatically changed also.
2) if the parent task partner_id changes, the task partner_id remains the same.
"""
for task in self:
if task.partner_id:
if task.project_id.partner_id:
task.partner_id = task.project_id.partner_id
else:
task.partner_id = task.project_id.partner_id or task.parent_id.partner_id
@api.depends('partner_id.email', 'parent_id.email_from')
def _compute_email_from(self):
for task in self:
task.email_from = task.partner_id.email or ((task.partner_id or task.parent_id) and task.email_from) or task.parent_id.email_from
@api.depends('parent_id.project_id.subtask_project_id')
def _compute_project_id(self):
for task in self:
if not task.project_id:
task.project_id = task.parent_id.project_id.subtask_project_id
# ---------------------------------------------------
# Mail gateway
# ---------------------------------------------------
def _track_template(self, changes):
res = super(Task, self)._track_template(changes)
test_task = self[0]
if 'stage_id' in changes and test_task.stage_id.mail_template_id:
res['stage_id'] = (test_task.stage_id.mail_template_id, {
'auto_delete_message': True,
'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'),
'email_layout_xmlid': 'mail.mail_notification_light'
})
return res
def _creation_subtype(self):
return self.env.ref('project.mt_task_new')
def _track_subtype(self, init_values):
self.ensure_one()
if 'kanban_state_label' in init_values and self.kanban_state == 'blocked':
return self.env.ref('project.mt_task_blocked')
elif 'kanban_state_label' in init_values and self.kanban_state == 'done':
return self.env.ref('project.mt_task_ready')
elif 'stage_id' in init_values:
return self.env.ref('project.mt_task_stage')
return super(Task, self)._track_subtype(init_values)
def _notify_get_groups(self, msg_vals=None):
""" Handle project users and managers recipients that can assign
tasks and create new one directly from notification emails. Also give
access button to portal users and portal customers. If they are notified
they should probably have access to the document. """
groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals)
local_msg_vals = dict(msg_vals or {})
self.ensure_one()
project_user_group_id = self.env.ref('project.group_project_user').id
group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups']
if self.project_id.privacy_visibility == 'followers':
allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids
group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] and pdata['id'] in allowed_user_ids
new_group = ('group_project_user', group_func, {})
if not self.user_id and not self.stage_id.fold:
take_action = self._notify_get_action_link('assign', **local_msg_vals)
project_actions = [{'url': take_action, 'title': _('I take it')}]
new_group[2]['actions'] = project_actions
groups = [new_group] + groups
if self.project_id.privacy_visibility == 'portal':
allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids
groups.insert(0, (
'allowed_portal_users',
lambda pdata: pdata['type'] == 'portal' and pdata['id'] in allowed_user_ids,
{}
))
portal_privacy = self.project_id.privacy_visibility == 'portal'
for group_name, group_method, group_data in groups:
if group_name in ('customer', 'user') or group_name == 'portal_customer' and not portal_privacy:
group_data['has_button_access'] = False
elif group_name == 'portal_customer' and portal_privacy:
group_data['has_button_access'] = True
return groups
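
    # NOTE (editorial comment, not part of the original source): the notification groups above
    # are ordered most-specific first; the access button is hidden for generic customer/user
    # groups and only shown to portal customers when the project visibility is 'portal', and
    # internal project users get an "I take it" action link on unassigned, unfolded tasks.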
def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None):
""" Override to set alias of tasks to their project if any. """
aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None)
res = {task.id: aliases.get(task.project_id.id) for task in self}
leftover = self.filtered(lambda rec: not rec.project_id)
if leftover:
res.update(super(Task, leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names))
return res
def email_split(self, msg):
email_list = tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or ''))
# check left-part is not already an alias
aliases = self.mapped('project_id.alias_name')
return [x for x in email_list if x.split('@')[0] not in aliases]
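
    # NOTE (editorial, illustrative -- hypothetical address): email_split() drops recipients
    # whose local part matches a project alias so the alias itself is never subscribed, e.g.
    # '[email protected]' would be filtered out when a project uses the 'support' alias.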
@api.model
def message_new(self, msg, custom_values=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
# remove default author when going through the mail gateway. Indeed we
# do not want to explicitly set user_id to False; however we do not
# want the gateway user to be responsible if no other responsible is
# found.
create_context = dict(self.env.context or {})
create_context['default_user_id'] = False
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject') or _("No Subject"),
'email_from': msg.get('from'),
'planned_hours': 0.0,
'partner_id': msg.get('author_id')
}
defaults.update(custom_values)
task = super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults)
email_list = task.email_split(msg)
partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if p]
task.message_subscribe(partner_ids)
return task
def message_update(self, msg, update_vals=None):
""" Override to update the task according to the email. """
email_list = self.email_split(msg)
partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p]
self.message_subscribe(partner_ids)
return super(Task, self).message_update(msg, update_vals=update_vals)
def _message_get_suggested_recipients(self):
recipients = super(Task, self)._message_get_suggested_recipients()
for task in self:
if task.partner_id:
reason = _('Customer Email') if task.partner_id.email else _('Customer')
task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason)
elif task.email_from:
task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email'))
return recipients
def _notify_email_header_dict(self):
headers = super(Task, self)._notify_email_header_dict()
if self.project_id:
current_objects = [h for h in headers.get('X-Odoo-Objects', '').split(',') if h]
current_objects.insert(0, 'project.project-%s, ' % self.project_id.id)
headers['X-Odoo-Objects'] = ','.join(current_objects)
if self.tag_ids:
headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name'))
return headers
def _message_post_after_hook(self, message, msg_vals):
if message.attachment_ids and not self.displayed_image_id:
image_attachments = message.attachment_ids.filtered(lambda a: a.mimetype == 'image')
if image_attachments:
self.displayed_image_id = image_attachments[0]
if self.email_from and not self.partner_id:
            # we consider that posting a message with a specified recipient (not a follower, a
            # specific one) on a document without a customer means that it was created through the
            # chatter using suggested recipients. This heuristic lets us avoid ugly hacks in JS.
new_partner = message.partner_ids.filtered(lambda partner: partner.email == self.email_from)
if new_partner:
self.search([
('partner_id', '=', False),
('email_from', '=', new_partner.email),
('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id})
return super(Task, self)._message_post_after_hook(message, msg_vals)
def action_assign_to_me(self):
self.write({'user_id': self.env.user.id})
# If depth == 1, return only direct children
# If depth == 3, return children to third generation
# If depth <= 0, return all children without depth limit
def _get_all_subtasks(self, depth=0):
children = self.mapped('child_ids').filtered(lambda children: children.active)
if not children:
return self.env['project.task']
if depth == 1:
return children
return children + children._get_all_subtasks(depth - 1)
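
    # NOTE (editorial, illustrative usage): task._get_all_subtasks(depth=1) returns only the
    # direct active children, while the default depth=0 recurses through every generation,
    # as described by the comments above _get_all_subtasks.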
def action_open_parent_task(self):
return {
'name': _('Parent Task'),
'view_mode': 'form',
'res_model': 'project.task',
'res_id': self.parent_id.id,
'type': 'ir.actions.act_window',
'context': dict(self._context, create=False)
}
def action_subtask(self):
action = self.env["ir.actions.actions"]._for_xml_id("project.project_task_action_sub_task")
# display all subtasks of current task
action['domain'] = [('id', 'child_of', self.id), ('id', '!=', self.id)]
# update context, with all default values as 'quick_create' does not contains all field in its view
if self._context.get('default_project_id'):
default_project = self.env['project.project'].browse(self.env.context['default_project_id'])
else:
default_project = self.project_id.subtask_project_id or self.project_id
ctx = dict(self.env.context)
ctx = {k: v for k, v in ctx.items() if not k.startswith('search_default_')}
ctx.update({
'default_name': self.env.context.get('name', self.name) + ':',
'default_parent_id': self.id, # will give default subtask field in `default_get`
'default_company_id': default_project.company_id.id if default_project else self.env.company.id,
})
action['context'] = ctx
return action
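
    # NOTE (editorial comment, not part of the original source): action_subtask() above opens
    # the subtask action filtered to descendants of the current task and seeds the creation
    # context (default_parent_id, default_name, default_company_id) so quick-created subtasks
    # are linked back to this task.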
def action_recurring_tasks(self):
return {
'name': 'Tasks in Recurrence',
'type': 'ir.actions.act_window',
'res_model': 'project.task',
'view_mode': 'tree,form',
'domain': [('recurrence_id', 'in', self.recurrence_id.ids)],
}
# ---------------------------------------------------
# Rating business
# ---------------------------------------------------
def _send_task_rating_mail(self, force_send=False):
for task in self:
rating_template = task.stage_id.rating_template_id
if rating_template:
task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send)
def rating_get_partner_id(self):
res = super(Task, self).rating_get_partner_id()
if not res and self.project_id.partner_id:
return self.project_id.partner_id
return res
def rating_apply(self, rate, token=None, feedback=None, subtype_xmlid=None):
return super(Task, self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid="project.mt_task_rating")
def _rating_get_parent_field_name(self):
return 'project_id'
class ProjectTags(models.Model):
""" Tags of project's tasks """
_name = "project.tags"
_description = "Project Tags"
def _get_default_color(self):
return randint(1, 11)
name = fields.Char('Name', required=True)
color = fields.Integer(string='Color', default=_get_default_color)
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists!"),
]
| [
"odoo.fields.Datetime.from_string",
"odoo.fields.Datetime.today",
"odoo.osv.expression.OR",
"odoo.fields.Many2one",
"odoo.api.onchange",
"datetime.timedelta",
"odoo._",
"odoo.fields.Float",
"odoo.fields.datetime.now",
"odoo.fields.One2many",
"odoo.fields.Text",
"odoo.api.returns",
"random.randint",
"odoo.fields.Datetime.now",
"odoo.fields.Date",
"odoo.fields.Date.today",
"ast.literal_eval",
"odoo.fields.Selection",
"odoo.api.depends",
"odoo.fields.Many2many",
"odoo.fields.Char",
"odoo.fields.Boolean",
"odoo.api.constrains",
"odoo.fields.Html",
"odoo.fields.Datetime",
"odoo.fields.Integer"
] | [((786, 824), 'odoo.fields.Boolean', 'fields.Boolean', (['"""Active"""'], {'default': '(True)'}), "('Active', default=True)\n", (800, 824), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((836, 899), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Stage Name"""', 'required': '(True)', 'translate': '(True)'}), "(string='Stage Name', required=True, translate=True)\n", (847, 899), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((918, 945), 'odoo.fields.Text', 'fields.Text', ([], {'translate': '(True)'}), '(translate=True)\n', (929, 945), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((961, 986), 'odoo.fields.Integer', 'fields.Integer', ([], {'default': '(1)'}), '(default=1)\n', (975, 986), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((1005, 1147), 'odoo.fields.Many2many', 'fields.Many2many', (['"""project.project"""', '"""project_task_type_rel"""', '"""type_id"""', '"""project_id"""'], {'string': '"""Projects"""', 'default': '_get_default_project_ids'}), "('project.project', 'project_task_type_rel', 'type_id',\n 'project_id', string='Projects', default=_get_default_project_ids)\n", (1021, 1147), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((1961, 2168), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mail.template"""'], {'string': '"""Email Template"""', 'domain': "[('model', '=', 'project.task')]", 'help': '"""If set an email will be sent to the customer when the task or issue reaches this step."""'}), "('mail.template', string='Email Template', domain=[('model',\n '=', 'project.task')], help=\n 'If set an email will be sent to the customer when the task or issue reaches this step.'\n )\n", (1976, 2168), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((2199, 2350), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Folded in Kanban"""', 'help': '"""This stage is folded in the kanban view when there are no records in that stage to display."""'}), "(string='Folded in Kanban', help=\n 'This stage is folded in the kanban view when there are no records in that stage to display.'\n )\n", (2213, 2350), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((2374, 2660), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mail.template"""'], {'string': '"""Rating Email Template"""', 'domain': "[('model', '=', 'project.task')]", 'help': '"""If set and if the project\'s rating configuration is \'Rating when changing stage\', then an email will be sent to the customer when the task reaches this step."""'}), '(\'mail.template\', string=\'Rating Email Template\', domain=[(\n \'model\', \'=\', \'project.task\')], help=\n "If set and if the project\'s rating configuration is \'Rating when changing stage\', then an email will be sent to the customer when the task reaches this step."\n )\n', (2389, 2660), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((2714, 3083), 'odoo.fields.Boolean', 'fields.Boolean', (['"""Automatic kanban status"""'], {'default': '(False)', 'help': '"""Automatically modify the kanban state when the customer replies to the feedback for this stage.\n * A good feedback from the customer will update the kanban state to \'ready for the new stage\' (green bullet).\n * A medium or a bad feedback will set the kanban state to \'blocked\' (red bullet).\n"""'}), '(\'Automatic kanban status\', default=False, help=\n """Automatically modify the kanban state when the 
customer replies to the feedback for this stage.\n * A good feedback from the customer will update the kanban state to \'ready for the new stage\' (green bullet).\n * A medium or a bad feedback will set the kanban state to \'blocked\' (red bullet).\n"""\n )\n', (2728, 3083), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((3127, 3217), 'odoo.fields.Boolean', 'fields.Boolean', (['"""Closing Stage"""'], {'help': '"""Tasks in this stage are considered as closed."""'}), "('Closing Stage', help=\n 'Tasks in this stage are considered as closed.')\n", (3141, 3217), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((3243, 3298), 'odoo.fields.Text', 'fields.Text', ([], {'compute': '"""_compute_disabled_rating_warning"""'}), "(compute='_compute_disabled_rating_warning')\n", (3254, 3298), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((4732, 4787), 'odoo.api.depends', 'api.depends', (['"""project_ids"""', '"""project_ids.rating_active"""'], {}), "('project_ids', 'project_ids.rating_active')\n", (4743, 4787), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((7647, 7708), 'odoo.fields.Char', 'fields.Char', (['"""Name"""'], {'index': '(True)', 'required': '(True)', 'tracking': '(True)'}), "('Name', index=True, required=True, tracking=True)\n", (7658, 7708), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((7727, 7740), 'odoo.fields.Html', 'fields.Html', ([], {}), '()\n', (7738, 7740), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((7754, 7896), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'default': '(True)', 'help': '"""If the active field is set to False, it will allow you to hide the project without removing it."""'}), "(default=True, help=\n 'If the active field is set to False, it will allow you to hide the project without removing it.'\n )\n", (7768, 7896), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((7910, 8010), 'odoo.fields.Integer', 'fields.Integer', ([], {'default': '(10)', 'help': '"""Gives the sequence order when displaying a list of Projects."""'}), "(default=10, help=\n 'Gives the sequence order when displaying a list of Projects.')\n", (7924, 8010), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((8023, 8190), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.partner"""'], {'string': '"""Customer"""', 'auto_join': '(True)', 'tracking': '(True)', 'domain': '"""[\'|\', (\'company_id\', \'=\', False), (\'company_id\', \'=\', company_id)]"""'}), '(\'res.partner\', string=\'Customer\', auto_join=True, tracking=\n True, domain=\n "[\'|\', (\'company_id\', \'=\', False), (\'company_id\', \'=\', company_id)]")\n', (8038, 8190), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((8201, 8345), 'odoo.fields.Char', 'fields.Char', ([], {'compute': '"""_compute_partner_email"""', 'inverse': '"""_inverse_partner_email"""', 'string': '"""Email"""', 'readonly': '(False)', 'store': '(True)', 'copy': '(False)'}), "(compute='_compute_partner_email', inverse=\n '_inverse_partner_email', string='Email', readonly=False, store=True,\n copy=False)\n", (8212, 8345), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((8374, 8518), 'odoo.fields.Char', 'fields.Char', ([], {'compute': '"""_compute_partner_phone"""', 'inverse': '"""_inverse_partner_phone"""', 'string': '"""Phone"""', 'readonly': '(False)', 'store': '(True)', 'copy': '(False)'}), 
"(compute='_compute_partner_phone', inverse=\n '_inverse_partner_phone', string='Phone', readonly=False, store=True,\n copy=False)\n", (8385, 8518), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((8544, 8650), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.company"""'], {'string': '"""Company"""', 'required': '(True)', 'default': '(lambda self: self.env.company)'}), "('res.company', string='Company', required=True, default=lambda\n self: self.env.company)\n", (8559, 8650), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((8665, 8769), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.currency"""'], {'related': '"""company_id.currency_id"""', 'string': '"""Currency"""', 'readonly': '(True)'}), "('res.currency', related='company_id.currency_id', string=\n 'Currency', readonly=True)\n", (8680, 8769), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((8791, 9166), 'odoo.fields.Many2one', 'fields.Many2one', (['"""account.analytic.account"""'], {'string': '"""Analytic Account"""', 'copy': '(False)', 'ondelete': '"""set null"""', 'domain': '"""[\'|\', (\'company_id\', \'=\', False), (\'company_id\', \'=\', company_id)]"""', 'check_company': '(True)', 'help': '"""Analytic account to which this project is linked for financial management. Use an analytic account to record cost and revenue on your project."""'}), '(\'account.analytic.account\', string=\'Analytic Account\', copy\n =False, ondelete=\'set null\', domain=\n "[\'|\', (\'company_id\', \'=\', False), (\'company_id\', \'=\', company_id)]",\n check_company=True, help=\n \'Analytic account to which this project is linked for financial management. Use an analytic account to record cost and revenue on your project.\'\n )\n', (8806, 9166), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((9200, 9345), 'odoo.fields.Many2many', 'fields.Many2many', (['"""res.users"""', '"""project_favorite_user_rel"""', '"""project_id"""', '"""user_id"""'], {'default': '_get_default_favorite_user_ids', 'string': '"""Members"""'}), "('res.users', 'project_favorite_user_rel', 'project_id',\n 'user_id', default=_get_default_favorite_user_ids, string='Members')\n", (9216, 9345), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((9385, 9577), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'compute': '"""_compute_is_favorite"""', 'inverse': '"""_inverse_is_favorite"""', 'string': '"""Show Project on dashboard"""', 'help': '"""Whether this project should be displayed on your dashboard."""'}), "(compute='_compute_is_favorite', inverse=\n '_inverse_is_favorite', string='Show Project on dashboard', help=\n 'Whether this project should be displayed on your dashboard.')\n", (9399, 9577), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((9594, 9715), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Use Tasks as"""', 'default': '"""Tasks"""', 'help': '"""Label used for the tasks of the project."""', 'translate': '(True)'}), "(string='Use Tasks as', default='Tasks', help=\n 'Label used for the tasks of the project.', translate=True)\n", (9605, 9715), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((9723, 9794), 'odoo.fields.One2many', 'fields.One2many', (['"""project.task"""', '"""project_id"""'], {'string': '"""Task Activities"""'}), "('project.task', 'project_id', string='Task Activities')\n", (9738, 9794), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), 
((9822, 9929), 'odoo.fields.Many2one', 'fields.Many2one', (['"""resource.calendar"""'], {'string': '"""Working Time"""', 'related': '"""company_id.resource_calendar_id"""'}), "('resource.calendar', string='Working Time', related=\n 'company_id.resource_calendar_id')\n", (9837, 9929), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((9957, 10071), 'odoo.fields.Many2many', 'fields.Many2many', (['"""project.task.type"""', '"""project_task_type_rel"""', '"""project_id"""', '"""type_id"""'], {'string': '"""Tasks Stages"""'}), "('project.task.type', 'project_task_type_rel', 'project_id',\n 'type_id', string='Tasks Stages')\n", (9973, 10071), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((10085, 10151), 'odoo.fields.Integer', 'fields.Integer', ([], {'compute': '"""_compute_task_count"""', 'string': '"""Task Count"""'}), "(compute='_compute_task_count', string='Task Count')\n", (10099, 10151), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((10167, 10303), 'odoo.fields.One2many', 'fields.One2many', (['"""project.task"""', '"""project_id"""'], {'string': '"""Tasks"""', 'domain': "['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)]"}), "('project.task', 'project_id', string='Tasks', domain=['|',\n ('stage_id.fold', '=', False), ('stage_id', '=', False)])\n", (10182, 10303), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((10343, 10379), 'odoo.fields.Integer', 'fields.Integer', ([], {'string': '"""Color Index"""'}), "(string='Color Index')\n", (10357, 10379), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((10394, 10503), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.users"""'], {'string': '"""Project Manager"""', 'default': '(lambda self: self.env.user)', 'tracking': '(True)'}), "('res.users', string='Project Manager', default=lambda self:\n self.env.user, tracking=True)\n", (10409, 10503), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((10520, 10614), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Use email alias"""', 'compute': '"""_compute_alias_enabled"""', 'readonly': '(False)'}), "(string='Use email alias', compute='_compute_alias_enabled',\n readonly=False)\n", (10534, 10614), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((10626, 10898), 'odoo.fields.Many2one', 'fields.Many2one', (['"""mail.alias"""'], {'string': '"""Alias"""', 'ondelete': '"""restrict"""', 'required': '(True)', 'help': '"""Internal email associated with this project. Incoming emails are automatically synchronized with Tasks (or optionally Issues if the Issue Tracker module is installed)."""'}), "('mail.alias', string='Alias', ondelete='restrict', required\n =True, help=\n 'Internal email associated with this project. 
Incoming emails are automatically synchronized with Tasks (or optionally Issues if the Issue Tracker module is installed).'\n )\n", (10641, 10898), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((10933, 11536), 'odoo.fields.Selection', 'fields.Selection', (["[('followers', 'Invited internal users'), ('employees',\n 'All internal users'), ('portal',\n 'Invited portal users and all internal users')]"], {'string': '"""Visibility"""', 'required': '(True)', 'default': '"""portal"""', 'help': '"""Defines the visibility of the tasks of the project:\n- Invited internal users: employees may only see the followed project and tasks.\n- All internal users: employees may see all project and tasks.\n- Invited portal and all internal users: employees may see everything. Portal users may see project and tasks followed by\n them or by someone of their company."""'}), '([(\'followers\', \'Invited internal users\'), (\'employees\',\n \'All internal users\'), (\'portal\',\n \'Invited portal users and all internal users\')], string=\'Visibility\',\n required=True, default=\'portal\', help=\n """Defines the visibility of the tasks of the project:\n- Invited internal users: employees may only see the followed project and tasks.\n- All internal users: employees may see all project and tasks.\n- Invited portal and all internal users: employees may see everything. Portal users may see project and tasks followed by\n them or by someone of their company."""\n )\n', (10949, 11536), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((11705, 11806), 'odoo.fields.Many2many', 'fields.Many2many', (['"""res.users"""'], {'compute': '"""_compute_allowed_users"""', 'inverse': '"""_inverse_allowed_user"""'}), "('res.users', compute='_compute_allowed_users', inverse=\n '_inverse_allowed_user')\n", (11721, 11806), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((11834, 12012), 'odoo.fields.Many2many', 'fields.Many2many', (['"""res.users"""', '"""project_allowed_internal_users_rel"""'], {'string': '"""Allowed Internal Users"""', 'default': '(lambda self: self.env.user)', 'domain': "[('share', '=', False)]"}), "('res.users', 'project_allowed_internal_users_rel', string=\n 'Allowed Internal Users', default=lambda self: self.env.user, domain=[(\n 'share', '=', False)])\n", (11850, 12012), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((12082, 12214), 'odoo.fields.Many2many', 'fields.Many2many', (['"""res.users"""', '"""project_allowed_portal_users_rel"""'], {'string': '"""Allowed Portal Users"""', 'domain': "[('share', '=', True)]"}), "('res.users', 'project_allowed_portal_users_rel', string=\n 'Allowed Portal Users', domain=[('share', '=', True)])\n", (12098, 12214), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((12226, 12324), 'odoo.fields.Integer', 'fields.Integer', ([], {'compute': '"""_compute_attached_docs_count"""', 'string': '"""Number of documents attached"""'}), "(compute='_compute_attached_docs_count', string=\n 'Number of documents attached')\n", (12240, 12324), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((12337, 12369), 'odoo.fields.Date', 'fields.Date', ([], {'string': '"""Start Date"""'}), "(string='Start Date')\n", (12348, 12369), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((12381, 12445), 'odoo.fields.Date', 'fields.Date', ([], {'string': '"""Expiration Date"""', 'index': '(True)', 'tracking': '(True)'}), 
"(string='Expiration Date', index=True, tracking=True)\n", (12392, 12445), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((12471, 12681), 'odoo.fields.Many2one', 'fields.Many2one', (['"""project.project"""'], {'string': '"""Sub-task Project"""', 'ondelete': '"""restrict"""', 'help': '"""Project in which sub-tasks of the current project will be created. It can be the current project itself."""'}), "('project.project', string='Sub-task Project', ondelete=\n 'restrict', help=\n 'Project in which sub-tasks of the current project will be created. It can be the current project itself.'\n )\n", (12486, 12681), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((13003, 13074), 'odoo.fields.Datetime', 'fields.Datetime', ([], {'compute': '"""_compute_rating_request_deadline"""', 'store': '(True)'}), "(compute='_compute_rating_request_deadline', store=True)\n", (13018, 13074), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((13228, 13696), 'odoo.fields.Selection', 'fields.Selection', (["[('stage', 'Rating when changing stage'), ('periodic', 'Periodical Rating')]", '"""Customer Ratings Status"""'], {'default': '"""stage"""', 'required': '(True)', 'help': '"""How to get customer feedback?\n- Rating when changing stage: an email will be sent when a task is pulled in another stage.\n- Periodical Rating: email will be sent periodically.\n\nDon\'t forget to set up the mail templates on the stages for which you want to get the customer\'s feedbacks."""'}), '([(\'stage\', \'Rating when changing stage\'), (\'periodic\',\n \'Periodical Rating\')], \'Customer Ratings Status\', default=\'stage\',\n required=True, help=\n """How to get customer feedback?\n- Rating when changing stage: an email will be sent when a task is pulled in another stage.\n- Periodical Rating: email will be sent periodically.\n\nDon\'t forget to set up the mail templates on the stages for which you want to get the customer\'s feedbacks."""\n )\n', (13244, 13696), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((13789, 14026), 'odoo.fields.Selection', 'fields.Selection', (["[('daily', 'Daily'), ('weekly', 'Weekly'), ('bimonthly', 'Twice a Month'),\n ('monthly', 'Once a Month'), ('quarterly', 'Quarterly'), ('yearly',\n 'Yearly')]", '"""Rating Frequency"""'], {'required': '(True)', 'default': '"""monthly"""'}), "([('daily', 'Daily'), ('weekly', 'Weekly'), ('bimonthly',\n 'Twice a Month'), ('monthly', 'Once a Month'), ('quarterly',\n 'Quarterly'), ('yearly', 'Yearly')], 'Rating Frequency', required=True,\n default='monthly')\n", (13805, 14026), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((14230, 14261), 'odoo.api.depends', 'api.depends', (['"""partner_id.email"""'], {}), "('partner_id.email')\n", (14241, 14261), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((14711, 14742), 'odoo.api.depends', 'api.depends', (['"""partner_id.phone"""'], {}), "('partner_id.phone')\n", (14722, 14742), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((15192, 15221), 'odoo.api.onchange', 'api.onchange', (['"""alias_enabled"""'], {}), "('alias_enabled')\n", (15204, 15221), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((15492, 15559), 'odoo.api.depends', 'api.depends', (['"""allowed_internal_user_ids"""', '"""allowed_portal_user_ids"""'], {}), "('allowed_internal_user_ids', 'allowed_portal_user_ids')\n", (15503, 15559), False, 'from odoo import api, 
fields, models, tools, SUPERUSER_ID, _\n'), ((16684, 16736), 'odoo.api.depends', 'api.depends', (['"""rating_status"""', '"""rating_status_period"""'], {}), "('rating_status', 'rating_status_period')\n", (16695, 16736), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((18372, 18415), 'odoo.api.returns', 'api.returns', (['"""self"""', '(lambda value: value.id)'], {}), "('self', lambda value: value.id)\n", (18383, 18415), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((29166, 29194), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'default': '(True)'}), '(default=True)\n', (29180, 29194), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((29206, 29275), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Title"""', 'tracking': '(True)', 'required': '(True)', 'index': '(True)'}), "(string='Title', tracking=True, required=True, index=True)\n", (29217, 29275), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((29294, 29327), 'odoo.fields.Html', 'fields.Html', ([], {'string': '"""Description"""'}), "(string='Description')\n", (29305, 29327), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((29343, 29447), 'odoo.fields.Selection', 'fields.Selection', (["[('0', 'Normal'), ('1', 'Important')]"], {'default': '"""0"""', 'index': '(True)', 'string': '"""Priority"""'}), "([('0', 'Normal'), ('1', 'Important')], default='0', index=\n True, string='Priority')\n", (29359, 29447), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((29481, 29609), 'odoo.fields.Integer', 'fields.Integer', ([], {'string': '"""Sequence"""', 'index': '(True)', 'default': '(10)', 'help': '"""Gives the sequence order when displaying a list of tasks."""'}), "(string='Sequence', index=True, default=10, help=\n 'Gives the sequence order when displaying a list of tasks.')\n", (29495, 29609), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((29628, 29929), 'odoo.fields.Many2one', 'fields.Many2one', (['"""project.task.type"""'], {'string': '"""Stage"""', 'compute': '"""_compute_stage_id"""', 'store': '(True)', 'readonly': '(False)', 'ondelete': '"""restrict"""', 'tracking': '(True)', 'index': '(True)', 'default': '_get_default_stage_id', 'group_expand': '"""_read_group_stage_ids"""', 'domain': '"""[(\'project_ids\', \'=\', project_id)]"""', 'copy': '(False)'}), '(\'project.task.type\', string=\'Stage\', compute=\n \'_compute_stage_id\', store=True, readonly=False, ondelete=\'restrict\',\n tracking=True, index=True, default=_get_default_stage_id, group_expand=\n \'_read_group_stage_ids\', domain="[(\'project_ids\', \'=\', project_id)]",\n copy=False)\n', (29643, 29929), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((29950, 29997), 'odoo.fields.Many2many', 'fields.Many2many', (['"""project.tags"""'], {'string': '"""Tags"""'}), "('project.tags', string='Tags')\n", (29966, 29997), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((30017, 30181), 'odoo.fields.Selection', 'fields.Selection', (["[('normal', 'In Progress'), ('done', 'Ready'), ('blocked', 'Blocked')]"], {'string': '"""Kanban State"""', 'copy': '(False)', 'default': '"""normal"""', 'required': '(True)'}), "([('normal', 'In Progress'), ('done', 'Ready'), ('blocked',\n 'Blocked')], string='Kanban State', copy=False, default='normal',\n required=True)\n", (30033, 30181), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), 
((30232, 30331), 'odoo.fields.Char', 'fields.Char', ([], {'compute': '"""_compute_kanban_state_label"""', 'string': '"""Kanban State Label"""', 'tracking': '(True)'}), "(compute='_compute_kanban_state_label', string=\n 'Kanban State Label', tracking=True)\n", (30243, 30331), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((30345, 30401), 'odoo.fields.Datetime', 'fields.Datetime', (['"""Created On"""'], {'readonly': '(True)', 'index': '(True)'}), "('Created On', readonly=True, index=True)\n", (30360, 30401), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((30419, 30480), 'odoo.fields.Datetime', 'fields.Datetime', (['"""Last Updated On"""'], {'readonly': '(True)', 'index': '(True)'}), "('Last Updated On', readonly=True, index=True)\n", (30434, 30480), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((30496, 30557), 'odoo.fields.Datetime', 'fields.Datetime', ([], {'string': '"""Ending Date"""', 'index': '(True)', 'copy': '(False)'}), "(string='Ending Date', index=True, copy=False)\n", (30511, 30557), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((30576, 30655), 'odoo.fields.Datetime', 'fields.Datetime', ([], {'string': '"""Assigning Date"""', 'index': '(True)', 'copy': '(False)', 'readonly': '(True)'}), "(string='Assigning Date', index=True, copy=False, readonly=True)\n", (30591, 30655), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((30676, 30745), 'odoo.fields.Date', 'fields.Date', ([], {'string': '"""Deadline"""', 'index': '(True)', 'copy': '(False)', 'tracking': '(True)'}), "(string='Deadline', index=True, copy=False, tracking=True)\n", (30687, 30745), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((30775, 30861), 'odoo.fields.Datetime', 'fields.Datetime', ([], {'string': '"""Last Stage Update"""', 'index': '(True)', 'copy': '(False)', 'readonly': '(True)'}), "(string='Last Stage Update', index=True, copy=False,\n readonly=True)\n", (30790, 30861), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((30899, 31088), 'odoo.fields.Many2one', 'fields.Many2one', (['"""project.project"""'], {'string': '"""Project"""', 'compute': '"""_compute_project_id"""', 'store': '(True)', 'readonly': '(False)', 'index': '(True)', 'tracking': '(True)', 'check_company': '(True)', 'change_default': '(True)'}), "('project.project', string='Project', compute=\n '_compute_project_id', store=True, readonly=False, index=True, tracking\n =True, check_company=True, change_default=True)\n", (30914, 31088), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((31115, 31247), 'odoo.fields.Float', 'fields.Float', (['"""Initially Planned Hours"""'], {'help': '"""Time planned to achieve this task (including its sub-tasks)."""', 'tracking': '(True)'}), "('Initially Planned Hours', help=\n 'Time planned to achieve this task (including its sub-tasks).',\n tracking=True)\n", (31127, 31247), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((31267, 31503), 'odoo.fields.Float', 'fields.Float', (['"""Sub-tasks Planned Hours"""'], {'compute': '"""_compute_subtask_planned_hours"""', 'help': '"""Sum of the time planned of all the sub-tasks linked to this task. Usually less or equal to the initially time planned of this task."""'}), "('Sub-tasks Planned Hours', compute=\n '_compute_subtask_planned_hours', help=\n 'Sum of the time planned of all the sub-tasks linked to this task. 
Usually less or equal to the initially time planned of this task.'\n )\n", (31279, 31503), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((31503, 31619), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.users"""'], {'string': '"""Assigned to"""', 'default': '(lambda self: self.env.uid)', 'index': '(True)', 'tracking': '(True)'}), "('res.users', string='Assigned to', default=lambda self:\n self.env.uid, index=True, tracking=True)\n", (31518, 31619), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((31657, 31852), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.partner"""'], {'string': '"""Customer"""', 'compute': '"""_compute_partner_id"""', 'store': '(True)', 'readonly': '(False)', 'domain': '"""[\'|\', (\'company_id\', \'=\', False), (\'company_id\', \'=\', company_id)]"""'}), '(\'res.partner\', string=\'Customer\', compute=\n \'_compute_partner_id\', store=True, readonly=False, domain=\n "[\'|\', (\'company_id\', \'=\', False), (\'company_id\', \'=\', company_id)]")\n', (31672, 31852), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((31892, 31954), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""partner_id.is_company"""', 'readonly': '(True)'}), "(related='partner_id.is_company', readonly=True)\n", (31906, 31954), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((31983, 32042), 'odoo.fields.Many2one', 'fields.Many2one', ([], {'related': '"""partner_id.commercial_partner_id"""'}), "(related='partner_id.commercial_partner_id')\n", (31998, 32042), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((32063, 32207), 'odoo.fields.Char', 'fields.Char', ([], {'compute': '"""_compute_partner_email"""', 'inverse': '"""_inverse_partner_email"""', 'string': '"""Email"""', 'readonly': '(False)', 'store': '(True)', 'copy': '(False)'}), "(compute='_compute_partner_email', inverse=\n '_inverse_partner_email', string='Email', readonly=False, store=True,\n copy=False)\n", (32074, 32207), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((32236, 32380), 'odoo.fields.Char', 'fields.Char', ([], {'compute': '"""_compute_partner_phone"""', 'inverse': '"""_inverse_partner_phone"""', 'string': '"""Phone"""', 'readonly': '(False)', 'store': '(True)', 'copy': '(False)'}), "(compute='_compute_partner_phone', inverse=\n '_inverse_partner_phone', string='Phone', readonly=False, store=True,\n copy=False)\n", (32247, 32380), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((32410, 32474), 'odoo.fields.Char', 'fields.Char', (['"""Ribbon message"""'], {'compute': '"""_compute_ribbon_message"""'}), "('Ribbon message', compute='_compute_ribbon_message')\n", (32421, 32474), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((32494, 32548), 'odoo.fields.Char', 'fields.Char', ([], {'related': '"""partner_id.city"""', 'readonly': '(False)'}), "(related='partner_id.city', readonly=False)\n", (32505, 32548), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((32566, 32670), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.users"""'], {'string': '"""Project Manager"""', 'related': '"""project_id.user_id"""', 'readonly': '(True)'}), "('res.users', string='Project Manager', related=\n 'project_id.user_id', readonly=True)\n", (32581, 32670), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((32683, 32855), 'odoo.fields.Many2one', 'fields.Many2one', 
(['"""res.company"""'], {'string': '"""Company"""', 'compute': '"""_compute_company_id"""', 'store': '(True)', 'readonly': '(False)', 'required': '(True)', 'copy': '(True)', 'default': '_default_company_id'}), "('res.company', string='Company', compute=\n '_compute_company_id', store=True, readonly=False, required=True, copy=\n True, default=_default_company_id)\n", (32698, 32855), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((32875, 32911), 'odoo.fields.Integer', 'fields.Integer', ([], {'string': '"""Color Index"""'}), "(string='Color Index')\n", (32889, 32911), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((32929, 33025), 'odoo.fields.Char', 'fields.Char', ([], {'related': '"""user_id.email"""', 'string': '"""User Email"""', 'readonly': '(True)', 'related_sudo': '(False)'}), "(related='user_id.email', string='User Email', readonly=True,\n related_sudo=False)\n", (32940, 33025), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((33043, 33191), 'odoo.fields.One2many', 'fields.One2many', (['"""ir.attachment"""'], {'compute': '"""_compute_attachment_ids"""', 'string': '"""Main Attachments"""', 'help': '"""Attachment that don\'t come from message."""'}), '(\'ir.attachment\', compute=\'_compute_attachment_ids\', string=\n \'Main Attachments\', help="Attachment that don\'t come from message.")\n', (33058, 33191), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((33379, 33543), 'odoo.fields.Many2one', 'fields.Many2one', (['"""ir.attachment"""'], {'domain': '"""[(\'res_model\', \'=\', \'project.task\'), (\'res_id\', \'=\', id), (\'mimetype\', \'ilike\', \'image\')]"""', 'string': '"""Cover Image"""'}), '(\'ir.attachment\', domain=\n "[(\'res_model\', \'=\', \'project.task\'), (\'res_id\', \'=\', id), (\'mimetype\', \'ilike\', \'image\')]"\n , string=\'Cover Image\')\n', (33394, 33543), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((33555, 33678), 'odoo.fields.Char', 'fields.Char', ([], {'related': '"""stage_id.legend_blocked"""', 'string': '"""Kanban Blocked Explanation"""', 'readonly': '(True)', 'related_sudo': '(False)'}), "(related='stage_id.legend_blocked', string=\n 'Kanban Blocked Explanation', readonly=True, related_sudo=False)\n", (33566, 33678), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((33692, 33810), 'odoo.fields.Char', 'fields.Char', ([], {'related': '"""stage_id.legend_done"""', 'string': '"""Kanban Valid Explanation"""', 'readonly': '(True)', 'related_sudo': '(False)'}), "(related='stage_id.legend_done', string=\n 'Kanban Valid Explanation', readonly=True, related_sudo=False)\n", (33703, 33810), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((33826, 33948), 'odoo.fields.Char', 'fields.Char', ([], {'related': '"""stage_id.legend_normal"""', 'string': '"""Kanban Ongoing Explanation"""', 'readonly': '(True)', 'related_sudo': '(False)'}), "(related='stage_id.legend_normal', string=\n 'Kanban Ongoing Explanation', readonly=True, related_sudo=False)\n", (33837, 33948), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((33960, 34067), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""stage_id.is_closed"""', 'string': '"""Closing Stage"""', 'readonly': '(True)', 'related_sudo': '(False)'}), "(related='stage_id.is_closed', string='Closing Stage',\n readonly=True, related_sudo=False)\n", (33974, 34067), False, 'from odoo import api, fields, models, tools, 
SUPERUSER_ID, _\n'), ((34080, 34145), 'odoo.fields.Many2one', 'fields.Many2one', (['"""project.task"""'], {'string': '"""Parent Task"""', 'index': '(True)'}), "('project.task', string='Parent Task', index=True)\n", (34095, 34145), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((34162, 34263), 'odoo.fields.One2many', 'fields.One2many', (['"""project.task"""', '"""parent_id"""'], {'string': '"""Sub-tasks"""', 'context': "{'active_test': False}"}), "('project.task', 'parent_id', string='Sub-tasks', context={\n 'active_test': False})\n", (34177, 34263), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((34284, 34405), 'odoo.fields.Many2one', 'fields.Many2one', (['"""project.project"""'], {'related': '"""project_id.subtask_project_id"""', 'string': '"""Sub-task Project"""', 'readonly': '(True)'}), "('project.project', related='project_id.subtask_project_id',\n string='Sub-task Project', readonly=True)\n", (34299, 34405), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((34423, 34520), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Allow Sub-tasks"""', 'related': '"""project_id.allow_subtasks"""', 'readonly': '(True)'}), "(string='Allow Sub-tasks', related=\n 'project_id.allow_subtasks', readonly=True)\n", (34437, 34520), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((34536, 34602), 'odoo.fields.Integer', 'fields.Integer', (['"""Sub-task count"""'], {'compute': '"""_compute_subtask_count"""'}), "('Sub-task count', compute='_compute_subtask_count')\n", (34550, 34602), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((34620, 34770), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Email From"""', 'help': '"""These people will receive email."""', 'index': '(True)', 'compute': '"""_compute_email_from"""', 'store': '"""True"""', 'readonly': '(False)'}), "(string='Email From', help='These people will receive email.',\n index=True, compute='_compute_email_from', store='True', readonly=False)\n", (34631, 34770), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((34798, 34974), 'odoo.fields.Many2many', 'fields.Many2many', (['"""res.users"""'], {'string': '"""Visible to"""', 'groups': '"""project.group_project_manager"""', 'compute': '"""_compute_allowed_user_ids"""', 'store': '(True)', 'readonly': '(False)', 'copy': '(False)'}), "('res.users', string='Visible to', groups=\n 'project.group_project_manager', compute='_compute_allowed_user_ids',\n store=True, readonly=False, copy=False)\n", (34814, 34974), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((34999, 35090), 'odoo.fields.Selection', 'fields.Selection', ([], {'related': '"""project_id.privacy_visibility"""', 'string': '"""Project Visibility"""'}), "(related='project_id.privacy_visibility', string=\n 'Project Visibility')\n", (35015, 35090), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((35208, 35320), 'odoo.fields.Float', 'fields.Float', ([], {'compute': '"""_compute_elapsed"""', 'string': '"""Working hours to assign"""', 'store': '(True)', 'group_operator': '"""avg"""'}), "(compute='_compute_elapsed', string='Working hours to assign',\n store=True, group_operator='avg')\n", (35220, 35320), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((35343, 35454), 'odoo.fields.Float', 'fields.Float', ([], {'compute': '"""_compute_elapsed"""', 'string': '"""Working hours to close"""', 'store': 
'(True)', 'group_operator': '"""avg"""'}), "(compute='_compute_elapsed', string='Working hours to close',\n store=True, group_operator='avg')\n", (35355, 35454), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((35475, 35586), 'odoo.fields.Float', 'fields.Float', ([], {'compute': '"""_compute_elapsed"""', 'string': '"""Working days to assign"""', 'store': '(True)', 'group_operator': '"""avg"""'}), "(compute='_compute_elapsed', string='Working days to assign',\n store=True, group_operator='avg')\n", (35487, 35586), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((35608, 35718), 'odoo.fields.Float', 'fields.Float', ([], {'compute': '"""_compute_elapsed"""', 'string': '"""Working days to close"""', 'store': '(True)', 'group_operator': '"""avg"""'}), "(compute='_compute_elapsed', string='Working days to close',\n store=True, group_operator='avg')\n", (35620, 35718), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((35825, 35941), 'odoo.fields.One2many', 'fields.One2many', ([], {'domain': "(lambda self: [('model', '=', self._name), ('message_type', 'in', ['email',\n 'comment'])])"}), "(domain=lambda self: [('model', '=', self._name), (\n 'message_type', 'in', ['email', 'comment'])])\n", (35840, 35941), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((35990, 36048), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'related': '"""project_id.allow_recurring_tasks"""'}), "(related='project_id.allow_recurring_tasks')\n", (36004, 36048), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((36070, 36104), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Recurrent"""'}), "(string='Recurrent')\n", (36084, 36104), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((36127, 36212), 'odoo.fields.Integer', 'fields.Integer', ([], {'string': '"""Tasks in Recurrence"""', 'compute': '"""_compute_recurring_count"""'}), "(string='Tasks in Recurrence', compute='_compute_recurring_count'\n )\n", (36141, 36212), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((36228, 36282), 'odoo.fields.Many2one', 'fields.Many2one', (['"""project.task.recurrence"""'], {'copy': '(False)'}), "('project.task.recurrence', copy=False)\n", (36243, 36282), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((36307, 36451), 'odoo.fields.Selection', 'fields.Selection', (["[('this', 'This task'), ('subsequent', 'This and following tasks'), ('all',\n 'All tasks')]"], {'default': '"""this"""', 'store': '(False)'}), "([('this', 'This task'), ('subsequent',\n 'This and following tasks'), ('all', 'All tasks')], default='this',\n store=False)\n", (36323, 36451), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((36500, 36578), 'odoo.fields.Char', 'fields.Char', ([], {'string': '"""Next Recurrencies"""', 'compute': '"""_compute_recurrence_message"""'}), "(string='Next Recurrencies', compute='_compute_recurrence_message')\n", (36511, 36578), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((36602, 36697), 'odoo.fields.Integer', 'fields.Integer', ([], {'string': '"""Repeat Every"""', 'default': '(1)', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "(string='Repeat Every', default=1, compute='_compute_repeat',\n readonly=False)\n", (36616, 36697), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((36712, 36874), 'odoo.fields.Selection', 
'fields.Selection', (["[('day', 'Days'), ('week', 'Weeks'), ('month', 'Months'), ('year', 'Years')]"], {'default': '"""week"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "([('day', 'Days'), ('week', 'Weeks'), ('month', 'Months'),\n ('year', 'Years')], default='week', compute='_compute_repeat', readonly\n =False)\n", (36728, 36874), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((36923, 37111), 'odoo.fields.Selection', 'fields.Selection', (["[('forever', 'Forever'), ('until', 'End Date'), ('after',\n 'Number of Repetitions')]"], {'default': '"""forever"""', 'string': '"""Until"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "([('forever', 'Forever'), ('until', 'End Date'), ('after',\n 'Number of Repetitions')], default='forever', string='Until', compute=\n '_compute_repeat', readonly=False)\n", (36939, 37111), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((37153, 37226), 'odoo.fields.Date', 'fields.Date', ([], {'string': '"""End Date"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "(string='End Date', compute='_compute_repeat', readonly=False)\n", (37164, 37226), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((37247, 37341), 'odoo.fields.Integer', 'fields.Integer', ([], {'string': '"""Repetitions"""', 'default': '(1)', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "(string='Repetitions', default=1, compute='_compute_repeat',\n readonly=False)\n", (37261, 37341), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((37361, 37503), 'odoo.fields.Selection', 'fields.Selection', (["[('date', 'Date of the Month'), ('day', 'Day of the Month')]"], {'default': '"""date"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "([('date', 'Date of the Month'), ('day', 'Day of the Month'\n )], default='date', compute='_compute_repeat', readonly=False)\n", (37377, 37503), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((37544, 37683), 'odoo.fields.Selection', 'fields.Selection', (["[('date', 'Date of the Year'), ('day', 'Day of the Year')]"], {'default': '"""date"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "([('date', 'Date of the Year'), ('day', 'Day of the Year')],\n default='date', compute='_compute_repeat', readonly=False)\n", (37560, 37683), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((37714, 37785), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Mon"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "(string='Mon', compute='_compute_repeat', readonly=False)\n", (37728, 37785), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((37796, 37867), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Tue"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "(string='Tue', compute='_compute_repeat', readonly=False)\n", (37810, 37867), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((37878, 37949), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Wed"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "(string='Wed', compute='_compute_repeat', readonly=False)\n", (37892, 37949), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((37960, 38031), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Thu"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "(string='Thu', 
compute='_compute_repeat', readonly=False)\n", (37974, 38031), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((38042, 38113), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Fri"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "(string='Fri', compute='_compute_repeat', readonly=False)\n", (38056, 38113), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((38124, 38195), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Sat"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "(string='Sat', compute='_compute_repeat', readonly=False)\n", (38138, 38195), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((38206, 38277), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'string': '"""Sun"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "(string='Sun', compute='_compute_repeat', readonly=False)\n", (38220, 38277), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((38430, 38596), 'odoo.fields.Selection', 'fields.Selection', (["[('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('last', 'Last')\n ]"], {'default': '"""first"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "([('first', 'First'), ('second', 'Second'), ('third',\n 'Third'), ('last', 'Last')], default='first', compute='_compute_repeat',\n readonly=False)\n", (38446, 38596), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((38649, 38890), 'odoo.fields.Selection', 'fields.Selection', (["[('mon', 'Monday'), ('tue', 'Tuesday'), ('wed', 'Wednesday'), ('thu',\n 'Thursday'), ('fri', 'Friday'), ('sat', 'Saturday'), ('sun', 'Sunday')]"], {'string': '"""Day Of The Week"""', 'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "([('mon', 'Monday'), ('tue', 'Tuesday'), ('wed',\n 'Wednesday'), ('thu', 'Thursday'), ('fri', 'Friday'), ('sat',\n 'Saturday'), ('sun', 'Sunday')], string='Day Of The Week', compute=\n '_compute_repeat', readonly=False)\n", (38665, 38890), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((38960, 39307), 'odoo.fields.Selection', 'fields.Selection', (["[('january', 'January'), ('february', 'February'), ('march', 'March'), (\n 'april', 'April'), ('may', 'May'), ('june', 'June'), ('july', 'July'),\n ('august', 'August'), ('september', 'September'), ('october', 'October'\n ), ('november', 'November'), ('december', 'December')]"], {'compute': '"""_compute_repeat"""', 'readonly': '(False)'}), "([('january', 'January'), ('february', 'February'), (\n 'march', 'March'), ('april', 'April'), ('may', 'May'), ('june', 'June'),\n ('july', 'July'), ('august', 'August'), ('september', 'September'), (\n 'october', 'October'), ('november', 'November'), ('december',\n 'December')], compute='_compute_repeat', readonly=False)\n", (38976, 39307), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((39416, 39468), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'compute': '"""_compute_repeat_visibility"""'}), "(compute='_compute_repeat_visibility')\n", (39430, 39468), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((39491, 39543), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'compute': '"""_compute_repeat_visibility"""'}), "(compute='_compute_repeat_visibility')\n", (39505, 39543), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((39567, 39619), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'compute': 
'"""_compute_repeat_visibility"""'}), "(compute='_compute_repeat_visibility')\n", (39581, 39619), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((39644, 39696), 'odoo.fields.Boolean', 'fields.Boolean', ([], {'compute': '"""_compute_repeat_visibility"""'}), "(compute='_compute_repeat_visibility')\n", (39658, 39696), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((40036, 40121), 'odoo.api.depends', 'api.depends', (['"""recurring_task"""', '"""repeat_unit"""', '"""repeat_on_month"""', '"""repeat_on_year"""'], {}), "('recurring_task', 'repeat_unit', 'repeat_on_month',\n 'repeat_on_year')\n", (40047, 40121), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((40745, 40774), 'odoo.api.depends', 'api.depends', (['"""recurring_task"""'], {}), "('recurring_task')\n", (40756, 40774), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((41461, 41733), 'odoo.api.depends', 'api.depends', (['"""recurring_task"""', '"""repeat_interval"""', '"""repeat_unit"""', '"""repeat_type"""', '"""repeat_until"""', '"""repeat_number"""', '"""repeat_on_month"""', '"""repeat_on_year"""', '"""mon"""', '"""tue"""', '"""wed"""', '"""thu"""', '"""fri"""', '"""sat"""', '"""sun"""', '"""repeat_day"""', '"""repeat_week"""', '"""repeat_month"""', '"""repeat_weekday"""'], {}), "('recurring_task', 'repeat_interval', 'repeat_unit',\n 'repeat_type', 'repeat_until', 'repeat_number', 'repeat_on_month',\n 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun',\n 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday')\n", (41472, 41733), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((43753, 43781), 'odoo.api.depends', 'api.depends', (['"""recurrence_id"""'], {}), "('recurrence_id')\n", (43764, 43781), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((44276, 44307), 'odoo.api.depends', 'api.depends', (['"""partner_id.email"""'], {}), "('partner_id.email')\n", (44287, 44307), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((44721, 44752), 'odoo.api.depends', 'api.depends', (['"""partner_id.phone"""'], {}), "('partner_id.phone')\n", (44732, 44752), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((45166, 45225), 'odoo.api.depends', 'api.depends', (['"""partner_email"""', '"""partner_phone"""', '"""partner_id"""'], {}), "('partner_email', 'partner_phone', 'partner_id')\n", (45177, 45225), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((46019, 46046), 'odoo.api.constrains', 'api.constrains', (['"""parent_id"""'], {}), "('parent_id')\n", (46033, 46046), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((46220, 46254), 'odoo.api.constrains', 'api.constrains', (['"""allowed_user_ids"""'], {}), "('allowed_user_ids')\n", (46234, 46254), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((47106, 47181), 'odoo.api.depends', 'api.depends', (['"""project_id.allowed_user_ids"""', '"""project_id.privacy_visibility"""'], {}), "('project_id.allowed_user_ids', 'project_id.privacy_visibility')\n", (47117, 47181), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((47978, 48031), 'odoo.api.depends', 'api.depends', (['"""create_date"""', '"""date_end"""', '"""date_assign"""'], {}), "('create_date', 'date_end', 'date_assign')\n", (47989, 48031), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((49486, 
49525), 'odoo.api.depends', 'api.depends', (['"""stage_id"""', '"""kanban_state"""'], {}), "('stage_id', 'kanban_state')\n", (49497, 49525), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((50519, 50557), 'odoo.api.depends', 'api.depends', (['"""child_ids.planned_hours"""'], {}), "('child_ids.planned_hours')\n", (50530, 50557), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((50775, 50799), 'odoo.api.depends', 'api.depends', (['"""child_ids"""'], {}), "('child_ids')\n", (50786, 50799), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((50933, 50959), 'odoo.api.onchange', 'api.onchange', (['"""company_id"""'], {}), "('company_id')\n", (50945, 50959), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((51098, 51134), 'odoo.api.depends', 'api.depends', (['"""project_id.company_id"""'], {}), "('project_id.company_id')\n", (51109, 51134), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((51298, 51323), 'odoo.api.depends', 'api.depends', (['"""project_id"""'], {}), "('project_id')\n", (51309, 51323), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((51694, 51737), 'odoo.api.returns', 'api.returns', (['"""self"""', '(lambda value: value.id)'], {}), "('self', lambda value: value.id)\n", (51705, 51737), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((52069, 52096), 'odoo.api.constrains', 'api.constrains', (['"""parent_id"""'], {}), "('parent_id')\n", (52083, 52096), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((60873, 60933), 'odoo.api.depends', 'api.depends', (['"""parent_id.partner_id"""', '"""project_id.partner_id"""'], {}), "('parent_id.partner_id', 'project_id.partner_id')\n", (60884, 60933), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((61626, 61681), 'odoo.api.depends', 'api.depends', (['"""partner_id.email"""', '"""parent_id.email_from"""'], {}), "('partner_id.email', 'parent_id.email_from')\n", (61637, 61681), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((61891, 61945), 'odoo.api.depends', 'api.depends', (['"""parent_id.project_id.subtask_project_id"""'], {}), "('parent_id.project_id.subtask_project_id')\n", (61902, 61945), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((73717, 73751), 'odoo.fields.Char', 'fields.Char', (['"""Name"""'], {'required': '(True)'}), "('Name', required=True)\n", (73728, 73751), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((73764, 73822), 'odoo.fields.Integer', 'fields.Integer', ([], {'string': '"""Color"""', 'default': '_get_default_color'}), "(string='Color', default=_get_default_color)\n", (73778, 73822), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((52373, 52382), 'odoo._', '_', (['"""task"""'], {}), "('task')\n", (52374, 52382), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((57689, 57710), 'odoo.fields.Datetime.now', 'fields.Datetime.now', ([], {}), '()\n', (57708, 57710), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((73690, 73704), 'random.randint', 'randint', (['(1)', '(11)'], {}), '(1, 11)\n', (73697, 73704), False, 'from random import randint\n'), ((4130, 4147), 'odoo._', '_', (['"""Delete Stage"""'], {}), "('Delete Stage')\n", (4131, 4147), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((16447, 16665), 'odoo._', 
'_', (['"""The project cannot be shared with the recipient(s) because the privacy of the project is too restricted. Set the privacy to \'Visible by following customers\' in order to make it accessible by the recipient(s)."""'], {}), '("The project cannot be shared with the recipient(s) because the privacy of the project is too restricted. Set the privacy to \'Visible by following customers\' in order to make it accessible by the recipient(s)."\n )\n', (16448, 16665), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((21330, 21347), 'odoo._', '_', (['"""Confirmation"""'], {}), "('Confirmation')\n", (21331, 21347), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((24209, 24254), 'ast.literal_eval', 'ast.literal_eval', (["(self.alias_defaults or '{}')"], {}), "(self.alias_defaults or '{}')\n", (24225, 24254), False, 'import ast\n'), ((25957, 25975), 'odoo._', '_', (['"""Ratings of %s"""'], {}), "('Ratings of %s')\n", (25958, 25975), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((26016, 26051), 'ast.literal_eval', 'ast.literal_eval', (["action['context']"], {}), "(action['context'])\n", (26032, 26051), False, 'import ast\n'), ((41941, 41960), 'odoo.fields.Date.today', 'fields.Date.today', ([], {}), '()\n', (41958, 41960), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((48280, 48325), 'odoo.fields.Datetime.from_string', 'fields.Datetime.from_string', (['task.create_date'], {}), '(task.create_date)\n', (48307, 48325), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((50270, 50500), 'odoo._', '_', (['"""The task cannot be shared with the recipient(s) because the privacy of the project is too restricted. Set the privacy of the project to \'Visible by following customers\' in order to make it accessible by the recipient(s)."""'], {}), '("The task cannot be shared with the recipient(s) because the privacy of the project is too restricted. 
Set the privacy of the project to \'Visible by following customers\' in order to make it accessible by the recipient(s)."\n )\n', (50271, 50500), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((51891, 51916), 'odoo._', '_', (['"""%s (copy)"""', 'self.name'], {}), "('%s (copy)', self.name)\n", (51892, 51916), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((70932, 70948), 'odoo._', '_', (['"""Parent Task"""'], {}), "('Parent Task')\n", (70933, 70948), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((1232, 1244), 'odoo._', '_', (['"""Blocked"""'], {}), "('Blocked')\n", (1233, 1244), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((1494, 1504), 'odoo._', '_', (['"""Ready"""'], {}), "('Ready')\n", (1495, 1504), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((1752, 1768), 'odoo._', '_', (['"""In Progress"""'], {}), "('In Progress')\n", (1753, 1768), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((16968, 16989), 'odoo.fields.datetime.now', 'fields.datetime.now', ([], {}), '()\n', (16987, 16989), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((18569, 18583), 'odoo._', '_', (['"""%s (copy)"""'], {}), "('%s (copy)')\n", (18570, 18583), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((45574, 45664), 'odoo._', '_', (['"""By saving this change, the customer email and phone number will also be updated."""'], {}), "('By saving this change, the customer email and phone number will also be updated.'\n )\n", (45575, 45664), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((46153, 46212), 'odoo._', '_', (['"""Error! You cannot create recursive hierarchy of tasks."""'], {}), "('Error! You cannot create recursive hierarchy of tasks.')\n", (46154, 46212), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((48393, 48438), 'odoo.fields.Datetime.from_string', 'fields.Datetime.from_string', (['task.date_assign'], {}), '(task.date_assign)\n', (48420, 48438), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((48882, 48924), 'odoo.fields.Datetime.from_string', 'fields.Datetime.from_string', (['task.date_end'], {}), '(task.date_end)\n', (48909, 48924), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((54874, 54897), 'odoo.fields.Datetime.today', 'fields.Datetime.today', ([], {}), '()\n', (54895, 54897), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((55352, 55371), 'odoo.fields.Date.today', 'fields.Date.today', ([], {}), '()\n', (55369, 55371), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((55374, 55391), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (55383, 55391), False, 'from datetime import timedelta, datetime\n'), ((56704, 56725), 'odoo.fields.Datetime.now', 'fields.Datetime.now', ([], {}), '()\n', (56723, 56725), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((56967, 56988), 'odoo.fields.Datetime.now', 'fields.Datetime.now', ([], {}), '()\n', (56986, 56988), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((57288, 57311), 'odoo.fields.Datetime.today', 'fields.Datetime.today', ([], {}), '()\n', (57309, 57311), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((57805, 57857), 'odoo._', '_', (['"""Sorry. 
You can\'t set a task as its parent task."""'], {}), '("Sorry. You can\'t set a task as its parent task.")\n', (57806, 57857), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((58038, 58116), 'odoo._', '_', (['"""You cannot archive recurring tasks. Please, disable the recurrence first."""'], {}), "('You cannot archive recurring tasks. Please, disable the recurrence first.')\n", (58039, 58116), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((60412, 60433), 'odoo.fields.Datetime.now', 'fields.Datetime.now', ([], {}), '()\n', (60431, 60433), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((60624, 60701), 'odoo._', '_', (['"""You cannot delete recurring tasks. Please, disable the recurrence first."""'], {}), "('You cannot delete recurring tasks. Please, disable the recurrence first.')\n", (60625, 60701), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((67155, 67170), 'odoo._', '_', (['"""No Subject"""'], {}), "('No Subject')\n", (67156, 67170), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((21857, 21972), 'odoo._', '_', (['"""You cannot delete a project containing tasks. You can either archive it or first delete all of its tasks."""'], {}), "('You cannot delete a project containing tasks. You can either archive it or first delete all of its tasks.'\n )\n", (21858, 21972), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((26621, 26650), 'odoo._', '_', (['"""Unknown Analytic Account"""'], {}), "('Unknown Analytic Account')\n", (26622, 26650), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((27680, 27701), 'odoo.fields.Datetime.now', 'fields.Datetime.now', ([], {}), '()\n', (27699, 27701), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((42252, 42273), 'datetime.timedelta', 'timedelta', ([], {'days': 'delta'}), '(days=delta)\n', (42261, 42273), False, 'from datetime import timedelta, datetime\n'), ((43289, 43342), 'odoo._', '_', (['"""<p><em>Number of tasks: %(tasks_count)s</em></p>"""'], {}), "('<p><em>Number of tasks: %(tasks_count)s</em></p>')\n", (43290, 43342), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((45733, 45801), 'odoo._', '_', (['"""By saving this change, the customer email will also be updated."""'], {}), "('By saving this change, the customer email will also be updated.')\n", (45734, 45801), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((46592, 46704), 'odoo._', '_', (['"""The project visibility setting doesn\'t allow portal users to see the project\'s tasks. (%s)"""', 'user_names'], {}), '("The project visibility setting doesn\'t allow portal users to see the project\'s tasks. (%s)"\n , user_names)\n', (46593, 46704), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((52237, 52298), 'odoo._', '_', (['"""Error! You cannot create recursive hierarchy of task(s)."""'], {}), "('Error! 
You cannot create recursive hierarchy of task(s).')\n", (52238, 52298), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((55082, 55105), 'odoo.fields.Datetime.today', 'fields.Datetime.today', ([], {}), '()\n', (55103, 55105), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((59603, 59725), 'odoo.osv.expression.OR', 'OR', (["[recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id), (\n 'create_date', '>=', task.create_date)]]"], {}), "([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id),\n ('create_date', '>=', task.create_date)]])\n", (59605, 59725), False, 'from odoo.osv.expression import OR\n'), ((64586, 64600), 'odoo._', '_', (['"""I take it"""'], {}), "('I take it')\n", (64587, 64600), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((68339, 68358), 'odoo._', '_', (['"""Customer Email"""'], {}), "('Customer Email')\n", (68340, 68358), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((68389, 68402), 'odoo._', '_', (['"""Customer"""'], {}), "('Customer')\n", (68390, 68402), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((43726, 43745), 'odoo.fields.Date.today', 'fields.Date.today', ([], {}), '()\n', (43743, 43745), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((45875, 45950), 'odoo._', '_', (['"""By saving this change, the customer phone number will also be updated."""'], {}), "('By saving this change, the customer phone number will also be updated.')\n", (45876, 45950), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((59036, 59059), 'odoo.fields.Datetime.today', 'fields.Datetime.today', ([], {}), '()\n', (59057, 59059), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((55234, 55257), 'odoo.fields.Datetime.today', 'fields.Datetime.today', ([], {}), '()\n', (55255, 55257), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n'), ((68639, 68658), 'odoo._', '_', (['"""Customer Email"""'], {}), "('Customer Email')\n", (68640, 68658), False, 'from odoo import api, fields, models, tools, SUPERUSER_ID, _\n')] |
import time
import logger
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import DocumentGenerator
from membase.api.rest_client import RestConnection
from couchbase_helper.documentgenerator import BlobGenerator
class DocsTests(BaseTestCase):
def setUp(self):
super(DocsTests, self).setUp()
def tearDown(self):
super(DocsTests, self).tearDown()
def test_docs_int_big_values(self):
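        # Creates documents whose "number" field is 2**degree; when error=True the
        # load is expected to fail for oversized values, otherwise the documents
        # must be created and the bucket stats verified.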
degree = self.input.param("degree", 53)
error = self.input.param("error", False)
number = 2**degree
first = ['james', 'sharon']
template = '{{ "number": {0}, "first_name": "{1}" }}'
gen_load = DocumentGenerator('test_docs', template, [number,], first,
start=0, end=self.num_items)
self.log.info("create %s documents..." % (self.num_items))
try:
self._load_all_buckets(self.master, gen_load, "create", 0)
self._verify_stats_all_buckets([self.master])
except Exception as e:
if error:
self.log.info("Unable to create documents as expected: %s" % str(e))
else:
raise e
else:
if error:
self.fail("Able to create documents with value: %s" % str(number))
#docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75
"""
    1) Configure a cluster with 4 Couchbase Buckets and 1 Memcached Bucket.
    2) Total memory quota allocated for Couchbase should be approx. 75% (12G) of total RAM.
    3) Load initial data on all buckets up to 60% of each memory quota
    4) Pick one bucket and do the following (5) to (8)
    5) Insert new items up to high_wat_mark (75% of memory quota)
6) Expire/Delete/update random items (ratio of expiration vs delete ~= 8:2)
7) Repeat (6) until "ep_total_del_items" is ~= (3 X # of items being loaded in (3))
8) Expire 90% of remaining items
9) Insert new items or update existing items across buckets
10) See if we can run into "Hard out of Memory" error (UI)
"""
def test_load_memory(self):
        num_items = self.quota * 1024 * 0.6 / self.value_size
        num_items = int(num_items / len(self.buckets))
        self.log.info("Load initial data on all buckets up to 60% of each memory quota")
gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=0,
end=num_items)
self._load_all_buckets(self.master, gen_load, "create", 0)
self.log.info("Insert new items upto high_wat_mark (75% of memory quota)")
for bucket in self.buckets:
if bucket.type != 'memcached':
bucket_to_load = bucket
break
new_num_items = self.quota * 1024 * 0.15 / self.value_size
gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=num_items,
end=new_num_items + num_items)
load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load,
bucket_to_load.kvs[1], 'create', compression=self.sdk_compression)
load.result()
end_time = time.time() + 60*60*3
while time.time() < end_time:
self.log.info("check memUsed")
rest = RestConnection(self.master)
for bucket in rest.get_buckets():
self.log.info("*****************************\
bucket %s: memUsed %s\
****************************" % (bucket.name,
bucket.stats.memUsed))
self.log.info("Expire/Delete/update random items (ratio \
of expiration vs delete ~= 8:2)")
current_num = 0
wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load,
'all', 'ep_total_del_items', '==', num_items * 3)
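            # Keep generating update/expire/delete traffic until the bucket reports
            # roughly 3x the initially loaded items as deleted (ep_total_del_items).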
while wait_task.state != "FINISHED":
gen_update = BlobGenerator('mike', 'mike-', self.value_size, start=current_num,
end=current_num + 5000)
gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 5000,
end=current_num + 6600)
gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 6600,
end=current_num + 7000)
tasks = []
tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,
gen_update, bucket_to_load.kvs[1], 'update', compression=self.sdk_compression))
tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,
gen_expire, bucket_to_load.kvs[1], 'update', exp=1,
compression=self.sdk_compression))
tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,
gen_delete, bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression))
for task in tasks:
task.result()
current_num += 7000
self.log.info("Expire 90% of remaining items")
remain_keys, _ = bucket_to_load.kvs[1].key_set()
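            # Keys look like 'mike<index>': dropping the 4-character prefix leaves the
            # numeric index, which becomes the end bound of the expiry generator below.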
            last_key_to_expire = int(remain_keys[int(0.9 * len(remain_keys))][4:])
gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=0,
end=last_key_to_expire)
load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,
gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression)
load.result()
self.log.info("Insert new items or update existing items across buckets")
gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=new_num_items + num_items,
end=new_num_items * 2 + num_items)
self._load_all_buckets(self.master, gen_load, "create", 0)
| [
"couchbase_helper.documentgenerator.BlobGenerator",
"time.time",
"couchbase_helper.documentgenerator.DocumentGenerator",
"membase.api.rest_client.RestConnection"
] | [((683, 774), 'couchbase_helper.documentgenerator.DocumentGenerator', 'DocumentGenerator', (['"""test_docs"""', 'template', '[number]', 'first'], {'start': '(0)', 'end': 'self.num_items'}), "('test_docs', template, [number], first, start=0, end=self\n .num_items)\n", (700, 774), False, 'from couchbase_helper.documentgenerator import DocumentGenerator\n'), ((2397, 2468), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""mike"""', '"""mike-"""', 'self.value_size'], {'start': '(0)', 'end': 'num_items'}), "('mike', 'mike-', self.value_size, start=0, end=num_items)\n", (2410, 2468), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((2876, 2976), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""mike"""', '"""mike-"""', 'self.value_size'], {'start': 'num_items', 'end': '(new_num_items + num_items)'}), "('mike', 'mike-', self.value_size, start=num_items, end=\n new_num_items + num_items)\n", (2889, 2976), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((5651, 5736), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""mike"""', '"""mike-"""', 'self.value_size'], {'start': '(0)', 'end': 'last_key_to_expire'}), "('mike', 'mike-', self.value_size, start=0, end=last_key_to_expire\n )\n", (5664, 5736), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((6090, 6209), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""mike"""', '"""mike-"""', 'self.value_size'], {'start': '(new_num_items + num_items)', 'end': '(new_num_items * 2 + num_items)'}), "('mike', 'mike-', self.value_size, start=new_num_items +\n num_items, end=new_num_items * 2 + num_items)\n", (6103, 6209), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((3238, 3249), 'time.time', 'time.time', ([], {}), '()\n', (3247, 3249), False, 'import time\n'), ((3274, 3285), 'time.time', 'time.time', ([], {}), '()\n', (3283, 3285), False, 'import time\n'), ((3360, 3387), 'membase.api.rest_client.RestConnection', 'RestConnection', (['self.master'], {}), '(self.master)\n', (3374, 3387), False, 'from membase.api.rest_client import RestConnection\n'), ((4150, 4245), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""mike"""', '"""mike-"""', 'self.value_size'], {'start': 'current_num', 'end': '(current_num + 5000)'}), "('mike', 'mike-', self.value_size, start=current_num, end=\n current_num + 5000)\n", (4163, 4245), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((4308, 4409), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""mike"""', '"""mike-"""', 'self.value_size'], {'start': '(current_num + 5000)', 'end': '(current_num + 6600)'}), "('mike', 'mike-', self.value_size, start=current_num + 5000,\n end=current_num + 6600)\n", (4321, 4409), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((4473, 4574), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""mike"""', '"""mike-"""', 'self.value_size'], {'start': '(current_num + 6600)', 'end': '(current_num + 7000)'}), "('mike', 'mike-', self.value_size, start=current_num + 6600,\n end=current_num + 7000)\n", (4486, 4574), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n')] |
from bs4 import BeautifulSoup
import requests
from urllib.request import urlretrieve
ROOT = 'http://pdaotao.duytan.edu.vn'
def get_url_sub(sub, id_, page):
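    # Scan the first `page` pages of the exam list for an entry whose title contains
    # the subject name and id (joined by '', ' ' or '_') and return its detail-page URL.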
all_td_tag = []
for i in range(1, page+1):
print('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i))
r = requests.get('http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i))
soup = BeautifulSoup(r.text, 'lxml')
list_td_tag = soup.find_all('td', attrs={'style': 'padding-top:10px'})
all_td_tag = all_td_tag + list_td_tag
for td_tag in all_td_tag:
if (((sub+id_) in str(td_tag.a.contents[0])) or
((sub+' '+id_) in str(td_tag.a.contents[0])) or
((sub+'_'+id_) in str(td_tag.a.contents[0]))):
print('\nComplete!!!')
print(' '.join(str(td_tag.a.string).split()))
print(str(td_tag.a['href']).replace('..', ROOT))
return str(td_tag.a['href']).replace('..', ROOT)
def get_excel_url(url):
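    # Extract the absolute URL of the attached Excel file from an exam detail page.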
r = requests.get(url)
soup = BeautifulSoup(r.text,'lxml')
list_span_tags = soup.find_all('span',class_='txt_l4')
excel_url = list_span_tags[1].a['href'].replace('..',ROOT)
return excel_url
# a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN')
def main():
    sub = input('Enter the subject name: ')
    id_ = input('Enter the subject id: ')
url = get_url_sub(sub,id_,4)
    if url is None:
        print('No subject matching ({} {}) was found :('.format(sub, id_))
        return
else:
print('get excel URL!!!')
excel_url = get_excel_url(url)
excel_url = excel_url.replace(' ','%20')
print('Download excel file!!!')
save_at = 'C:/Users/truon/Desktop/'
filename = save_at + excel_url.split('/')[-1].replace('%20',' ')
urlretrieve(excel_url,filename)
print('Done!')
main()
| [
"bs4.BeautifulSoup",
"requests.get",
"urllib.request.urlretrieve"
] | [((1004, 1021), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1016, 1021), False, 'import requests\n'), ((1033, 1062), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (1046, 1062), False, 'from bs4 import BeautifulSoup\n'), ((401, 430), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (414, 430), False, 'from bs4 import BeautifulSoup\n'), ((1818, 1850), 'urllib.request.urlretrieve', 'urlretrieve', (['excel_url', 'filename'], {}), '(excel_url, filename)\n', (1829, 1850), False, 'from urllib.request import urlretrieve\n')] |
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uploader module that handles batch jobs sent from Task Queue.
This module receives batch jobs from TaskQueue. For each job, the module loads
data from BigQuery and sends it to Merchant Center.
"""
import http
import json
import logging
import socket
from typing import List, Tuple
import flask
from google.cloud import bigquery
from google.cloud import logging as cloud_logging
from googleapiclient import errors
import batch_creator
import bigquery_client
import constants
import content_api_client
import result_recorder
import shoptimizer_client
from models import failure
from models import process_result
from models import upload_task
app = flask.Flask(__name__)
_logging_client = cloud_logging.Client()
_logging_client.setup_logging(log_level=logging.DEBUG)
_SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json'
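# Maps each upload operation to the Content API method used for its batch entries.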
OPERATION_TO_METHOD = {
constants.Operation.UPSERT: constants.Method.INSERT,
constants.Operation.DELETE: constants.Method.DELETE,
constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT
}
# Used to check if this is the last retry for alerting purposes.
# Should match task_retry_limit in appengine/initiator/queue.yaml.
TASK_RETRY_LIMIT = 5
@app.route('/insert_items', methods=['POST'])
def run_insert_process() -> Tuple[str, http.HTTPStatus]:
"""Handles uploading tasks pushed from Task Queue."""
return _run_process(constants.Operation.UPSERT)
@app.route('/delete_items', methods=['POST'])
def run_delete_process() -> Tuple[str, http.HTTPStatus]:
"""Handles deleting tasks pushed from Task Queue."""
return _run_process(constants.Operation.DELETE)
@app.route('/prevent_expiring_items', methods=['POST'])
def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]:
"""Handles prevent expiring tasks pushed from Task Queue."""
return _run_process(constants.Operation.PREVENT_EXPIRING)
def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]:
"""Handles tasks pushed from Task Queue.
When tasks are enqueued to Task Queue by initiator, this method will be
called. It extracts necessary information from a Task Queue message. The
following processes are executed in this function:
- Loading items to process from BigQuery.
- Converts items into a batch that can be sent to Content API for Shopping.
- Sending items to Content API for Shopping (Merchant Center).
- Records the results of the Content API for Shopping call.
Args:
operation: Type of operation to perform on the items.
Returns:
The result of HTTP request.
"""
request_body = json.loads(flask.request.data.decode('utf-8'))
task = upload_task.UploadTask.from_json(request_body)
if task.batch_size == 0:
return 'OK', http.HTTPStatus.OK
batch_number = int(task.start_index / task.batch_size) + 1
logging.info(
'%s started. Batch #%d info: start_index: %d, batch_size: %d,'
'initiation timestamp: %s', operation.value, batch_number,
task.start_index, task.batch_size, task.timestamp)
try:
items = _load_items_from_bigquery(operation, task)
except errors.HttpError:
return 'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR
result = process_result.ProcessResult([], [], [])
try:
if not items:
logging.error(
'Batch #%d, operation %s: 0 items loaded from BigQuery so batch not sent to Content API. Start_index: %d, batch_size: %d,'
'initiation timestamp: %s', batch_number, operation.value,
task.start_index, task.batch_size, task.timestamp)
return 'No items to process', http.HTTPStatus.OK
method = OPERATION_TO_METHOD.get(operation)
# Creates batch from items loaded from BigQuery
original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch(
batch_number, items, method)
# Optimizes batch via Shoptimizer for upsert/prevent_expiring operations
if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON:
batch_to_send_to_content_api = _create_optimized_batch(
original_batch, batch_number, operation)
else:
batch_to_send_to_content_api = original_batch
# Sends batch of items to Content API for Shopping
api_client = content_api_client.ContentApiClient()
successful_item_ids, item_failures = api_client.process_items(
batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method)
result = process_result.ProcessResult(
successfully_processed_item_ids=successful_item_ids,
content_api_failures=item_failures,
skipped_item_ids=skipped_item_ids)
except errors.HttpError as http_error:
error_status_code = http_error.resp.status
error_reason = http_error.resp.reason
result = _handle_content_api_error(error_status_code, error_reason,
batch_number, http_error, items,
operation, task)
return error_reason, error_status_code
except socket.timeout as timeout_error:
error_status_code = http.HTTPStatus.REQUEST_TIMEOUT
error_reason = 'Socket timeout'
result = _handle_content_api_error(error_status_code, error_reason,
batch_number, timeout_error, items,
operation, task)
return error_reason, error_status_code
else:
logging.info(
'Batch #%d with operation %s and initiation timestamp %s successfully processed %s items, failed to process %s items and skipped %s items.',
batch_number, operation.value, task.timestamp,
result.get_success_count(), result.get_failure_count(),
result.get_skipped_count())
finally:
recorder = result_recorder.ResultRecorder.from_service_account_json(
constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING,
constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING,
constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING)
recorder.insert_result(operation.value, result, task.timestamp,
batch_number)
return 'OK', http.HTTPStatus.OK
def _load_items_from_bigquery(
operation: constants.Operation,
task: upload_task.UploadTask) -> List[bigquery.Row]:
"""Loads items from BigQuery.
Args:
operation: The operation to be performed on this batch of items.
task: The Cloud Task object that initiated this request.
Returns:
The list of items loaded from BigQuery.
"""
table_id = f'process_items_to_{operation.value}_{task.timestamp}'
bq_client = bigquery_client.BigQueryClient.from_service_account_json(
constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING,
table_id)
try:
items_iterator = bq_client.load_items(task.start_index, task.batch_size)
except errors.HttpError as http_error:
logging.exception(
'Error loading items from %s.%s. HTTP status: %s. Error: %s',
constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status,
http_error.resp.reason)
raise
return list(items_iterator)
def _create_optimized_batch(batch: constants.Batch, batch_number: int,
operation: constants.Operation) -> constants.Batch:
"""Creates an optimized batch by calling the Shoptimizer API.
Args:
batch: The batch of product data to be optimized.
batch_number: The number that identifies this batch.
operation: The operation to be performed on this batch (upsert, delete,
prevent_expiring).
Returns:
The batch returned from the Shoptimizer API Client.
"""
try:
optimization_client = shoptimizer_client.ShoptimizerClient(
batch_number, operation)
except (OSError, ValueError):
return batch
return optimization_client.shoptimize(batch)
def _handle_content_api_error(
error_status_code: int, error_reason: str, batch_num: int, error: Exception,
item_rows: List[bigquery.Row], operation: constants.Operation,
task: upload_task.UploadTask) -> process_result.ProcessResult:
"""Logs network related errors returned from Content API and returns a list of item failures.
Args:
error_status_code: HTTP status code from Content API.
error_reason: The reason for the error.
batch_num: The batch number.
error: The error thrown by Content API.
item_rows: The items being processed in this batch.
operation: The operation to be performed on this batch of items.
task: The Cloud Task object that initiated this request.
Returns:
The list of items that failed due to the error, wrapped in a
process_result.
"""
logging.warning(
'Batch #%d with operation %s and initiation timestamp %s failed. HTTP status: %s. Error: %s',
batch_num, operation.value, task.timestamp, error_status_code,
error_reason)
# If the batch API call received an HttpError, mark every id as failed.
item_failures = [
failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason)
for item_row in item_rows
]
api_result = process_result.ProcessResult([], item_failures, [])
if content_api_client.suggest_retry(
error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT:
logging.warning(
'Batch #%d with operation %s and initiation timestamp %s will be requeued for retry',
batch_num, operation.value, task.timestamp)
else:
logging.error(
'Batch #%d with operation %s and initiation timestamp %s failed and will not be retried. Error: %s',
batch_num, operation.value, task.timestamp, error)
return api_result
def _get_execution_attempt() -> int:
"""Returns the number of times this task has previously been executed.
If the execution count header does not exist, it means the request did not
come from Cloud Tasks.
In this case, there will be no retry, so set execution attempt to the retry
limit.
Returns:
int, the number of times this task has previously been executed.
"""
execution_attempt = flask.request.headers.get(
'X-AppEngine-TaskExecutionCount', '')
if execution_attempt:
return int(execution_attempt)
else:
return TASK_RETRY_LIMIT
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
| [
"models.upload_task.UploadTask.from_json",
"models.process_result.ProcessResult",
"content_api_client.ContentApiClient",
"flask.Flask",
"batch_creator.create_batch",
"result_recorder.ResultRecorder.from_service_account_json",
"flask.request.data.decode",
"bigquery_client.BigQueryClient.from_service_account_json",
"logging.warning",
"content_api_client.suggest_retry",
"google.cloud.logging.Client",
"shoptimizer_client.ShoptimizerClient",
"logging.exception",
"logging.error",
"logging.info",
"flask.request.headers.get"
] | [((1246, 1267), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (1257, 1267), False, 'import flask\n'), ((1287, 1309), 'google.cloud.logging.Client', 'cloud_logging.Client', ([], {}), '()\n', (1307, 1309), True, 'from google.cloud import logging as cloud_logging\n'), ((3229, 3275), 'models.upload_task.UploadTask.from_json', 'upload_task.UploadTask.from_json', (['request_body'], {}), '(request_body)\n', (3261, 3275), False, 'from models import upload_task\n'), ((3404, 3600), 'logging.info', 'logging.info', (['"""%s started. Batch #%d info: start_index: %d, batch_size: %d,initiation timestamp: %s"""', 'operation.value', 'batch_number', 'task.start_index', 'task.batch_size', 'task.timestamp'], {}), "(\n '%s started. Batch #%d info: start_index: %d, batch_size: %d,initiation timestamp: %s'\n , operation.value, batch_number, task.start_index, task.batch_size,\n task.timestamp)\n", (3416, 3600), False, 'import logging\n'), ((3797, 3837), 'models.process_result.ProcessResult', 'process_result.ProcessResult', (['[]', '[]', '[]'], {}), '([], [], [])\n', (3825, 3837), False, 'from models import process_result\n'), ((7171, 7315), 'bigquery_client.BigQueryClient.from_service_account_json', 'bigquery_client.BigQueryClient.from_service_account_json', (['constants.GCP_SERVICE_ACCOUNT_PATH', 'constants.DATASET_ID_FOR_PROCESSING', 'table_id'], {}), '(constants.\n GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING, table_id)\n', (7227, 7315), False, 'import bigquery_client\n'), ((9229, 9429), 'logging.warning', 'logging.warning', (['"""Batch #%d with operation %s and initiation timestamp %s failed. HTTP status: %s. Error: %s"""', 'batch_num', 'operation.value', 'task.timestamp', 'error_status_code', 'error_reason'], {}), "(\n 'Batch #%d with operation %s and initiation timestamp %s failed. HTTP status: %s. 
Error: %s'\n , batch_num, operation.value, task.timestamp, error_status_code,\n error_reason)\n", (9244, 9429), False, 'import logging\n'), ((9660, 9711), 'models.process_result.ProcessResult', 'process_result.ProcessResult', (['[]', 'item_failures', '[]'], {}), '([], item_failures, [])\n', (9688, 9711), False, 'from models import process_result\n'), ((10620, 10683), 'flask.request.headers.get', 'flask.request.headers.get', (['"""X-AppEngine-TaskExecutionCount"""', '""""""'], {}), "('X-AppEngine-TaskExecutionCount', '')\n", (10645, 10683), False, 'import flask\n'), ((3184, 3218), 'flask.request.data.decode', 'flask.request.data.decode', (['"""utf-8"""'], {}), "('utf-8')\n", (3209, 3218), False, 'import flask\n'), ((4364, 4419), 'batch_creator.create_batch', 'batch_creator.create_batch', (['batch_number', 'items', 'method'], {}), '(batch_number, items, method)\n', (4390, 4419), False, 'import batch_creator\n'), ((4848, 4885), 'content_api_client.ContentApiClient', 'content_api_client.ContentApiClient', ([], {}), '()\n', (4883, 4885), False, 'import content_api_client\n'), ((5048, 5209), 'models.process_result.ProcessResult', 'process_result.ProcessResult', ([], {'successfully_processed_item_ids': 'successful_item_ids', 'content_api_failures': 'item_failures', 'skipped_item_ids': 'skipped_item_ids'}), '(successfully_processed_item_ids=\n successful_item_ids, content_api_failures=item_failures,\n skipped_item_ids=skipped_item_ids)\n', (5076, 5209), False, 'from models import process_result\n'), ((6335, 6575), 'result_recorder.ResultRecorder.from_service_account_json', 'result_recorder.ResultRecorder.from_service_account_json', (['constants.GCP_SERVICE_ACCOUNT_PATH', 'constants.DATASET_ID_FOR_MONITORING', 'constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING', 'constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING'], {}), '(constants.\n GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING,\n constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING, constants.\n TABLE_ID_FOR_ITEM_RESULTS_MONITORING)\n', (6391, 6575), False, 'import result_recorder\n'), ((8238, 8299), 'shoptimizer_client.ShoptimizerClient', 'shoptimizer_client.ShoptimizerClient', (['batch_number', 'operation'], {}), '(batch_number, operation)\n', (8274, 8299), False, 'import shoptimizer_client\n'), ((9718, 9769), 'content_api_client.suggest_retry', 'content_api_client.suggest_retry', (['error_status_code'], {}), '(error_status_code)\n', (9750, 9769), False, 'import content_api_client\n'), ((9830, 9985), 'logging.warning', 'logging.warning', (['"""Batch #%d with operation %s and initiation timestamp %s will be requeued for retry"""', 'batch_num', 'operation.value', 'task.timestamp'], {}), "(\n 'Batch #%d with operation %s and initiation timestamp %s will be requeued for retry'\n , batch_num, operation.value, task.timestamp)\n", (9845, 9985), False, 'import logging\n'), ((10005, 10180), 'logging.error', 'logging.error', (['"""Batch #%d with operation %s and initiation timestamp %s failed and will not be retried. Error: %s"""', 'batch_num', 'operation.value', 'task.timestamp', 'error'], {}), "(\n 'Batch #%d with operation %s and initiation timestamp %s failed and will not be retried. Error: %s'\n , batch_num, operation.value, task.timestamp, error)\n", (10018, 10180), False, 'import logging\n'), ((3869, 4126), 'logging.error', 'logging.error', (['"""Batch #%d, operation %s: 0 items loaded from BigQuery so batch not sent to Content API. 
Start_index: %d, batch_size: %d,initiation timestamp: %s"""', 'batch_number', 'operation.value', 'task.start_index', 'task.batch_size', 'task.timestamp'], {}), "(\n 'Batch #%d, operation %s: 0 items loaded from BigQuery so batch not sent to Content API. Start_index: %d, batch_size: %d,initiation timestamp: %s'\n , batch_number, operation.value, task.start_index, task.batch_size,\n task.timestamp)\n", (3882, 4126), False, 'import logging\n'), ((7453, 7635), 'logging.exception', 'logging.exception', (['"""Error loading items from %s.%s. HTTP status: %s. Error: %s"""', 'constants.DATASET_ID_FOR_PROCESSING', 'table_id', 'http_error.resp.status', 'http_error.resp.reason'], {}), "('Error loading items from %s.%s. HTTP status: %s. Error: %s',\n constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status,\n http_error.resp.reason)\n", (7470, 7635), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
'''
Framework: Tensorflow
Training samples: 1600
Validation samples: 400
RNN with 128 units
Optimizer: Adam
Epoch: 100
Loss: Cross Entropy
Activation function: Relu for network and Soft-max for regression
Regularization: Drop-out, keep_prob = 0.8
Accuracy of Validation set: 95%
'''
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from data_denbigh import *
X, Y = getDenbighData()
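# X: integer-encoded token sequences, Y: binary class labels from the Denbigh data set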
#Hyperparams
neurons_num = 128 # Number of neurons in the RNN layer
keep_prob = 0.5 # Keep probability for the drop-out regularization
learning_rate = 0.001 # Learning rate for mini-batch SGD
batch_size = 32 # Batch size
n_epoch = 100 # Number of epoch
#Data preprocessing: pad the sequences to a fixed length and one-hot encode the labels
X = pad_sequences(X, maxlen=5, value=0.)
Y = to_categorical(Y, 2)
#Build the network
net = tflearn.input_data([None, 5])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, Y, validation_set=0.2, show_metric=True,
batch_size=batch_size, n_epoch=n_epoch)
model.save('./model.tfl') | [
"tflearn.data_utils.to_categorical",
"tflearn.embedding",
"tflearn.DNN",
"tflearn.data_utils.pad_sequences",
"tflearn.simple_rnn",
"tflearn.regression",
"tflearn.fully_connected",
"tflearn.input_data"
] | [((824, 861), 'tflearn.data_utils.pad_sequences', 'pad_sequences', (['X'], {'maxlen': '(5)', 'value': '(0.0)'}), '(X, maxlen=5, value=0.0)\n', (837, 861), False, 'from tflearn.data_utils import to_categorical, pad_sequences\n'), ((865, 885), 'tflearn.data_utils.to_categorical', 'to_categorical', (['Y', '(2)'], {}), '(Y, 2)\n', (879, 885), False, 'from tflearn.data_utils import to_categorical, pad_sequences\n'), ((911, 940), 'tflearn.input_data', 'tflearn.input_data', (['[None, 5]'], {}), '([None, 5])\n', (929, 940), False, 'import tflearn\n'), ((947, 1002), 'tflearn.embedding', 'tflearn.embedding', (['net'], {'input_dim': '(10000)', 'output_dim': '(128)'}), '(net, input_dim=10000, output_dim=128)\n', (964, 1002), False, 'import tflearn\n'), ((1009, 1064), 'tflearn.simple_rnn', 'tflearn.simple_rnn', (['net', 'neurons_num'], {'dropout': 'keep_prob'}), '(net, neurons_num, dropout=keep_prob)\n', (1027, 1064), False, 'import tflearn\n'), ((1071, 1124), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(2)'], {'activation': '"""softmax"""'}), "(net, 2, activation='softmax')\n", (1094, 1124), False, 'import tflearn\n'), ((1131, 1239), 'tflearn.regression', 'tflearn.regression', (['net'], {'optimizer': '"""adam"""', 'learning_rate': 'learning_rate', 'loss': '"""categorical_crossentropy"""'}), "(net, optimizer='adam', learning_rate=learning_rate, loss\n ='categorical_crossentropy')\n", (1149, 1239), False, 'import tflearn\n'), ((1244, 1283), 'tflearn.DNN', 'tflearn.DNN', (['net'], {'tensorboard_verbose': '(0)'}), '(net, tensorboard_verbose=0)\n', (1255, 1283), False, 'import tflearn\n')] |
import numpy as np
import tensorflow as tf
H = 2
N = 2
M = 3
BS = 10
def my_softmax(arr):
max_elements = np.reshape(np.max(arr, axis = 2), (BS, N, 1))
arr = arr - max_elements
exp_array = np.exp(arr)
print (exp_array)
sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1))
return exp_array /sum_array
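# Sanity-check sketch (not part of the original script): the max subtraction is only for
# numerical stability, so my_softmax should agree with a naive softmax on small inputs:
#   _demo = np.random.rand(BS, N, M)
#   assert np.allclose(my_softmax(_demo),
#                      np.exp(_demo) / np.sum(np.exp(_demo), axis=2, keepdims=True))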
def masked_softmax(logits, mask, dim):
"""
Takes masked softmax over given dimension of logits.
Inputs:
logits: Numpy array. We want to take softmax over dimension dim.
mask: Numpy array of same shape as logits.
Has 1s where there's real data in logits, 0 where there's padding
dim: int. dimension over which to take softmax
Returns:
masked_logits: Numpy array same shape as logits.
This is the same as logits, but with 1e30 subtracted
(i.e. very large negative number) in the padding locations.
prob_dist: Numpy array same shape as logits.
The result of taking softmax over masked_logits in given dimension.
Should be 0 in padding locations.
Should sum to 1 over given dimension.
"""
exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30) # -large where there's padding, 0 elsewhere
print (exp_mask)
masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large
prob_dist = tf.nn.softmax(masked_logits, dim)
return masked_logits, prob_dist
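# Illustrative example (hypothetical values): with logits [1., 2., 3.] and mask [1, 1, 0],
# the padded logit is shifted to roughly -1e30, so the returned distribution is
# approximately [softmax([1., 2.]), 0.]; it still sums to 1 and puts no mass on padding.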
def test_build_similarity(contexts, questions):
w_sim_1 = tf.get_variable('w_sim_1',
initializer=w_1) # 2 * H
w_sim_2 = tf.get_variable('w_sim_2',
initializer=w_2) # 2 * self.hidden_size
w_sim_3 = tf.get_variable('w_sim_3',
initializer=w_3) # 2 * self.hidden_size
q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) # N x BS x M x 2H
q_tile = tf.transpose(q_tile, (1, 0, 3, 2)) # BS x N x 2H x M
contexts = tf.expand_dims(contexts, -1) # BS x N x 2H x 1
result = (contexts * q_tile) # BS x N x 2H x M
tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M])
result = tf.transpose(result, (0, 1, 3, 2)) # BS x N x M x 2H
result = tf.reshape(result, (-1, N * M, 2 * H)) # BS x (NxM) x 2H
tf.assert_equal(tf.shape(result), [BS, N*M, 2*H])
# w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1])
# w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1])
# w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1])
term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N
term1 = tf.reshape(term1, (-1, N))
term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M
term2 = tf.reshape(term2, (-1, M))
term3 = tf.matmul(tf.reshape(result, (BS * N * M, 2* H)), tf.expand_dims(w_sim_3, -1))
term3 = tf.reshape(term3, (-1, N, M)) # BS x N x M
S = tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2, (-1, 1, M))
return S
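# The code above computes the trilinear (BiDAF-style) similarity
#   S[b, i, j] = w_sim_1 . c_i  +  w_sim_2 . q_j  +  w_sim_3 . (c_i * q_j)
# where c_i is the i-th context vector, q_j the j-th question vector and * is element-wise
# multiplication; term1, term2 and term3 are those three pieces broadcast into a
# BS x N x M score matrix.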
def test_build_sim_mask():
context_mask = np.array([True, True]) # BS x N
question_mask = np.array([True, True, False]) # BS x M
context_mask = np.tile(context_mask, [BS, 1])
question_mask = np.tile(question_mask, [BS, 1])
context_mask = tf.get_variable('context_mask', initializer=context_mask)
question_mask = tf.get_variable('question_mask', initializer=question_mask)
context_mask = tf.expand_dims(context_mask, -1) # BS x N x 1
question_mask = tf.expand_dims(question_mask, -1) # BS x M x 1
question_mask = tf.transpose(question_mask, (0, 2, 1)) # BS x 1 x M
sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32),
tf.cast(question_mask, dtype=tf.int32)) # BS x N x M
return sim_mask
def test_build_c2q(S, S_mask, questions):
    _, alpha = masked_softmax(S, S_mask, 2) # BS x N x M
return tf.matmul(alpha, questions)
def test_build_q2c(S, S_mask, contexts):
# S = BS x N x M
# contexts = BS x N x 2H
m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N
beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x 1
beta = tf.transpose(beta, (0, 2, 1))
q2c = tf.matmul(beta, contexts)
return m, beta, q2c
def test_concatenation(c2q, q2c):
q2c = tf.tile(q2c, (1, N, 1))
output = tf.concat([c2q, q2c], axis=2)
tf.assert_equal(tf.shape(output), [BS, N, 4*H])
return output
if __name__== "__main__":
w_1 = np.array([1., 2., 3., 4.])
w_2 = np.array([5., 6., 7., 8.])
w_3 = np.array([13., 12., 11., 10.])
c = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]) # BS x N x 2H
q = np.array([[[1., 2., 3., 0.], [5., 6., 7., 4.], [8., 9. , 10., 11.]]]) # BS x M x 2H
c = np.tile(c, [BS, 1, 1])
q = np.tile(q, [BS, 1, 1])
questions = tf.get_variable('questions', initializer=q)
contexts = tf.get_variable('contexts', initializer=c)
S = test_build_similarity(contexts, questions)
mask = test_build_sim_mask()
c2q = test_build_c2q(S, mask, questions)
m, beta, q2c = test_build_q2c(S, mask, contexts)
output = test_concatenation(c2q, q2c)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
S_result, mask_result, c2q_r = sess.run([S, mask, c2q])
actual_result = np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]), [BS, 1, 1])
assert np.array_equal(actual_result, S_result), 'Arrays are not equal'
print ("Building similarity matrix is successful!")
print ("Context 2 Question attention")
m_r, beta_r, q2c_r = sess.run([m, beta, q2c])
output_r = sess.run(output)
| [
"tensorflow.tile",
"tensorflow.shape",
"tensorflow.get_variable",
"tensorflow.transpose",
"numpy.array",
"tensorflow.nn.softmax",
"tensorflow.cast",
"tensorflow.Session",
"numpy.max",
"numpy.exp",
"tensorflow.concat",
"tensorflow.matmul",
"numpy.tile",
"tensorflow.add",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.global_variables_initializer",
"numpy.sum",
"numpy.array_equal"
] | [((201, 212), 'numpy.exp', 'np.exp', (['arr'], {}), '(arr)\n', (207, 212), True, 'import numpy as np\n'), ((1260, 1284), 'tensorflow.add', 'tf.add', (['logits', 'exp_mask'], {}), '(logits, exp_mask)\n', (1266, 1284), True, 'import tensorflow as tf\n'), ((1347, 1380), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['masked_logits', 'dim'], {}), '(masked_logits, dim)\n', (1360, 1380), True, 'import tensorflow as tf\n'), ((1481, 1524), 'tensorflow.get_variable', 'tf.get_variable', (['"""w_sim_1"""'], {'initializer': 'w_1'}), "('w_sim_1', initializer=w_1)\n", (1496, 1524), True, 'import tensorflow as tf\n'), ((1555, 1598), 'tensorflow.get_variable', 'tf.get_variable', (['"""w_sim_2"""'], {'initializer': 'w_2'}), "('w_sim_2', initializer=w_2)\n", (1570, 1598), True, 'import tensorflow as tf\n'), ((1644, 1687), 'tensorflow.get_variable', 'tf.get_variable', (['"""w_sim_3"""'], {'initializer': 'w_3'}), "('w_sim_3', initializer=w_3)\n", (1659, 1687), True, 'import tensorflow as tf\n'), ((1816, 1850), 'tensorflow.transpose', 'tf.transpose', (['q_tile', '(1, 0, 3, 2)'], {}), '(q_tile, (1, 0, 3, 2))\n', (1828, 1850), True, 'import tensorflow as tf\n'), ((1884, 1912), 'tensorflow.expand_dims', 'tf.expand_dims', (['contexts', '(-1)'], {}), '(contexts, -1)\n', (1898, 1912), True, 'import tensorflow as tf\n'), ((2052, 2086), 'tensorflow.transpose', 'tf.transpose', (['result', '(0, 1, 3, 2)'], {}), '(result, (0, 1, 3, 2))\n', (2064, 2086), True, 'import tensorflow as tf\n'), ((2118, 2156), 'tensorflow.reshape', 'tf.reshape', (['result', '(-1, N * M, 2 * H)'], {}), '(result, (-1, N * M, 2 * H))\n', (2128, 2156), True, 'import tensorflow as tf\n'), ((2522, 2548), 'tensorflow.reshape', 'tf.reshape', (['term1', '(-1, N)'], {}), '(term1, (-1, N))\n', (2532, 2548), True, 'import tensorflow as tf\n'), ((2659, 2685), 'tensorflow.reshape', 'tf.reshape', (['term2', '(-1, M)'], {}), '(term2, (-1, M))\n', (2669, 2685), True, 'import tensorflow as tf\n'), ((2789, 2818), 'tensorflow.reshape', 'tf.reshape', (['term3', '(-1, N, M)'], {}), '(term3, (-1, N, M))\n', (2799, 2818), True, 'import tensorflow as tf\n'), ((2969, 2991), 'numpy.array', 'np.array', (['[True, True]'], {}), '([True, True])\n', (2977, 2991), True, 'import numpy as np\n'), ((3021, 3050), 'numpy.array', 'np.array', (['[True, True, False]'], {}), '([True, True, False])\n', (3029, 3050), True, 'import numpy as np\n'), ((3079, 3109), 'numpy.tile', 'np.tile', (['context_mask', '[BS, 1]'], {}), '(context_mask, [BS, 1])\n', (3086, 3109), True, 'import numpy as np\n'), ((3130, 3161), 'numpy.tile', 'np.tile', (['question_mask', '[BS, 1]'], {}), '(question_mask, [BS, 1])\n', (3137, 3161), True, 'import numpy as np\n'), ((3181, 3238), 'tensorflow.get_variable', 'tf.get_variable', (['"""context_mask"""'], {'initializer': 'context_mask'}), "('context_mask', initializer=context_mask)\n", (3196, 3238), True, 'import tensorflow as tf\n'), ((3259, 3318), 'tensorflow.get_variable', 'tf.get_variable', (['"""question_mask"""'], {'initializer': 'question_mask'}), "('question_mask', initializer=question_mask)\n", (3274, 3318), True, 'import tensorflow as tf\n'), ((3338, 3370), 'tensorflow.expand_dims', 'tf.expand_dims', (['context_mask', '(-1)'], {}), '(context_mask, -1)\n', (3352, 3370), True, 'import tensorflow as tf\n'), ((3404, 3437), 'tensorflow.expand_dims', 'tf.expand_dims', (['question_mask', '(-1)'], {}), '(question_mask, -1)\n', (3418, 3437), True, 'import tensorflow as tf\n'), ((3471, 3509), 'tensorflow.transpose', 'tf.transpose', (['question_mask', '(0, 2, 1)'], 
{}), '(question_mask, (0, 2, 1))\n', (3483, 3509), True, 'import tensorflow as tf\n'), ((3781, 3808), 'tensorflow.matmul', 'tf.matmul', (['alpha', 'questions'], {}), '(alpha, questions)\n', (3790, 3808), True, 'import tensorflow as tf\n'), ((4051, 4080), 'tensorflow.transpose', 'tf.transpose', (['beta', '(0, 2, 1)'], {}), '(beta, (0, 2, 1))\n', (4063, 4080), True, 'import tensorflow as tf\n'), ((4091, 4116), 'tensorflow.matmul', 'tf.matmul', (['beta', 'contexts'], {}), '(beta, contexts)\n', (4100, 4116), True, 'import tensorflow as tf\n'), ((4186, 4209), 'tensorflow.tile', 'tf.tile', (['q2c', '(1, N, 1)'], {}), '(q2c, (1, N, 1))\n', (4193, 4209), True, 'import tensorflow as tf\n'), ((4223, 4252), 'tensorflow.concat', 'tf.concat', (['[c2q, q2c]'], {'axis': '(2)'}), '([c2q, q2c], axis=2)\n', (4232, 4252), True, 'import tensorflow as tf\n'), ((4360, 4390), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0]'], {}), '([1.0, 2.0, 3.0, 4.0])\n', (4368, 4390), True, 'import numpy as np\n'), ((4397, 4427), 'numpy.array', 'np.array', (['[5.0, 6.0, 7.0, 8.0]'], {}), '([5.0, 6.0, 7.0, 8.0])\n', (4405, 4427), True, 'import numpy as np\n'), ((4434, 4468), 'numpy.array', 'np.array', (['[13.0, 12.0, 11.0, 10.0]'], {}), '([13.0, 12.0, 11.0, 10.0])\n', (4442, 4468), True, 'import numpy as np\n'), ((4474, 4530), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]]'], {}), '([[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]])\n', (4482, 4530), True, 'import numpy as np\n'), ((4545, 4630), 'numpy.array', 'np.array', (['[[[1.0, 2.0, 3.0, 0.0], [5.0, 6.0, 7.0, 4.0], [8.0, 9.0, 10.0, 11.0]]]'], {}), '([[[1.0, 2.0, 3.0, 0.0], [5.0, 6.0, 7.0, 4.0], [8.0, 9.0, 10.0, 11.0]]]\n )\n', (4553, 4630), True, 'import numpy as np\n'), ((4637, 4659), 'numpy.tile', 'np.tile', (['c', '[BS, 1, 1]'], {}), '(c, [BS, 1, 1])\n', (4644, 4659), True, 'import numpy as np\n'), ((4668, 4690), 'numpy.tile', 'np.tile', (['q', '[BS, 1, 1]'], {}), '(q, [BS, 1, 1])\n', (4675, 4690), True, 'import numpy as np\n'), ((4709, 4752), 'tensorflow.get_variable', 'tf.get_variable', (['"""questions"""'], {'initializer': 'q'}), "('questions', initializer=q)\n", (4724, 4752), True, 'import tensorflow as tf\n'), ((4768, 4810), 'tensorflow.get_variable', 'tf.get_variable', (['"""contexts"""'], {'initializer': 'c'}), "('contexts', initializer=c)\n", (4783, 4810), True, 'import tensorflow as tf\n'), ((5048, 5081), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5079, 5081), True, 'import tensorflow as tf\n'), ((121, 140), 'numpy.max', 'np.max', (['arr'], {'axis': '(2)'}), '(arr, axis=2)\n', (127, 140), True, 'import numpy as np\n'), ((262, 287), 'numpy.sum', 'np.sum', (['exp_array'], {'axis': '(2)'}), '(exp_array, axis=2)\n', (268, 287), True, 'import numpy as np\n'), ((1740, 1768), 'tensorflow.expand_dims', 'tf.expand_dims', (['questions', '(0)'], {}), '(questions, 0)\n', (1754, 1768), True, 'import tensorflow as tf\n'), ((2002, 2018), 'tensorflow.shape', 'tf.shape', (['result'], {}), '(result)\n', (2010, 2018), True, 'import tensorflow as tf\n'), ((2195, 2211), 'tensorflow.shape', 'tf.shape', (['result'], {}), '(result)\n', (2203, 2211), True, 'import tensorflow as tf\n'), ((2435, 2472), 'tensorflow.reshape', 'tf.reshape', (['contexts', '(BS * N, 2 * H)'], {}), '(contexts, (BS * N, 2 * H))\n', (2445, 2472), True, 'import tensorflow as tf\n'), ((2472, 2499), 'tensorflow.expand_dims', 'tf.expand_dims', (['w_sim_1', '(-1)'], {}), '(w_sim_1, -1)\n', (2486, 2499), True, 'import tensorflow as 
tf\n'), ((2571, 2609), 'tensorflow.reshape', 'tf.reshape', (['questions', '(BS * M, 2 * H)'], {}), '(questions, (BS * M, 2 * H))\n', (2581, 2609), True, 'import tensorflow as tf\n'), ((2609, 2636), 'tensorflow.expand_dims', 'tf.expand_dims', (['w_sim_2', '(-1)'], {}), '(w_sim_2, -1)\n', (2623, 2636), True, 'import tensorflow as tf\n'), ((2708, 2747), 'tensorflow.reshape', 'tf.reshape', (['result', '(BS * N * M, 2 * H)'], {}), '(result, (BS * N * M, 2 * H))\n', (2718, 2747), True, 'import tensorflow as tf\n'), ((2748, 2775), 'tensorflow.expand_dims', 'tf.expand_dims', (['w_sim_3', '(-1)'], {}), '(w_sim_3, -1)\n', (2762, 2775), True, 'import tensorflow as tf\n'), ((2879, 2908), 'tensorflow.reshape', 'tf.reshape', (['term2', '(-1, 1, M)'], {}), '(term2, (-1, 1, M))\n', (2889, 2908), True, 'import tensorflow as tf\n'), ((3548, 3585), 'tensorflow.cast', 'tf.cast', (['context_mask'], {'dtype': 'tf.int32'}), '(context_mask, dtype=tf.int32)\n', (3555, 3585), True, 'import tensorflow as tf\n'), ((3599, 3637), 'tensorflow.cast', 'tf.cast', (['question_mask'], {'dtype': 'tf.int32'}), '(question_mask, dtype=tf.int32)\n', (3606, 3637), True, 'import tensorflow as tf\n'), ((4005, 4021), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['m'], {}), '(m)\n', (4018, 4021), True, 'import tensorflow as tf\n'), ((4273, 4289), 'tensorflow.shape', 'tf.shape', (['output'], {}), '(output)\n', (4281, 4289), True, 'import tensorflow as tf\n'), ((5091, 5103), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5101, 5103), True, 'import tensorflow as tf\n'), ((5308, 5347), 'numpy.array_equal', 'np.array_equal', (['actual_result', 'S_result'], {}), '(actual_result, S_result)\n', (5322, 5347), True, 'import numpy as np\n'), ((1139, 1163), 'tensorflow.cast', 'tf.cast', (['mask', '"""float64"""'], {}), "(mask, 'float64')\n", (1146, 1163), True, 'import tensorflow as tf\n'), ((2840, 2869), 'tensorflow.reshape', 'tf.reshape', (['term1', '(-1, N, 1)'], {}), '(term1, (-1, N, 1))\n', (2850, 2869), True, 'import tensorflow as tf\n'), ((3927, 3960), 'tensorflow.cast', 'tf.cast', (['S_mask'], {'dtype': 'tf.float64'}), '(S_mask, dtype=tf.float64)\n', (3934, 3960), True, 'import tensorflow as tf\n'), ((5232, 5279), 'numpy.array', 'np.array', (['[[228, 772, 1372], [548, 1828, 3140]]'], {}), '([[228, 772, 1372], [548, 1828, 3140]])\n', (5240, 5279), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from astropy import convolution
from scipy.signal import medfilt
import astropy.units as u
from ..spectra.spectrum1d import Spectrum1D
from ..tests.spectral_examples import simulated_spectra
from ..manipulation.smoothing import (convolution_smooth, box_smooth,
gaussian_smooth, trapezoid_smooth,
median_smooth)
def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01):
"""
There are two things to compare for each set of smoothing:
1. Compare the smoothed flux from the astropy machinery vs
the smoothed flux from specutils. This is done by
comparing flux_smooth1 and flux_smooth2.
2. Next we want to compare the smoothed flux to the original
flux. This is a little more difficult as smoothing will
make a difference for median filter, but less so for
convolution based smoothing if the kernel is normalized
(area under the kernel = 1).
In this second case the rtol (relative tolerance) is used
judiciously.
"""
# Compare, element by element, the two smoothed fluxes.
assert np.allclose(flux_smooth1, flux_smooth2)
# Compare the total spectral flux of the smoothed to the original.
assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol)
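# Note (explanatory, not part of the original tests): astropy's convolve normalizes the
# kernel by default (normalize_kernel=True), so convolution-based smoothing approximately
# conserves total flux and a tight rtol works; the median filter gives no such guarantee,
# which is why its test below uses a looser rtol.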
def test_smooth_custom_kernel(simulated_spectra):
"""
    Test CustomKernel smoothing with correct parameters.
"""
# Create the original spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Create a custom kernel (some weird asymmetric-ness)
numpy_kernel = np.array([0.5, 1, 2, 0.5, 0.2])
numpy_kernel = numpy_kernel / np.sum(numpy_kernel)
custom_kernel = convolution.CustomKernel(numpy_kernel)
flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel)
# Calculate the custom smoothed
spec1_smoothed = convolution_smooth(spec1, custom_kernel)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)
@pytest.mark.parametrize("width", [1, 2.3])
def test_smooth_box_good(simulated_spectra, width):
"""
    Test Box1DKernel smoothing with correct parameters.
Width values need to be a number greater than 0.
"""
# Create the original spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Calculate the smoothed flux using Astropy
box_kernel = convolution.Box1DKernel(width)
flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel)
# Calculate the box smoothed
spec1_smoothed = box_smooth(spec1, width)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("width", [-1, 0, 'a'])
def test_smooth_box_bad(simulated_spectra, width):
"""
    Test Box1DKernel smoothing with incorrect parameters.
Width values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
# Test bad input parameters
with pytest.raises(ValueError):
box_smooth(spec1, width)
@pytest.mark.parametrize("stddev", [1, 2.3])
def test_smooth_gaussian_good(simulated_spectra, stddev):
"""
    Test Gaussian1DKernel smoothing with correct parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Calculate the smoothed flux using Astropy
gaussian_kernel = convolution.Gaussian1DKernel(stddev)
flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel)
# Test gaussian smoothing
spec1_smoothed = gaussian_smooth(spec1, stddev)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("stddev", [-1, 0, 'a'])
def test_smooth_gaussian_bad(simulated_spectra, stddev):
"""
    Test Gaussian1DKernel smoothing with incorrect parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
    # Test bad input parameters
with pytest.raises(ValueError):
gaussian_smooth(spec1, stddev)
@pytest.mark.parametrize("stddev", [1, 2.3])
def test_smooth_trapezoid_good(simulated_spectra, stddev):
"""
    Test Trapezoid1DKernel smoothing with correct parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Create the flux_smoothed which is what we want to compare to
trapezoid_kernel = convolution.Trapezoid1DKernel(stddev)
flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel)
# Test trapezoid smoothing
spec1_smoothed = trapezoid_smooth(spec1, stddev)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("stddev", [-1, 0, 'a'])
def test_smooth_trapezoid_bad(simulated_spectra, stddev):
"""
    Test Trapezoid1DKernel smoothing with incorrect parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
# Test bad parameters
with pytest.raises(ValueError):
trapezoid_smooth(spec1, stddev)
@pytest.mark.parametrize("width", [1, 3, 9])
def test_smooth_median_good(simulated_spectra, width):
"""
    Test Median smoothing with correct parameters.
Width values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Create the flux_smoothed which is what we want to compare to
flux_smoothed_astropy = medfilt(flux_original, width)
# Test median smoothing
spec1_smoothed = median_smooth(spec1, width)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("width", [-1, 0, 'a'])
def test_smooth_median_bad(simulated_spectra, width):
"""
    Test Median smoothing with incorrect parameters.
Width values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
# Test bad parameters
with pytest.raises(ValueError):
median_smooth(spec1, width)
| [
"numpy.allclose",
"astropy.convolution.CustomKernel",
"pytest.mark.parametrize",
"numpy.array",
"astropy.convolution.convolve",
"astropy.convolution.Gaussian1DKernel",
"astropy.convolution.Box1DKernel",
"scipy.signal.medfilt",
"numpy.sum",
"pytest.raises",
"astropy.convolution.Trapezoid1DKernel"
] | [((2105, 2147), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""width"""', '[1, 2.3]'], {}), "('width', [1, 2.3])\n", (2128, 2147), False, 'import pytest\n'), ((2941, 2987), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""width"""', "[-1, 0, 'a']"], {}), "('width', [-1, 0, 'a'])\n", (2964, 2987), False, 'import pytest\n'), ((3343, 3386), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stddev"""', '[1, 2.3]'], {}), "('stddev', [1, 2.3])\n", (3366, 3386), False, 'import pytest\n'), ((4227, 4274), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stddev"""', "[-1, 0, 'a']"], {}), "('stddev', [-1, 0, 'a'])\n", (4250, 4274), False, 'import pytest\n'), ((4661, 4704), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stddev"""', '[1, 2.3]'], {}), "('stddev', [1, 2.3])\n", (4684, 4704), False, 'import pytest\n'), ((5560, 5607), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""stddev"""', "[-1, 0, 'a']"], {}), "('stddev', [-1, 0, 'a'])\n", (5583, 5607), False, 'import pytest\n'), ((5990, 6033), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""width"""', '[1, 3, 9]'], {}), "('width', [1, 3, 9])\n", (6013, 6033), False, 'import pytest\n'), ((6780, 6826), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""width"""', "[-1, 0, 'a']"], {}), "('width', [-1, 0, 'a'])\n", (6803, 6826), False, 'import pytest\n'), ((1191, 1230), 'numpy.allclose', 'np.allclose', (['flux_smooth1', 'flux_smooth2'], {}), '(flux_smooth1, flux_smooth2)\n', (1202, 1230), True, 'import numpy as np\n'), ((1689, 1720), 'numpy.array', 'np.array', (['[0.5, 1, 2, 0.5, 0.2]'], {}), '([0.5, 1, 2, 0.5, 0.2])\n', (1697, 1720), True, 'import numpy as np\n'), ((1797, 1835), 'astropy.convolution.CustomKernel', 'convolution.CustomKernel', (['numpy_kernel'], {}), '(numpy_kernel)\n', (1821, 1835), False, 'from astropy import convolution\n'), ((1864, 1914), 'astropy.convolution.convolve', 'convolution.convolve', (['flux_original', 'custom_kernel'], {}), '(flux_original, custom_kernel)\n', (1884, 1914), False, 'from astropy import convolution\n'), ((2502, 2532), 'astropy.convolution.Box1DKernel', 'convolution.Box1DKernel', (['width'], {}), '(width)\n', (2525, 2532), False, 'from astropy import convolution\n'), ((2561, 2608), 'astropy.convolution.convolve', 'convolution.convolve', (['flux_original', 'box_kernel'], {}), '(flux_original, box_kernel)\n', (2581, 2608), False, 'from astropy import convolution\n'), ((3762, 3798), 'astropy.convolution.Gaussian1DKernel', 'convolution.Gaussian1DKernel', (['stddev'], {}), '(stddev)\n', (3790, 3798), False, 'from astropy import convolution\n'), ((3827, 3879), 'astropy.convolution.convolve', 'convolution.convolve', (['flux_original', 'gaussian_kernel'], {}), '(flux_original, gaussian_kernel)\n', (3847, 3879), False, 'from astropy import convolution\n'), ((5102, 5139), 'astropy.convolution.Trapezoid1DKernel', 'convolution.Trapezoid1DKernel', (['stddev'], {}), '(stddev)\n', (5131, 5139), False, 'from astropy import convolution\n'), ((5168, 5221), 'astropy.convolution.convolve', 'convolution.convolve', (['flux_original', 'trapezoid_kernel'], {}), '(flux_original, trapezoid_kernel)\n', (5188, 5221), False, 'from astropy import convolution\n'), ((6408, 6437), 'scipy.signal.medfilt', 'medfilt', (['flux_original', 'width'], {}), '(flux_original, width)\n', (6415, 6437), False, 'from scipy.signal import medfilt\n'), ((1755, 1775), 'numpy.sum', 'np.sum', (['numpy_kernel'], {}), '(numpy_kernel)\n', (1761, 1775), True, 'import numpy 
as np\n'), ((3280, 3305), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3293, 3305), False, 'import pytest\n'), ((4592, 4617), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4605, 4617), False, 'import pytest\n'), ((5920, 5945), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5933, 5945), False, 'import pytest\n'), ((7111, 7136), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7124, 7136), False, 'import pytest\n')] |
import json
import matplotlib.pyplot as plt
from pprint import pprint
import numpy as np
from scipy.stats import linregress
from util.stats import *
with open('data/game_stats.json', 'r') as f:
df = json.load(f)
X, y = [], []
for match, stats in df.items():
home, away = stats['home'], stats['away']
    if home['mp'] != '240' or away['mp'] != '240': continue  # keep regulation-length (240-minute) games only
try:
ft_dif = float(home['fta']) - float(away['fta'])
pt_dif = float(home['pts']) - float(away['pts'])
if abs(pt_dif) > 10: continue
except:
continue
X.append(ft_dif)
y.append(pt_dif)
c = 0
for f, p in zip(X, y):
if f * p > 0:
c += 1
print(c / len(X))
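# c / len(X) is the share of close games (|point differential| <= 10) in which the
# free-throw-attempt differential and the point differential have the same sign,
# i.e. the team that attempted more free throws also outscored its opponent.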
slope, intercept, r, p, std = linregress(X, y)
f = lambda x: x*slope + intercept
fit_y = [f(min(X)), f(max(X))]
plt.xlabel('Free Throw Attempts')
plt.ylabel('Point Differential')
plt.title('FTA vs Point Differential')
print(correlation(X, y))
plt.plot([min(X), max(X)], fit_y, color = 'red')
plt.scatter(X, y)
plt.show() | [
"scipy.stats.linregress",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.scatter",
"json.load",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((654, 670), 'scipy.stats.linregress', 'linregress', (['X', 'y'], {}), '(X, y)\n', (664, 670), False, 'from scipy.stats import linregress\n'), ((737, 770), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Free Throw Attempts"""'], {}), "('Free Throw Attempts')\n", (747, 770), True, 'import matplotlib.pyplot as plt\n'), ((771, 803), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Point Differential"""'], {}), "('Point Differential')\n", (781, 803), True, 'import matplotlib.pyplot as plt\n'), ((804, 842), 'matplotlib.pyplot.title', 'plt.title', (['"""FTA vs Point Differential"""'], {}), "('FTA vs Point Differential')\n", (813, 842), True, 'import matplotlib.pyplot as plt\n'), ((917, 934), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X', 'y'], {}), '(X, y)\n', (928, 934), True, 'import matplotlib.pyplot as plt\n'), ((936, 946), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (944, 946), True, 'import matplotlib.pyplot as plt\n'), ((201, 213), 'json.load', 'json.load', (['f'], {}), '(f)\n', (210, 213), False, 'import json\n')] |
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array, load_img
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import LabelEncoder, StandardScaler
def load_numeric_training(standardize=True):
data = pd.read_csv('../train.csv')
ID = data.pop('id')
y = data.pop('species')
y = LabelEncoder().fit(y).transform(y)
X = StandardScaler().fit(data).transform(data) if standardize else data.values
return ID.values, X, y
def load_numeric_test(standardize=True):
data = pd.read_csv('../test.csv')
ID = data.pop('id')
test = StandardScaler().fit(data).transform(data) if standardize else data.values
return ID.values, test
def resize_img(img, max_dim=96):
max_axis = np.argmax(img.size)
scale = max_dim / img.size[max_axis]
return img.resize((int(img.size[0] * scale), int(img.size[1] * scale)))
def load_img_data(ids, max_dim=96, center=True):
    X = np.zeros((len(ids), max_dim, max_dim, 1))
for i, id in enumerate(ids):
img = load_img('../images/{}.jpg'.format(id), grayscale=True)
img = resize_img(img, max_dim=max_dim)
x = img_to_array(img)
h, w = x.shape[:2]
if center:
h1 = (max_dim - h) >> 1
h2 = h1 + h
w1 = (max_dim - w) >> 1
w2 = w1 + w
else:
h1, h2, w1, w2 = 0, h, 0, w
X[i][h1:h2, w1:w2][:] = x
return np.around(X / 255)
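# The returned array holds one max_dim x max_dim x 1 image per id: each picture is rescaled
# so its longest side equals max_dim, pasted (optionally centered) onto a blank canvas, and
# np.around(X / 255) rounds every pixel to 0.0/1.0, i.e. a binary silhouette of the image.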
def load_train_data(split=0.9, random_state=7):
ID, X_num_train, y = load_numeric_training()
X_img_train = load_img_data(ID)
sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split, random_state=random_state)
train_idx, val_idx = next(sss.split(X_num_train, y))
ID_tr, X_num_tr, X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx]
ID_val, X_num_val, X_img_val, y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx]
return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val)
def load_test_data():
ID, X_num_test = load_numeric_test()
X_img_test = load_img_data(ID)
return ID, X_num_test, X_img_test
print('Loading train data ...')
(ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data()
# Prepare ID-to-label and ID-to-numerical dictionary
ID_y_dic, ID_num_dic = {}, {}
for i in range(len(ID_train)):
ID_y_dic[ID_train[i]] = y_tr[i]
ID_num_dic[ID_train[i]] = X_num_tr[i, :]
print('Loading test data ...')
ID_test, X_num_test, X_img_test = load_test_data()
# Convert label to categorical/one-hot
ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical(y_val)
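# ID_train is deliberately one-hot encoded too: ImageDataGenerator below yields it as the
# "label", so write_train_data can recover each augmented image's original id with
# np.argmax and then look up its numeric features and class label in ID_num_dic / ID_y_dic.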
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float32_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def write_val_data():
val_data_path = '../tfrecords/val_data_1.tfrecords'
if os.path.exists(val_data_path):
print('Warning: old file exists, removed.')
os.remove(val_data_path)
val_image, val_num, val_label = X_img_val.astype(np.bool), X_num_val.astype(np.float64), y_val.astype(np.bool)
print(val_image.shape, val_num.shape, val_label.shape)
val_writer = tf.python_io.TFRecordWriter(val_data_path)
print('Writing data into tfrecord ...')
for i in range(len(val_image)):
image, num, label = val_image[i], val_num[i], val_label[i]
feature = {'image': _bytes_feature(image.tostring()),
'num': _bytes_feature(num.tostring()),
'label': _bytes_feature(label.tostring())}
example = tf.train.Example(features=tf.train.Features(feature=feature))
val_writer.write(example.SerializeToString())
print('Done!')
def write_train_data():
imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True,
vertical_flip=True, fill_mode='nearest')
imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7)
print('Generating augmented images')
all_images = []
all_ID = []
p = True
for i in range(28 * 200):
print('Generating augmented images for epoch {}, batch {}'.format(i // 28, i % 28))
X, ID = imgen_train.next()
all_images.append(X)
all_ID.append(np.argmax(ID, axis=1))
all_images = np.concatenate(all_images).astype(np.bool)
all_ID = np.concatenate(all_ID)
all_y = np.zeros(all_ID.shape)
all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1]))
for i in range(len(all_ID)):
all_nums[i, :] = ID_num_dic[all_ID[i]]
all_y[i] = ID_y_dic[all_ID[i]]
all_y = to_categorical(all_y).astype(np.bool)
print('Data shapes:')
print('Image:', all_images.shape)
print('Label:', all_y.shape)
print('Numerical:', all_nums.shape)
train_data_path = '../tfrecords/train_data_1.tfrecords'
if os.path.exists(train_data_path):
print('Warning: old file exists, removed.')
os.remove(train_data_path)
# compression = tf.python_io.TFRecordCompressionType.GZIP
# train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression))
train_writer = tf.python_io.TFRecordWriter(train_data_path)
print('Writing data into tfrecord ...')
for i in range(len(all_images)):
if i % 891 == 0:
print('Writing {} th epoch data ...'.format(i // 891))
image, num, label = all_images[i], all_nums[i], all_y[i]
feature = {'image': _bytes_feature(image.tostring()),
'num': _bytes_feature(num.tostring()),
'label': _bytes_feature(label.tostring())}
example = tf.train.Example(features=tf.train.Features(feature=feature))
train_writer.write(example.SerializeToString())
print('Done!')
write_val_data()
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"os.path.exists",
"keras.preprocessing.image.img_to_array",
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"numpy.argmax",
"keras.preprocessing.image.ImageDataGenerator",
"tensorflow.train.BytesList",
"tensorflow.train.Int64List",
"sklearn.preprocessing.StandardScaler",
"numpy.zeros",
"keras.utils.np_utils.to_categorical",
"tensorflow.train.Features",
"numpy.around",
"numpy.concatenate",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.train.FloatList",
"os.remove"
] | [((419, 446), 'pandas.read_csv', 'pd.read_csv', (['"""../train.csv"""'], {}), "('../train.csv')\n", (430, 446), True, 'import pandas as pd\n'), ((706, 732), 'pandas.read_csv', 'pd.read_csv', (['"""../test.csv"""'], {}), "('../test.csv')\n", (717, 732), True, 'import pandas as pd\n'), ((920, 939), 'numpy.argmax', 'np.argmax', (['img.size'], {}), '(img.size)\n', (929, 939), True, 'import numpy as np\n'), ((1603, 1621), 'numpy.around', 'np.around', (['(X / 255)'], {}), '(X / 255)\n', (1612, 1621), True, 'import numpy as np\n'), ((1767, 1871), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'train_size': 'split', 'test_size': '(1 - split)', 'random_state': 'random_state'}), '(n_splits=1, train_size=split, test_size=1 - split,\n random_state=random_state)\n', (1789, 1871), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((2844, 2868), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['ID_train'], {}), '(ID_train)\n', (2858, 2868), False, 'from keras.utils.np_utils import to_categorical\n'), ((2870, 2890), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['y_tr'], {}), '(y_tr)\n', (2884, 2890), False, 'from keras.utils.np_utils import to_categorical\n'), ((2892, 2913), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['y_val'], {}), '(y_val)\n', (2906, 2913), False, 'from keras.utils.np_utils import to_categorical\n'), ((3312, 3341), 'os.path.exists', 'os.path.exists', (['val_data_path'], {}), '(val_data_path)\n', (3326, 3341), False, 'import os\n'), ((3620, 3662), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['val_data_path'], {}), '(val_data_path)\n', (3647, 3662), True, 'import tensorflow as tf\n'), ((4184, 4304), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(20)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)', 'vertical_flip': '(True)', 'fill_mode': '"""nearest"""'}), "(rotation_range=20, zoom_range=0.2, horizontal_flip=True,\n vertical_flip=True, fill_mode='nearest')\n", (4202, 4304), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((4800, 4822), 'numpy.concatenate', 'np.concatenate', (['all_ID'], {}), '(all_ID)\n', (4814, 4822), True, 'import numpy as np\n'), ((4835, 4857), 'numpy.zeros', 'np.zeros', (['all_ID.shape'], {}), '(all_ID.shape)\n', (4843, 4857), True, 'import numpy as np\n'), ((4873, 4919), 'numpy.zeros', 'np.zeros', (['(all_ID.shape[0], X_num_tr.shape[1])'], {}), '((all_ID.shape[0], X_num_tr.shape[1]))\n', (4881, 4919), True, 'import numpy as np\n'), ((5295, 5326), 'os.path.exists', 'os.path.exists', (['train_data_path'], {}), '(train_data_path)\n', (5309, 5326), False, 'import os\n'), ((5614, 5658), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['train_data_path'], {}), '(train_data_path)\n', (5641, 5658), True, 'import tensorflow as tf\n'), ((1320, 1337), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (1332, 1337), False, 'from keras.preprocessing.image import img_to_array, load_img\n'), ((3403, 3427), 'os.remove', 'os.remove', (['val_data_path'], {}), '(val_data_path)\n', (3412, 3427), False, 'import os\n'), ((5388, 5414), 'os.remove', 'os.remove', (['train_data_path'], {}), '(train_data_path)\n', (5397, 5414), False, 'import os\n'), ((2984, 3017), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (3002, 3017), True, 'import tensorflow as tf\n'), ((3087, 
3120), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (3105, 3120), True, 'import tensorflow as tf\n'), ((3192, 3223), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (3210, 3223), True, 'import tensorflow as tf\n'), ((4703, 4724), 'numpy.argmax', 'np.argmax', (['ID'], {'axis': '(1)'}), '(ID, axis=1)\n', (4712, 4724), True, 'import numpy as np\n'), ((4744, 4770), 'numpy.concatenate', 'np.concatenate', (['all_images'], {}), '(all_images)\n', (4758, 4770), True, 'import numpy as np\n'), ((5051, 5072), 'keras.utils.np_utils.to_categorical', 'to_categorical', (['all_y'], {}), '(all_y)\n', (5065, 5072), False, 'from keras.utils.np_utils import to_categorical\n'), ((4036, 4070), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (4053, 4070), True, 'import tensorflow as tf\n'), ((6124, 6158), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (6141, 6158), True, 'import tensorflow as tf\n'), ((507, 521), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (519, 521), False, 'from sklearn.preprocessing import LabelEncoder, StandardScaler\n'), ((550, 566), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (564, 566), False, 'from sklearn.preprocessing import LabelEncoder, StandardScaler\n'), ((768, 784), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (782, 784), False, 'from sklearn.preprocessing import LabelEncoder, StandardScaler\n')] |
import numpy as np
from skimage.transform import resize
from skimage import measure
from skimage.measure import regionprops
class OCROnObjects():
def __init__(self, license_plate):
character_objects = self.identify_boundary_objects(license_plate)
self.get_regions(character_objects, license_plate)
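    # Hypothetical usage sketch (assumes `plate` is a 2-D binary numpy image of a
    # segmented license plate):
    #   ocr = OCROnObjects(plate)
    #   if ocr.candidates:
    #       glyphs = ocr.candidates['fullscale']              # resized 20x20 character images
    #       order = np.argsort(ocr.candidates['columnsVal'])  # left-to-right reading order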
def identify_boundary_objects(self, a_license_plate):
labelImage = measure.label(a_license_plate)
character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])
minHeight, maxHeight, minWidth, maxWidth = character_dimensions
regionLists = regionprops(labelImage)
return regionLists
def get_regions(self, character_objects, a_license_plate):
"""
        Maps out the regions where the license plate characters are,
        using connected component analysis and labelling.
        Parameters:
        -----------
        character_objects: list of labelled regions (skimage regionprops)
        a_license_plate: 2D numpy binary image of the license plate
        Returns:
        --------
        a dictionary containing:
        fullscale: 3D array containing a 2D (20x20) array for each character
        columnsVal: 1D array of the starting column of each character
        coordinates: array of bounding boxes (min_row, min_col, max_row, max_col), one per character
"""
cord = []
counter=0
column_list = []
character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])
minHeight, maxHeight, minWidth, maxWidth = character_dimensions
for regions in character_objects:
minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox
character_height = maximumRow - minimumRow
character_width = maximumCol - minimumCol
roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol]
if character_height > minHeight and character_height < maxHeight and character_width > minWidth and character_width < maxWidth:
if counter == 0:
samples = resize(roi, (20,20))
cord.append(regions.bbox)
counter += 1
elif counter == 1:
roismall = resize(roi, (20,20))
samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0)
cord.append(regions.bbox)
counter+=1
else:
roismall = resize(roi, (20,20))
samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0)
cord.append(regions.bbox)
column_list.append(minimumCol)
if len(column_list) == 0:
self.candidates = {}
else:
self.candidates = {
'fullscale': samples,
'coordinates': np.array(cord),
'columnsVal': column_list
}
return self.candidates | [
"skimage.measure.regionprops",
"numpy.array",
"numpy.concatenate",
"skimage.transform.resize",
"skimage.measure.label"
] | [((412, 442), 'skimage.measure.label', 'measure.label', (['a_license_plate'], {}), '(a_license_plate)\n', (425, 442), False, 'from skimage import measure\n'), ((692, 715), 'skimage.measure.regionprops', 'regionprops', (['labelImage'], {}), '(labelImage)\n', (703, 715), False, 'from skimage.measure import regionprops\n'), ((2943, 2957), 'numpy.array', 'np.array', (['cord'], {}), '(cord)\n', (2951, 2957), True, 'import numpy as np\n'), ((2129, 2150), 'skimage.transform.resize', 'resize', (['roi', '(20, 20)'], {}), '(roi, (20, 20))\n', (2135, 2150), False, 'from skimage.transform import resize\n'), ((2295, 2316), 'skimage.transform.resize', 'resize', (['roi', '(20, 20)'], {}), '(roi, (20, 20))\n', (2301, 2316), False, 'from skimage.transform import resize\n'), ((2346, 2413), 'numpy.concatenate', 'np.concatenate', (['(samples[None, :, :], roismall[None, :, :])'], {'axis': '(0)'}), '((samples[None, :, :], roismall[None, :, :]), axis=0)\n', (2360, 2413), True, 'import numpy as np\n'), ((2540, 2561), 'skimage.transform.resize', 'resize', (['roi', '(20, 20)'], {}), '(roi, (20, 20))\n', (2546, 2561), False, 'from skimage.transform import resize\n'), ((2591, 2655), 'numpy.concatenate', 'np.concatenate', (['(samples[:, :, :], roismall[None, :, :])'], {'axis': '(0)'}), '((samples[:, :, :], roismall[None, :, :]), axis=0)\n', (2605, 2655), True, 'import numpy as np\n')] |
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
def corona_data(request):
    """Scrape India's state-wise COVID-19 statistics from mygov.in and render them."""
corona_html = requests.get("https://www.mygov.in/covid-19")
soup = BeautifulSoup(corona_html.content, 'html.parser')
state_wise_data = soup.find_all('div', class_='views-row')
information = soup.find('div', class_='information_row')
info = {
'update_data': information.find('div', class_='info_title').find('span').string,
'active_case': information.find('div', class_='active-case').find('span', class_='icount').string,
'discharge': information.find('div', class_='discharge').find('span', class_='icount').string,
'death': information.find('div', class_='death_case').find('span', class_='icount').string
}
corona_info = [
{
"state_name": state.find_all('span', class_='st_name')[0].string,
"confirm_case": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string,
"active_case": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string,
"discharge": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string,
"death": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string
} for state in state_wise_data
]
context = {
'corona_info': info,
'data': sorted(corona_info, key=lambda i: int(''.join(i['confirm_case'].replace(',', ''))), reverse=True)
}
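    # The sort key strips the thousands separators from 'confirm_case' so the states are
    # ordered numerically by confirmed cases, highest first.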
return render(request, 'coronainfo/index.html', context)
| [
"bs4.BeautifulSoup",
"django.shortcuts.render",
"requests.get"
] | [((143, 188), 'requests.get', 'requests.get', (['"""https://www.mygov.in/covid-19"""'], {}), "('https://www.mygov.in/covid-19')\n", (155, 188), False, 'import requests\n'), ((200, 249), 'bs4.BeautifulSoup', 'BeautifulSoup', (['corona_html.content', '"""html.parser"""'], {}), "(corona_html.content, 'html.parser')\n", (213, 249), False, 'from bs4 import BeautifulSoup\n'), ((1533, 1582), 'django.shortcuts.render', 'render', (['request', '"""coronainfo/index.html"""', 'context'], {}), "(request, 'coronainfo/index.html', context)\n", (1539, 1582), False, 'from django.shortcuts import render\n')] |
# Script tests GPD model using UW truth data
# Test outputs:
# - type of event tested [EQS, EQP, SUS, SUP, THS, THP, SNS, SNP, PXS, PXP]
# - phase [P, S, N] Note: N - not detected
# - model time offset (t_truth - t_model_pick)
import numpy
import math
import string
import datetime
import sys
import os
import csv
from datetime import datetime
from datetime import timedelta
# params
padding_time = 10
fudge_factor = timedelta(seconds=27)
time_diff = timedelta(seconds=10)
# file dirs
parsed_arrivals = []
model_in = []
model_out = []
comp_out = []
for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']:
arrival = "parsed_arrivals/" + etype + ".arrivals.txt"
infile = "input_files/GPD." + etype + ".in"
outfile = "output_files/GPD." + etype + ".out"
parsed_arrivals.append(arrival)
model_in.append(infile)
model_out.append(outfile)
comp_out.append("comparison_out/comp." + etype + ".out")
# ------------------
# read in UW arrival times as an array
def read_arrivals_to_arr(filename):
model_list = []
with open(filename) as f:
for ln in f:
row = ln.split()
line = []
line.extend([row[0].strip(), row[1].strip(), row[2].strip()])
formatted_time = datetime.strptime(row[3], "%Y-%m-%dT%H:%M:%S.%f") - fudge_factor
line.extend([formatted_time, row[4].strip(), row[5].strip()])
model_list.append(line)
return model_list
def arrivals_to_dictionary(arrivals):
picks = {}
for arr in arrivals:
key = datetime.strftime(arr[3], "%Y-%m-%dT%H:%M:%S.%f")
key = key[0:-7]
picks[key] = arr
return picks
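# The dictionary key is the arrival time truncated to whole seconds (the strftime
# "%Y-%m-%dT%H:%M:%S.%f" string minus its last 7 characters), so truth picks can be
# matched against the second-resolution timestamps parsed from the model .in file.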
def model_in_to_array(file):
timestamps = []
with open(file) as f:
for ln in f:
entry = ln.split()
entry = entry[0].strip()
entry = entry[len(entry)-20:len(entry)-6]
entry = entry[0:4] + "-" + entry[4:6] + "-" + entry[6:8] + "T" + entry[8:10] + ":" + entry[10:12] + ":" + entry[12:14]
# ------------- TIME STAMP ISSUES --------------------
# case 1: run if .mseed files have correct timestamps
"""
time = datetime.strptime(entry, "%Y-%m-%dT%H:%M:%S") - fudge_factor # + time_diff (might need to add this)
time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
"""
# case 2: run if .mseed files have buggy minutes in the timestamps
time = datetime.strptime(entry, "%Y-%m-%dT%H:%M:%S")
if time.second >=37 and time.second <=51:
time = time + timedelta(seconds=23) + time_diff
time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
else:
sec_int = time.second + 23
if sec_int > 59:
sec_int = sec_int - 60
sec_int = str(sec_int).zfill(2)
time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
time = time[:-2] + sec_int
time = datetime.strptime(time, "%Y-%m-%dT%H:%M:%S") + time_diff
time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
# -----------------------------------------------------
timestamps.append(time)
return timestamps
def filter_times(arrivals, model_in):
filtered = []
for key in model_in:
if key in arrivals:
filtered.append(arrivals[key])
return filtered
# read in Caltech model output and create a dictionary
def read_output_to_dict(filename):
model_dict = {}
with open(filename) as f:
for line in f:
tmp = line.split()
key = tmp[0] + "-" + tmp[1] + "-" + tmp[2]
try: # fails if date is missing floating point numbers
formatted_time = datetime.strptime(tmp[3], "%Y-%m-%dT%H:%M:%S.%f")
if key not in model_dict:
model_dict[key] = []
model_dict[key].append(formatted_time)
except:
pass
return model_dict
# lookup time in the dictionary
def key_lookup(event, phase, model_dict):
key = event[0] + "-" + event[1] + "-" + phase
times = []
if key in model_dict.keys():
times = model_dict[key]
times = time_lookup(event[3], times)
return times
# search for arrivals within the padding time window
def time_lookup(t, time_arr):
t_lower = t - timedelta(seconds=padding_time)
t_upper = t + timedelta(seconds=padding_time)
offsets = []
for time in time_arr:
if time > t_lower and time < t_upper:
offset = t - time # or format time to absolute value: abs(t - time)
offset = offset.total_seconds()
offsets.append('{:.6f}'.format(offset))
return offsets
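# With padding_time = 10, a truth arrival at 12:00:00 matches any model pick between
# 11:59:50 and 12:00:10; each offset is (truth - pick) in seconds, so a positive value
# means the model pick precedes the truth time.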
def execute_script(arrival, inf, outf, comp_out):
# write outputs to file
outp_file = open(comp_out, 'w')
truth_arr = read_arrivals_to_arr(arrival) # read in the arrival times to a list
truth_dict = arrivals_to_dictionary(truth_arr) # convert arrivals to a dictionary (key=truncated timestamp)
model_in = model_in_to_array(inf) # read in model .in file as a list
truth_arr = filter_times(truth_dict, model_in) # filter arrivals to picks that were passed to the model (.in file)
model_dict = read_output_to_dict(outf) # read output file
for event in truth_arr:
phase = event[2]
times = key_lookup(event, phase, model_dict)
if len(times) == 0:
if phase == 'P':
phase = 'S'
else:
phase = 'P'
times = key_lookup(event, phase, model_dict)
if len(times) == 0:
phase = 'N'
times = ['nan']
outp_file.write(str(event[5]) + " " + phase)
for offset in times:
outp_file.write(" " + str(offset))
outp_file.write('\n')
outp_file.close()
for i in range(len(model_out)):
execute_script(parsed_arrivals[i], model_in[i], model_out[i], comp_out[i])
| [
"datetime.datetime.strptime",
"datetime.timedelta",
"datetime.datetime.strftime"
] | [((433, 454), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(27)'}), '(seconds=27)\n', (442, 454), False, 'from datetime import timedelta\n'), ((467, 488), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (476, 488), False, 'from datetime import timedelta\n'), ((1567, 1616), 'datetime.datetime.strftime', 'datetime.strftime', (['arr[3]', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(arr[3], '%Y-%m-%dT%H:%M:%S.%f')\n", (1584, 1616), False, 'from datetime import datetime\n'), ((4429, 4460), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'padding_time'}), '(seconds=padding_time)\n', (4438, 4460), False, 'from datetime import timedelta\n'), ((4479, 4510), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'padding_time'}), '(seconds=padding_time)\n', (4488, 4510), False, 'from datetime import timedelta\n'), ((2483, 2528), 'datetime.datetime.strptime', 'datetime.strptime', (['entry', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(entry, '%Y-%m-%dT%H:%M:%S')\n", (2500, 2528), False, 'from datetime import datetime\n'), ((1277, 1326), 'datetime.datetime.strptime', 'datetime.strptime', (['row[3]', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(row[3], '%Y-%m-%dT%H:%M:%S.%f')\n", (1294, 1326), False, 'from datetime import datetime\n'), ((2670, 2714), 'datetime.datetime.strftime', 'datetime.strftime', (['time', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(time, '%Y-%m-%dT%H:%M:%S')\n", (2687, 2714), False, 'from datetime import datetime\n'), ((2923, 2967), 'datetime.datetime.strftime', 'datetime.strftime', (['time', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(time, '%Y-%m-%dT%H:%M:%S')\n", (2940, 2967), False, 'from datetime import datetime\n'), ((3114, 3158), 'datetime.datetime.strftime', 'datetime.strftime', (['time', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(time, '%Y-%m-%dT%H:%M:%S')\n", (3131, 3158), False, 'from datetime import datetime\n'), ((3808, 3857), 'datetime.datetime.strptime', 'datetime.strptime', (['tmp[3]', '"""%Y-%m-%dT%H:%M:%S.%f"""'], {}), "(tmp[3], '%Y-%m-%dT%H:%M:%S.%f')\n", (3825, 3857), False, 'from datetime import datetime\n'), ((3034, 3078), 'datetime.datetime.strptime', 'datetime.strptime', (['time', '"""%Y-%m-%dT%H:%M:%S"""'], {}), "(time, '%Y-%m-%dT%H:%M:%S')\n", (3051, 3078), False, 'from datetime import datetime\n'), ((2613, 2634), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(23)'}), '(seconds=23)\n', (2622, 2634), False, 'from datetime import timedelta\n')] |
import argparse
import boto3
import datetime
import json
import os
import posixpath
import re
import shutil
import tempfile
import uuid
from concurrent import futures
from multiprocessing import Pool
from ultitrackerapi import get_backend, get_logger, get_s3Client, video
backend_instance = get_backend()
logger = get_logger(__name__, level="DEBUG")
s3Client = get_s3Client()
def update_game_video_length(game_id, video_length):
command = """
UPDATE ultitracker.game_metadata
SET data = jsonb_set(data, '{{length}}', '"{video_length}"', true)
WHERE game_id = '{game_id}'
""".format(
video_length=video_length,
game_id=game_id
)
backend_instance.client.execute(command)
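# For example (hypothetical values), game_id='abc123' and video_length='0:03:20' render
# the command as:
#   UPDATE ultitracker.game_metadata
#   SET data = jsonb_set(data, '{length}', '"0:03:20"', true)
#   WHERE game_id = 'abc123'
# i.e. the doubled '{{length}}' braces survive str.format() as the jsonb path '{length}'.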
def get_frame_number(key, chunk_multiplier=60):
frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split("_")[1])
chunk_number = int(posixpath.basename(posixpath.dirname(key)).split("_")[1])
return chunk_number * chunk_multiplier + frame_number
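# Example (assuming keys shaped like '.../chunk_0003/frame_0012.png'): the frame index
# comes from the file name (12), the chunk index from the parent directory (3), and the
# absolute frame number is 3 * 60 + 12 = 192 with the default chunk_multiplier of 60.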
def insert_images(
img_raw_paths,
img_types,
img_metadatas,
game_id,
frame_numbers
):
command = """
INSERT INTO ultitracker.img_location (img_id, img_raw_path, img_type, img_metadata, game_id, frame_number) VALUES
"""
for i, (img_raw_path, img_type, img_metadata, frame_number) in enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)):
command += """('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma}
""".format(
img_id=uuid.uuid4(),
img_raw_path=img_raw_path,
img_type=img_type,
img_metadata=json.dumps(img_metadata),
game_id=game_id,
frame_number=frame_number,
include_comma="," if i < (len(img_raw_paths) - 1) else ""
)
backend_instance.client.execute(command)
def extract_and_upload_video(
bucket,
video_filename,
thumbnail_filename,
video_key,
thumbnail_key,
game_id
):
logger.debug("extract_and_upload_video: Getting video length")
video_length_seconds = int(video.get_video_duration(video_filename))
video_length = str(datetime.timedelta(seconds=video_length_seconds))
logger.debug("extract_and_upload_video: Finished getting video length")
logger.debug("extract_and_upload_video: Getting video height and width")
video_height_width = video.get_video_height_width(video_filename)
logger.debug("extract_and_upload_video: Finished getting height and width")
logger.debug("extract_and_upload_video: Updating length in db")
update_game_video_length(game_id, video_length)
logger.debug("extract_and_upload_video: Finished updating length in db")
logger.debug("extract_and_upload_video: Extracting thumbnail")
video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds // 2)
logger.debug("extract_and_upload_video: Finished extracting thumbnail")
logger.debug("extract_and_upload_video: Uploading thumbnail")
s3Client.upload_file(
thumbnail_filename,
bucket,
thumbnail_key
)
logger.debug("extract_and_upload_video: Finished uploading thumbnail")
logger.debug("extract_and_upload_video: Uploading video to S3")
s3Client.upload_file(
video_filename,
bucket,
video_key
)
logger.debug("extract_and_upload_video: Finished uploading video to S3")
logger.debug("extract_and_upload_video: Chunking video")
chunked_video_dir = tempfile.mkdtemp()
video.chunk_video(video_filename, chunked_video_dir, chunk_size=60)
logger.debug("extract_and_upload_video: Finished chunking video")
logger.debug("extract_and_upload_video: Uploading video chunks")
with futures.ThreadPoolExecutor(8) as ex:
for vid in os.listdir(chunked_video_dir):
ex.submit(
s3Client.upload_file,
os.path.join(chunked_video_dir, vid),
bucket,
posixpath.join(
posixpath.dirname(video_key),
"chunks",
vid
)
)
logger.debug("extract_and_upload_video: Finished uploading video chunks")
logger.debug("extract_and_upload_video: Submitting lambda frame extraction")
aws_lambda_payloads = [
json.dumps({
"s3_bucket_path": bucket,
"s3_video_path": posixpath.join(posixpath.dirname(video_key), "chunks", basename),
"s3_output_frames_path": posixpath.join(posixpath.dirname(video_key), "frames", posixpath.splitext(basename)[0]),
"video_metadata": video_height_width
}).encode()
for basename in os.listdir(chunked_video_dir)
]
client = boto3.client('lambda')
aws_lambda_responses = []
with futures.ThreadPoolExecutor(max_workers=16) as ex:
result_futures = []
for payload in aws_lambda_payloads:
result_futures.append(ex.submit(
client.invoke,
FunctionName="extractFrames",
# InvocationType="Event",
Payload=payload
))
logger.debug("extract_and_upload_video: Submitted lambda frame extraction")
for result_future in futures.as_completed(result_futures):
aws_lambda_response = json.loads(result_future.result()["Payload"].read().decode("utf-8"))
aws_lambda_responses.append(aws_lambda_response)
raw_paths = ["s3://" + posixpath.join(frame["bucket"], frame["key"]) for frame in aws_lambda_response["frames"]]
img_types = ["png" for frame in aws_lambda_response["frames"]]
metadatas = [
{"bucket": bucket}
for frame in aws_lambda_response["frames"]
]
frame_numbers = [-1 for frame in aws_lambda_response["frames"]]
insert_images(
raw_paths,
img_types,
metadatas,
game_id,
frame_numbers
)
logger.debug("extract_and_upload_video: Received all lambda responses")
logger.debug("extract_and_upload_video: Finished inserting image metadata")
os.remove(video_filename)
os.remove(thumbnail_filename)
shutil.rmtree(chunked_video_dir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("bucket")
parser.add_argument("video_filename")
parser.add_argument("thumbnail_filename")
parser.add_argument("video_key")
parser.add_argument("thumbnail_key")
parser.add_argument("game_id")
args = parser.parse_args()
extract_and_upload_video(
bucket=args.bucket,
video_filename=args.video_filename,
thumbnail_filename=args.thumbnail_filename,
video_key=args.video_key,
thumbnail_key=args.thumbnail_key,
game_id=args.game_id
)
if __name__ == "__main__":
main() | [
"posixpath.join",
"boto3.client",
"ultitrackerapi.video.get_video_duration",
"ultitrackerapi.get_backend",
"ultitrackerapi.get_s3Client",
"datetime.timedelta",
"os.remove",
"os.listdir",
"posixpath.splitext",
"argparse.ArgumentParser",
"json.dumps",
"concurrent.futures.as_completed",
"posixpath.basename",
"ultitrackerapi.video.chunk_video",
"ultitrackerapi.get_logger",
"ultitrackerapi.video.get_thumbnail",
"posixpath.dirname",
"uuid.uuid4",
"tempfile.mkdtemp",
"concurrent.futures.ThreadPoolExecutor",
"os.path.join",
"ultitrackerapi.video.get_video_height_width",
"shutil.rmtree"
] | [((297, 310), 'ultitrackerapi.get_backend', 'get_backend', ([], {}), '()\n', (308, 310), False, 'from ultitrackerapi import get_backend, get_logger, get_s3Client, video\n'), ((320, 355), 'ultitrackerapi.get_logger', 'get_logger', (['__name__'], {'level': '"""DEBUG"""'}), "(__name__, level='DEBUG')\n", (330, 355), False, 'from ultitrackerapi import get_backend, get_logger, get_s3Client, video\n'), ((367, 381), 'ultitrackerapi.get_s3Client', 'get_s3Client', ([], {}), '()\n', (379, 381), False, 'from ultitrackerapi import get_backend, get_logger, get_s3Client, video\n'), ((2423, 2467), 'ultitrackerapi.video.get_video_height_width', 'video.get_video_height_width', (['video_filename'], {}), '(video_filename)\n', (2451, 2467), False, 'from ultitrackerapi import get_backend, get_logger, get_s3Client, video\n'), ((2822, 2914), 'ultitrackerapi.video.get_thumbnail', 'video.get_thumbnail', (['video_filename', 'thumbnail_filename'], {'time': '(video_length_seconds // 2)'}), '(video_filename, thumbnail_filename, time=\n video_length_seconds // 2)\n', (2841, 2914), False, 'from ultitrackerapi import get_backend, get_logger, get_s3Client, video\n'), ((3552, 3570), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3568, 3570), False, 'import tempfile\n'), ((3575, 3642), 'ultitrackerapi.video.chunk_video', 'video.chunk_video', (['video_filename', 'chunked_video_dir'], {'chunk_size': '(60)'}), '(video_filename, chunked_video_dir, chunk_size=60)\n', (3592, 3642), False, 'from ultitrackerapi import get_backend, get_logger, get_s3Client, video\n'), ((4806, 4828), 'boto3.client', 'boto3.client', (['"""lambda"""'], {}), "('lambda')\n", (4818, 4828), False, 'import boto3\n'), ((6287, 6312), 'os.remove', 'os.remove', (['video_filename'], {}), '(video_filename)\n', (6296, 6312), False, 'import os\n'), ((6317, 6346), 'os.remove', 'os.remove', (['thumbnail_filename'], {}), '(thumbnail_filename)\n', (6326, 6346), False, 'import os\n'), ((6351, 6383), 'shutil.rmtree', 'shutil.rmtree', (['chunked_video_dir'], {}), '(chunked_video_dir)\n', (6364, 6383), False, 'import shutil\n'), ((6411, 6436), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6434, 6436), False, 'import argparse\n'), ((2129, 2169), 'ultitrackerapi.video.get_video_duration', 'video.get_video_duration', (['video_filename'], {}), '(video_filename)\n', (2153, 2169), False, 'from ultitrackerapi import get_backend, get_logger, get_s3Client, video\n'), ((2194, 2242), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'video_length_seconds'}), '(seconds=video_length_seconds)\n', (2212, 2242), False, 'import datetime\n'), ((3792, 3821), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', (['(8)'], {}), '(8)\n', (3818, 3821), False, 'from concurrent import futures\n'), ((3848, 3877), 'os.listdir', 'os.listdir', (['chunked_video_dir'], {}), '(chunked_video_dir)\n', (3858, 3877), False, 'import os\n'), ((4870, 4912), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(16)'}), '(max_workers=16)\n', (4896, 4912), False, 'from concurrent import futures\n'), ((5331, 5367), 'concurrent.futures.as_completed', 'futures.as_completed', (['result_futures'], {}), '(result_futures)\n', (5351, 5367), False, 'from concurrent import futures\n'), ((4756, 4785), 'os.listdir', 'os.listdir', (['chunked_video_dir'], {}), '(chunked_video_dir)\n', (4766, 4785), False, 'import os\n'), ((1559, 1571), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1569, 1571), False, 'import uuid\n'), 
((1668, 1692), 'json.dumps', 'json.dumps', (['img_metadata'], {}), '(img_metadata)\n', (1678, 1692), False, 'import json\n'), ((3956, 3992), 'os.path.join', 'os.path.join', (['chunked_video_dir', 'vid'], {}), '(chunked_video_dir, vid)\n', (3968, 3992), False, 'import os\n'), ((4070, 4098), 'posixpath.dirname', 'posixpath.dirname', (['video_key'], {}), '(video_key)\n', (4087, 4098), False, 'import posixpath\n'), ((5569, 5614), 'posixpath.join', 'posixpath.join', (["frame['bucket']", "frame['key']"], {}), "(frame['bucket'], frame['key'])\n", (5583, 5614), False, 'import posixpath\n'), ((898, 920), 'posixpath.dirname', 'posixpath.dirname', (['key'], {}), '(key)\n', (915, 920), False, 'import posixpath\n'), ((813, 836), 'posixpath.basename', 'posixpath.basename', (['key'], {}), '(key)\n', (831, 836), False, 'import posixpath\n'), ((4486, 4514), 'posixpath.dirname', 'posixpath.dirname', (['video_key'], {}), '(video_key)\n', (4503, 4514), False, 'import posixpath\n'), ((4589, 4617), 'posixpath.dirname', 'posixpath.dirname', (['video_key'], {}), '(video_key)\n', (4606, 4617), False, 'import posixpath\n'), ((4629, 4657), 'posixpath.splitext', 'posixpath.splitext', (['basename'], {}), '(basename)\n', (4647, 4657), False, 'import posixpath\n')] |
# --- SECTION 1 ---
# Import the required libraries
from sklearn import datasets, naive_bayes, svm, neighbors
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score
# Load the dataset
breast_cancer = datasets.load_breast_cancer()
x, y = breast_cancer.data, breast_cancer.target
# Split the train and test samples
test_samples = 100
x_train, y_train = x[:-test_samples], y[:-test_samples]
x_test, y_test = x[-test_samples:], y[-test_samples:]
# --- SECTION 2 ---
# Instantiate the learners (classifiers)
learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5)
learner_2 = naive_bayes.GaussianNB()
learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50)
# --- SECTION 3 ---
# Instantiate the voting classifier
voting = VotingClassifier([('5NN', learner_1),
('NB', learner_2),
('50NN', learner_3)],
voting='soft')
# --- SECTION 4 ---
# Fit classifier with the training data
voting.fit(x_train, y_train)
learner_1.fit(x_train, y_train)
learner_2.fit(x_train, y_train)
learner_3.fit(x_train, y_train)
# --- SECTION 5 ---
# Predict the most probable class
hard_predictions = voting.predict(x_test)
# --- SECTION 6 ---
# Get the base learner predictions
predictions_1 = learner_1.predict(x_test)
predictions_2 = learner_2.predict(x_test)
predictions_3 = learner_3.predict(x_test)
# --- SECTION 7 ---
# Accuracies of base learners
print('L1:', accuracy_score(y_test, predictions_1))
print('L2:', accuracy_score(y_test, predictions_2))
print('L3:', accuracy_score(y_test, predictions_3))
# Accuracy of hard voting
print('-'*30)
print('Hard Voting:', accuracy_score(y_test, hard_predictions))
# --- SECTION 1 ---
# Import the required libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use('seaborn-paper')
# --- SECTION 2 ---
# Get the wrongly predicted instances
# and the predicted probabilities for the whole test set
errors = y_test-hard_predictions
probabilities_1 = learner_1.predict_proba(x_test)
probabilities_2 = learner_2.predict_proba(x_test)
probabilities_3 = learner_3.predict_proba(x_test)
# --- SECTION 2 ---
# Store the predicted probability for
# each wrongly predicted instance, for each base learner
# as well as the average predicted probability
#
x=[]
y_1=[]
y_2=[]
y_3=[]
y_avg=[]
for i in range(len(errors)):
if not errors[i] == 0:
x.append(i)
y_1.append(probabilities_1[i][0])
y_2.append(probabilities_2[i][0])
y_3.append(probabilities_3[i][0])
y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3)
# --- SECTION 3 ---
# Plot the predicted probability of each base learner as
# a bar and the average probability as an X
plt.bar(x, y_1, 3, label='5NN')
plt.bar(x, y_2, 2, label='NB')
plt.bar(x, y_3, 1, label='50NN')
plt.scatter(x, y_avg, marker='x', c='k', s=150, label='Average Positive', zorder=10)
y = [0.5 for x in range(len(errors))]
plt.plot(y, c='k', linestyle='--')
plt.title('Positive Probability')
plt.xlabel('Test sample')
plt.ylabel('probability')
plt.legend()
| [
"sklearn.ensemble.VotingClassifier",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.datasets.load_breast_cancer",
"matplotlib.pyplot.bar",
"matplotlib.style.use",
"matplotlib.pyplot.scatter",
"sklearn.naive_bayes.GaussianNB",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.legend"
] | [((241, 270), 'sklearn.datasets.load_breast_cancer', 'datasets.load_breast_cancer', ([], {}), '()\n', (268, 270), False, 'from sklearn import datasets, naive_bayes, svm, neighbors\n'), ((569, 614), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', ([], {'n_neighbors': '(5)'}), '(n_neighbors=5)\n', (599, 614), False, 'from sklearn import datasets, naive_bayes, svm, neighbors\n'), ((628, 652), 'sklearn.naive_bayes.GaussianNB', 'naive_bayes.GaussianNB', ([], {}), '()\n', (650, 652), False, 'from sklearn import datasets, naive_bayes, svm, neighbors\n'), ((666, 712), 'sklearn.neighbors.KNeighborsClassifier', 'neighbors.KNeighborsClassifier', ([], {'n_neighbors': '(50)'}), '(n_neighbors=50)\n', (696, 712), False, 'from sklearn import datasets, naive_bayes, svm, neighbors\n'), ((784, 882), 'sklearn.ensemble.VotingClassifier', 'VotingClassifier', (["[('5NN', learner_1), ('NB', learner_2), ('50NN', learner_3)]"], {'voting': '"""soft"""'}), "([('5NN', learner_1), ('NB', learner_2), ('50NN', learner_3\n )], voting='soft')\n", (800, 882), False, 'from sklearn.ensemble import VotingClassifier\n'), ((1892, 1922), 'matplotlib.style.use', 'mpl.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (1905, 1922), True, 'import matplotlib as mpl\n'), ((2880, 2911), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y_1', '(3)'], {'label': '"""5NN"""'}), "(x, y_1, 3, label='5NN')\n", (2887, 2911), True, 'import matplotlib.pyplot as plt\n'), ((2915, 2945), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y_2', '(2)'], {'label': '"""NB"""'}), "(x, y_2, 2, label='NB')\n", (2922, 2945), True, 'import matplotlib.pyplot as plt\n'), ((2949, 2981), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y_3', '(1)'], {'label': '"""50NN"""'}), "(x, y_3, 1, label='50NN')\n", (2956, 2981), True, 'import matplotlib.pyplot as plt\n'), ((2985, 3073), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y_avg'], {'marker': '"""x"""', 'c': '"""k"""', 's': '(150)', 'label': '"""Average Positive"""', 'zorder': '(10)'}), "(x, y_avg, marker='x', c='k', s=150, label='Average Positive',\n zorder=10)\n", (2996, 3073), True, 'import matplotlib.pyplot as plt\n'), ((3112, 3146), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {'c': '"""k"""', 'linestyle': '"""--"""'}), "(y, c='k', linestyle='--')\n", (3120, 3146), True, 'import matplotlib.pyplot as plt\n'), ((3150, 3183), 'matplotlib.pyplot.title', 'plt.title', (['"""Positive Probability"""'], {}), "('Positive Probability')\n", (3159, 3183), True, 'import matplotlib.pyplot as plt\n'), ((3185, 3210), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Test sample"""'], {}), "('Test sample')\n", (3195, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3237), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""probability"""'], {}), "('probability')\n", (3222, 3237), True, 'import matplotlib.pyplot as plt\n'), ((3239, 3251), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3249, 3251), True, 'import matplotlib.pyplot as plt\n'), ((1523, 1560), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions_1'], {}), '(y_test, predictions_1)\n', (1537, 1560), False, 'from sklearn.metrics import accuracy_score\n'), ((1576, 1613), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions_2'], {}), '(y_test, predictions_2)\n', (1590, 1613), False, 'from sklearn.metrics import accuracy_score\n'), ((1629, 1666), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'predictions_3'], {}), '(y_test, predictions_3)\n', (1643, 
1666), False, 'from sklearn.metrics import accuracy_score\n'), ((1733, 1773), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'hard_predictions'], {}), '(y_test, hard_predictions)\n', (1747, 1773), False, 'from sklearn.metrics import accuracy_score\n')] |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
iris dataset
"""
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from qiskit.aqua import MissingOptionalLibraryError
def iris(training_size, test_size, n, plot_data=False):
""" returns iris dataset """
class_labels = [r'A', r'B', r'C']
data, target = datasets.load_iris(return_X_y=True)
sample_train, sample_test, label_train, label_test = \
train_test_split(data, target, test_size=1, random_state=42)
# Now we standardize for gaussian around 0 with unit variance
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Now reduce number of features to number of qubits
pca = PCA(n_components=n).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Scale to the range (-1,+1)
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Pick training size number of samples from each distro
training_input = {key: (sample_train[label_train == k, :])[:training_size]
for k, key in enumerate(class_labels)}
test_input = {key: (sample_test[label_test == k, :])[:test_size]
for k, key in enumerate(class_labels)}
if plot_data:
try:
import matplotlib.pyplot as plt
except ImportError as ex:
raise MissingOptionalLibraryError(
libname='Matplotlib',
name='iris',
pip_install='pip install matplotlib') from ex
for k in range(0, 3):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("Iris dataset")
plt.show()
return sample_train, training_input, test_input, class_labels
| [
"sklearn.datasets.load_iris",
"sklearn.model_selection.train_test_split",
"sklearn.decomposition.PCA",
"numpy.append",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.scatter",
"qiskit.aqua.MissingOptionalLibraryError",
"matplotlib.pyplot.title",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.show"
] | [((908, 943), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (926, 943), False, 'from sklearn import datasets\n'), ((1011, 1071), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'target'], {'test_size': '(1)', 'random_state': '(42)'}), '(data, target, test_size=1, random_state=42)\n', (1027, 1071), False, 'from sklearn.model_selection import train_test_split\n'), ((1539, 1583), 'numpy.append', 'np.append', (['sample_train', 'sample_test'], {'axis': '(0)'}), '(sample_train, sample_test, axis=0)\n', (1548, 1583), True, 'import numpy as np\n'), ((2550, 2575), 'matplotlib.pyplot.title', 'plt.title', (['"""Iris dataset"""'], {}), "('Iris dataset')\n", (2559, 2575), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2594), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2592, 2594), True, 'import matplotlib.pyplot as plt\n'), ((1155, 1171), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1169, 1171), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((1361, 1380), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n'}), '(n_components=n)\n', (1364, 1380), False, 'from sklearn.decomposition import PCA\n'), ((1603, 1624), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', (['(-1, 1)'], {}), '((-1, 1))\n', (1615, 1624), False, 'from sklearn.preprocessing import StandardScaler, MinMaxScaler\n'), ((2403, 2521), 'matplotlib.pyplot.scatter', 'plt.scatter', (['sample_train[label_train == k, 0][:training_size]', 'sample_train[label_train == k, 1][:training_size]'], {}), '(sample_train[label_train == k, 0][:training_size], sample_train\n [label_train == k, 1][:training_size])\n', (2414, 2521), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2308), 'qiskit.aqua.MissingOptionalLibraryError', 'MissingOptionalLibraryError', ([], {'libname': '"""Matplotlib"""', 'name': '"""iris"""', 'pip_install': '"""pip install matplotlib"""'}), "(libname='Matplotlib', name='iris', pip_install=\n 'pip install matplotlib')\n", (2230, 2308), False, 'from qiskit.aqua import MissingOptionalLibraryError\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import json
import mock
import pytest
from oauthlib.oauth2 import InvalidRequestFatalError
from oauthlib.common import Request as OAuthRequest
from pyramid import httpexceptions
from h._compat import urlparse
from h.exceptions import OAuthTokenError
from h.models.auth_client import ResponseType
from h.services.auth_token import auth_token_service_factory
from h.services.oauth_provider import OAuthProviderService
from h.services.oauth_validator import DEFAULT_SCOPES
from h.services.user import user_service_factory
from h.util.datetime import utc_iso8601
from h.views import api_auth as views
@pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc')
class TestOAuthAuthorizeController(object):
@pytest.mark.usefixtures('authenticated_user')
@pytest.mark.parametrize('view_name', ['get', 'get_web_message'])
def test_get_validates_request(self, controller, pyramid_request, view_name):
view = getattr(controller, view_name)
view()
controller.oauth.validate_authorization_request.assert_called_once_with(
pyramid_request.url)
@pytest.mark.parametrize('view_name', ['get', 'get_web_message'])
def test_get_raises_for_invalid_request(self, controller, view_name):
controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!')
with pytest.raises(InvalidRequestFatalError) as exc:
view = getattr(controller, view_name)
view()
assert exc.value.description == 'boom!'
@pytest.mark.parametrize('view_name', ['get', 'get_web_message'])
def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name):
with pytest.raises(httpexceptions.HTTPFound) as exc:
view = getattr(controller, view_name)
view()
parsed_url = urlparse.urlparse(exc.value.location)
assert parsed_url.path == '/login'
assert urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url],
'for_oauth': ['True']}
@pytest.mark.parametrize('response_mode,view_name', [
(None, 'get'),
('web_message', 'get_web_message'),
])
def test_get_returns_expected_context(self, controller, auth_client, authenticated_user, oauth_request, response_mode, view_name):
oauth_request.response_mode = response_mode
view = getattr(controller, view_name)
assert view() == {
'client_id': auth_client.id,
'client_name': auth_client.name,
'response_mode': response_mode,
'response_type': auth_client.response_type.value,
'state': 'foobar',
'username': authenticated_user.username,
}
@pytest.mark.parametrize('view_name', ['get', 'get_web_message'])
def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request, view_name):
auth_client.trusted = True
view = getattr(controller, view_name)
view()
controller.oauth.create_authorization_response.assert_called_once_with(
pyramid_request.url,
credentials={'user': authenticated_user},
scopes=DEFAULT_SCOPES)
def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request):
auth_client.trusted = True
response = controller.get()
expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)
assert response.location == expected
@pytest.mark.usefixtures('authenticated_user')
def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client):
auth_client.trusted = True
assert controller.request.override_renderer is None
controller.get_web_message()
assert controller.request.override_renderer == 'h:templates/oauth/authorize_web_message.html.jinja2'
@pytest.mark.usefixtures('authenticated_user')
def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client):
auth_client.trusted = True
response = controller.get_web_message()
assert response == {
'code': 'abcdef123456',
'origin': 'http://client.com',
'state': 'foobar',
}
@pytest.mark.usefixtures('authenticated_user')
def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider):
auth_client.trusted = True
headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)}
oauth_provider.create_authorization_response.return_value = (headers, None, 302)
response = controller.get_web_message()
assert response['state'] is None
@pytest.mark.parametrize('view_name', ['post', 'post_web_message'])
def test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user, view_name):
pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' + \
'&response_type=code' + \
'&state=foobar' + \
'&scope=exploit'
view = getattr(controller, view_name)
view()
controller.oauth.create_authorization_response.assert_called_once_with(
pyramid_request.url,
credentials={'user': authenticated_user},
scopes=DEFAULT_SCOPES)
@pytest.mark.usefixtures('authenticated_user')
@pytest.mark.parametrize('view_name', ['post', 'post_web_message'])
def test_post_raises_for_invalid_request(self, controller, view_name):
controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!')
with pytest.raises(InvalidRequestFatalError) as exc:
view = getattr(controller, view_name)
view()
assert exc.value.description == 'boom!'
def test_post_redirects_to_client(self, controller, auth_client):
response = controller.post()
expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)
assert response.location == expected
def test_post_web_message_returns_expected_context(self, controller, auth_client):
response = controller.post_web_message()
assert response == {
'code': 'abcdef123456',
'origin': 'http://client.com',
'state': 'foobar',
}
def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider):
auth_client.trusted = True
headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)}
oauth_provider.create_authorization_response.return_value = (headers, None, 302)
response = controller.post_web_message()
assert response['state'] is None
@pytest.fixture
def controller(self, pyramid_request):
pyramid_request.override_renderer = None
return views.OAuthAuthorizeController(None, pyramid_request)
@pytest.fixture
def oauth_request(self):
return OAuthRequest('/')
@pytest.fixture
def oauth_provider(self, pyramid_config, auth_client, oauth_request):
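        # Autospec mock of the OAuth provider service: validation returns canned
        # scopes/credentials and authorization responses redirect with code=abcdef123456.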
svc = mock.create_autospec(OAuthProviderService, instance=True)
scopes = ['annotation:read', 'annotation:write']
credentials = {'client_id': auth_client.id, 'state': 'foobar', 'request': oauth_request}
svc.validate_authorization_request.return_value = (scopes, credentials)
headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)}
body = None
status = 302
svc.create_authorization_response.return_value = (headers, body, status)
pyramid_config.register_service(svc, name='oauth_provider')
return svc
@pytest.fixture
def auth_client(self, factories):
return factories.AuthClient(name='Test Client',
redirect_uri='http://client.com/auth/callback',
response_type=ResponseType.code)
@pytest.fixture
def user_svc(self, pyramid_config, pyramid_request):
svc = mock.Mock(spec_set=user_service_factory(None, pyramid_request))
pyramid_config.register_service(svc, name='user')
return svc
@pytest.fixture
def pyramid_request(self, pyramid_request):
pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar'
return pyramid_request
@pytest.fixture
def authenticated_user(self, factories, pyramid_config, user_svc):
user = factories.User.build()
pyramid_config.testing_securitypolicy(user.userid)
def fake_fetch(userid):
if userid == user.userid:
return user
user_svc.fetch.side_effect = fake_fetch
return user
@pytest.fixture
def routes(self, pyramid_config):
pyramid_config.add_route('login', '/login')
@pytest.mark.usefixtures('oauth_provider')
class TestOAuthAccessTokenController(object):
def test_it_creates_token_response(self, pyramid_request, controller, oauth_provider):
controller.post()
oauth_provider.create_token_response.assert_called_once_with(
pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers)
def test_it_returns_correct_response_on_success(self, controller, oauth_provider):
body = json.dumps({'access_token': 'the-access-token'})
oauth_provider.create_token_response.return_value = ({}, body, 200)
assert controller.post() == {'access_token': 'the-access-token'}
def test_it_raises_when_error(self, controller, oauth_provider):
body = json.dumps({'error': 'invalid_request'})
oauth_provider.create_token_response.return_value = ({}, body, 400)
with pytest.raises(httpexceptions.HTTPBadRequest) as exc:
controller.post()
assert exc.value.body == body
@pytest.fixture
def controller(self, pyramid_request):
pyramid_request.method = 'POST'
pyramid_request.POST['grant_type'] = 'authorization_code'
pyramid_request.POST['code'] = 'the-authz-code'
pyramid_request.headers = {'X-Test-ID': '1234'}
return views.OAuthAccessTokenController(pyramid_request)
@pytest.fixture
def oauth_provider(self, pyramid_config):
svc = mock.Mock(spec_set=['create_token_response'])
svc.create_token_response.return_value = ({}, '{}', 200)
pyramid_config.register_service(svc, name='oauth_provider')
return svc
@pytest.mark.usefixtures('oauth_provider')
class TestOAuthRevocationController(object):
def test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider):
controller.post()
oauth_provider.create_revocation_response.assert_called_once_with(
pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers)
def test_it_returns_empty_response_on_success(self, controller):
response = controller.post()
assert response == {}
def test_it_raises_when_error(self, controller, oauth_provider):
body = json.dumps({'error': 'invalid_request'})
oauth_provider.create_revocation_response.return_value = ({}, body, 400)
with pytest.raises(httpexceptions.HTTPBadRequest) as exc:
controller.post()
assert exc.value.body == body
@pytest.fixture
def controller(self, pyramid_request):
pyramid_request.method = 'POST'
pyramid_request.POST['token'] = 'the-token'
pyramid_request.headers = {'X-Test-ID': '1234'}
return views.OAuthRevocationController(pyramid_request)
@pytest.fixture
def oauth_provider(self, pyramid_config):
svc = mock.Mock(spec_set=['create_revocation_response'])
svc.create_revocation_response.return_value = ({}, '{}', 200)
pyramid_config.register_service(svc, name='oauth_provider')
return svc
class TestDebugToken(object):
def test_it_raises_error_when_token_is_missing(self, pyramid_request):
pyramid_request.auth_token = None
with pytest.raises(OAuthTokenError) as exc:
views.debug_token(pyramid_request)
assert exc.value.type == 'missing_token'
assert 'Bearer token is missing' in exc.value.message
def test_it_raises_error_when_token_is_empty(self, pyramid_request):
pyramid_request.auth_token = ''
with pytest.raises(OAuthTokenError) as exc:
views.debug_token(pyramid_request)
assert exc.value.type == 'missing_token'
assert 'Bearer token is missing' in exc.value.message
def test_it_validates_token(self, pyramid_request, token_service):
pyramid_request.auth_token = 'the-access-token'
views.debug_token(pyramid_request)
token_service.validate.assert_called_once_with('the-access-token')
def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service):
pyramid_request.auth_token = 'the-token'
token_service.validate.return_value = None
with pytest.raises(OAuthTokenError) as exc:
views.debug_token(pyramid_request)
assert exc.value.type == 'missing_token'
assert 'Bearer token does not exist or is expired' in exc.value.message
def test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service, oauth_token):
pyramid_request.auth_token = oauth_token.value
token_service.fetch.return_value = oauth_token
result = views.debug_token(pyramid_request)
assert result == {'userid': oauth_token.userid,
'client': {'id': oauth_token.authclient.id,
'name': oauth_token.authclient.name},
'issued_at': utc_iso8601(oauth_token.created),
'expires_at': utc_iso8601(oauth_token.expires),
'expired': oauth_token.expired}
def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token):
pyramid_request.auth_token = developer_token.value
token_service.fetch.return_value = developer_token
result = views.debug_token(pyramid_request)
assert result == {'userid': developer_token.userid,
'issued_at': utc_iso8601(developer_token.created),
'expires_at': None,
'expired': False}
@pytest.fixture
def token_service(self, pyramid_config, pyramid_request):
svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request))
pyramid_config.register_service(svc, name='auth_token')
return svc
@pytest.fixture
def oauth_token(self, factories):
authclient = factories.AuthClient(name='Example Client')
expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=10)
return factories.DeveloperToken(authclient=authclient, expires=expires)
@pytest.fixture
def developer_token(self, factories):
return factories.DeveloperToken()
class TestAPITokenError(object):
def test_it_sets_the_response_status_code(self, pyramid_request):
context = OAuthTokenError('the error message', 'error_type', status_code=403)
views.api_token_error(context, pyramid_request)
assert pyramid_request.response.status_code == 403
def test_it_returns_the_error(self, pyramid_request):
context = OAuthTokenError('', 'error_type')
result = views.api_token_error(context, pyramid_request)
assert result['error'] == 'error_type'
def test_it_returns_error_description(self, pyramid_request):
context = OAuthTokenError('error description', 'error_type')
result = views.api_token_error(context, pyramid_request)
assert result['error_description'] == 'error description'
def test_it_skips_description_when_missing(self, pyramid_request):
context = OAuthTokenError(None, 'invalid_request')
result = views.api_token_error(context, pyramid_request)
assert 'error_description' not in result
def test_it_skips_description_when_empty(self, pyramid_request):
context = OAuthTokenError('', 'invalid_request')
result = views.api_token_error(context, pyramid_request)
assert 'error_description' not in result
| [
"oauthlib.oauth2.InvalidRequestFatalError",
"mock.Mock",
"mock.create_autospec",
"datetime.timedelta",
"h._compat.urlparse.urlparse",
"h.views.api_auth.debug_token",
"h.services.auth_token.auth_token_service_factory",
"json.dumps",
"h._compat.urlparse.parse_qs",
"h.views.api_auth.api_token_error",
"pytest.mark.usefixtures",
"h.services.user.user_service_factory",
"pytest.raises",
"h.exceptions.OAuthTokenError",
"datetime.datetime.utcnow",
"oauthlib.common.Request",
"h.views.api_auth.OAuthAuthorizeController",
"h.views.api_auth.OAuthAccessTokenController",
"pytest.mark.parametrize",
"h.util.datetime.utc_iso8601",
"h.views.api_auth.OAuthRevocationController"
] | [((685, 748), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""routes"""', '"""oauth_provider"""', '"""user_svc"""'], {}), "('routes', 'oauth_provider', 'user_svc')\n", (708, 748), False, 'import pytest\n'), ((9321, 9362), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""oauth_provider"""'], {}), "('oauth_provider')\n", (9344, 9362), False, 'import pytest\n'), ((10973, 11014), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""oauth_provider"""'], {}), "('oauth_provider')\n", (10996, 11014), False, 'import pytest\n'), ((798, 843), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""authenticated_user"""'], {}), "('authenticated_user')\n", (821, 843), False, 'import pytest\n'), ((849, 913), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""view_name"""', "['get', 'get_web_message']"], {}), "('view_name', ['get', 'get_web_message'])\n", (872, 913), False, 'import pytest\n'), ((1178, 1242), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""view_name"""', "['get', 'get_web_message']"], {}), "('view_name', ['get', 'get_web_message'])\n", (1201, 1242), False, 'import pytest\n'), ((1607, 1671), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""view_name"""', "['get', 'get_web_message']"], {}), "('view_name', ['get', 'get_web_message'])\n", (1630, 1671), False, 'import pytest\n'), ((2181, 2289), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""response_mode,view_name"""', "[(None, 'get'), ('web_message', 'get_web_message')]"], {}), "('response_mode,view_name', [(None, 'get'), (\n 'web_message', 'get_web_message')])\n", (2204, 2289), False, 'import pytest\n'), ((2861, 2925), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""view_name"""', "['get', 'get_web_message']"], {}), "('view_name', ['get', 'get_web_message'])\n", (2884, 2925), False, 'import pytest\n'), ((3722, 3767), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""authenticated_user"""'], {}), "('authenticated_user')\n", (3745, 3767), False, 'import pytest\n'), ((4114, 4159), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""authenticated_user"""'], {}), "('authenticated_user')\n", (4137, 4159), False, 'import pytest\n'), ((4497, 4542), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""authenticated_user"""'], {}), "('authenticated_user')\n", (4520, 4542), False, 'import pytest\n'), ((4979, 5045), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""view_name"""', "['post', 'post_web_message']"], {}), "('view_name', ['post', 'post_web_message'])\n", (5002, 5045), False, 'import pytest\n'), ((5739, 5784), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""authenticated_user"""'], {}), "('authenticated_user')\n", (5762, 5784), False, 'import pytest\n'), ((5790, 5856), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""view_name"""', "['post', 'post_web_message']"], {}), "('view_name', ['post', 'post_web_message'])\n", (5813, 5856), False, 'import pytest\n'), ((1387, 1420), 'oauthlib.oauth2.InvalidRequestFatalError', 'InvalidRequestFatalError', (['"""boom!"""'], {}), "('boom!')\n", (1411, 1420), False, 'from oauthlib.oauth2 import InvalidRequestFatalError\n'), ((1930, 1967), 'h._compat.urlparse.urlparse', 'urlparse.urlparse', (['exc.value.location'], {}), '(exc.value.location)\n', (1947, 1967), False, 'from h._compat import urlparse\n'), ((6001, 6034), 'oauthlib.oauth2.InvalidRequestFatalError', 'InvalidRequestFatalError', (['"""boom!"""'], {}), "('boom!')\n", (6025, 6034), False, 'from 
oauthlib.oauth2 import InvalidRequestFatalError\n'), ((7285, 7338), 'h.views.api_auth.OAuthAuthorizeController', 'views.OAuthAuthorizeController', (['None', 'pyramid_request'], {}), '(None, pyramid_request)\n', (7315, 7338), True, 'from h.views import api_auth as views\n'), ((7404, 7421), 'oauthlib.common.Request', 'OAuthRequest', (['"""/"""'], {}), "('/')\n", (7416, 7421), True, 'from oauthlib.common import Request as OAuthRequest\n'), ((7531, 7588), 'mock.create_autospec', 'mock.create_autospec', (['OAuthProviderService'], {'instance': '(True)'}), '(OAuthProviderService, instance=True)\n', (7551, 7588), False, 'import mock\n'), ((9807, 9855), 'json.dumps', 'json.dumps', (["{'access_token': 'the-access-token'}"], {}), "({'access_token': 'the-access-token'})\n", (9817, 9855), False, 'import json\n'), ((10091, 10131), 'json.dumps', 'json.dumps', (["{'error': 'invalid_request'}"], {}), "({'error': 'invalid_request'})\n", (10101, 10131), False, 'import json\n'), ((10641, 10690), 'h.views.api_auth.OAuthAccessTokenController', 'views.OAuthAccessTokenController', (['pyramid_request'], {}), '(pyramid_request)\n', (10673, 10690), True, 'from h.views import api_auth as views\n'), ((10772, 10817), 'mock.Mock', 'mock.Mock', ([], {'spec_set': "['create_token_response']"}), "(spec_set=['create_token_response'])\n", (10781, 10817), False, 'import mock\n'), ((11587, 11627), 'json.dumps', 'json.dumps', (["{'error': 'invalid_request'}"], {}), "({'error': 'invalid_request'})\n", (11597, 11627), False, 'import json\n'), ((12072, 12120), 'h.views.api_auth.OAuthRevocationController', 'views.OAuthRevocationController', (['pyramid_request'], {}), '(pyramid_request)\n', (12103, 12120), True, 'from h.views import api_auth as views\n'), ((12202, 12252), 'mock.Mock', 'mock.Mock', ([], {'spec_set': "['create_revocation_response']"}), "(spec_set=['create_revocation_response'])\n", (12211, 12252), False, 'import mock\n'), ((13234, 13268), 'h.views.api_auth.debug_token', 'views.debug_token', (['pyramid_request'], {}), '(pyramid_request)\n', (13251, 13268), True, 'from h.views import api_auth as views\n'), ((13995, 14029), 'h.views.api_auth.debug_token', 'views.debug_token', (['pyramid_request'], {}), '(pyramid_request)\n', (14012, 14029), True, 'from h.views import api_auth as views\n'), ((14682, 14716), 'h.views.api_auth.debug_token', 'views.debug_token', (['pyramid_request'], {}), '(pyramid_request)\n', (14699, 14716), True, 'from h.views import api_auth as views\n'), ((15705, 15772), 'h.exceptions.OAuthTokenError', 'OAuthTokenError', (['"""the error message"""', '"""error_type"""'], {'status_code': '(403)'}), "('the error message', 'error_type', status_code=403)\n", (15720, 15772), False, 'from h.exceptions import OAuthTokenError\n'), ((15781, 15828), 'h.views.api_auth.api_token_error', 'views.api_token_error', (['context', 'pyramid_request'], {}), '(context, pyramid_request)\n', (15802, 15828), True, 'from h.views import api_auth as views\n'), ((15965, 15998), 'h.exceptions.OAuthTokenError', 'OAuthTokenError', (['""""""', '"""error_type"""'], {}), "('', 'error_type')\n", (15980, 15998), False, 'from h.exceptions import OAuthTokenError\n'), ((16016, 16063), 'h.views.api_auth.api_token_error', 'views.api_token_error', (['context', 'pyramid_request'], {}), '(context, pyramid_request)\n', (16037, 16063), True, 'from h.views import api_auth as views\n'), ((16196, 16246), 'h.exceptions.OAuthTokenError', 'OAuthTokenError', (['"""error description"""', '"""error_type"""'], {}), "('error description', 'error_type')\n", 
(16211, 16246), False, 'from h.exceptions import OAuthTokenError\n'), ((16264, 16311), 'h.views.api_auth.api_token_error', 'views.api_token_error', (['context', 'pyramid_request'], {}), '(context, pyramid_request)\n', (16285, 16311), True, 'from h.views import api_auth as views\n'), ((16468, 16508), 'h.exceptions.OAuthTokenError', 'OAuthTokenError', (['None', '"""invalid_request"""'], {}), "(None, 'invalid_request')\n", (16483, 16508), False, 'from h.exceptions import OAuthTokenError\n'), ((16526, 16573), 'h.views.api_auth.api_token_error', 'views.api_token_error', (['context', 'pyramid_request'], {}), '(context, pyramid_request)\n', (16547, 16573), True, 'from h.views import api_auth as views\n'), ((16711, 16749), 'h.exceptions.OAuthTokenError', 'OAuthTokenError', (['""""""', '"""invalid_request"""'], {}), "('', 'invalid_request')\n", (16726, 16749), False, 'from h.exceptions import OAuthTokenError\n'), ((16767, 16814), 'h.views.api_auth.api_token_error', 'views.api_token_error', (['context', 'pyramid_request'], {}), '(context, pyramid_request)\n', (16788, 16814), True, 'from h.views import api_auth as views\n'), ((1435, 1474), 'pytest.raises', 'pytest.raises', (['InvalidRequestFatalError'], {}), '(InvalidRequestFatalError)\n', (1448, 1474), False, 'import pytest\n'), ((1791, 1830), 'pytest.raises', 'pytest.raises', (['httpexceptions.HTTPFound'], {}), '(httpexceptions.HTTPFound)\n', (1804, 1830), False, 'import pytest\n'), ((2026, 2061), 'h._compat.urlparse.parse_qs', 'urlparse.parse_qs', (['parsed_url.query'], {}), '(parsed_url.query)\n', (2043, 2061), False, 'from h._compat import urlparse\n'), ((6049, 6088), 'pytest.raises', 'pytest.raises', (['InvalidRequestFatalError'], {}), '(InvalidRequestFatalError)\n', (6062, 6088), False, 'import pytest\n'), ((10222, 10266), 'pytest.raises', 'pytest.raises', (['httpexceptions.HTTPBadRequest'], {}), '(httpexceptions.HTTPBadRequest)\n', (10235, 10266), False, 'import pytest\n'), ((11723, 11767), 'pytest.raises', 'pytest.raises', (['httpexceptions.HTTPBadRequest'], {}), '(httpexceptions.HTTPBadRequest)\n', (11736, 11767), False, 'import pytest\n'), ((12573, 12603), 'pytest.raises', 'pytest.raises', (['OAuthTokenError'], {}), '(OAuthTokenError)\n', (12586, 12603), False, 'import pytest\n'), ((12624, 12658), 'h.views.api_auth.debug_token', 'views.debug_token', (['pyramid_request'], {}), '(pyramid_request)\n', (12641, 12658), True, 'from h.views import api_auth as views\n'), ((12899, 12929), 'pytest.raises', 'pytest.raises', (['OAuthTokenError'], {}), '(OAuthTokenError)\n', (12912, 12929), False, 'import pytest\n'), ((12950, 12984), 'h.views.api_auth.debug_token', 'views.debug_token', (['pyramid_request'], {}), '(pyramid_request)\n', (12967, 12984), True, 'from h.views import api_auth as views\n'), ((13550, 13580), 'pytest.raises', 'pytest.raises', (['OAuthTokenError'], {}), '(OAuthTokenError)\n', (13563, 13580), False, 'import pytest\n'), ((13601, 13635), 'h.views.api_auth.debug_token', 'views.debug_token', (['pyramid_request'], {}), '(pyramid_request)\n', (13618, 13635), True, 'from h.views import api_auth as views\n'), ((15337, 15363), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (15361, 15363), False, 'import datetime\n'), ((15366, 15396), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (15384, 15396), False, 'import datetime\n'), ((8516, 8559), 'h.services.user.user_service_factory', 'user_service_factory', (['None', 'pyramid_request'], {}), '(None, pyramid_request)\n', (8536, 
8559), False, 'from h.services.user import user_service_factory\n'), ((14271, 14303), 'h.util.datetime.utc_iso8601', 'utc_iso8601', (['oauth_token.created'], {}), '(oauth_token.created)\n', (14282, 14303), False, 'from h.util.datetime import utc_iso8601\n'), ((14345, 14377), 'h.util.datetime.utc_iso8601', 'utc_iso8601', (['oauth_token.expires'], {}), '(oauth_token.expires)\n', (14356, 14377), False, 'from h.util.datetime import utc_iso8601\n'), ((14817, 14853), 'h.util.datetime.utc_iso8601', 'utc_iso8601', (['developer_token.created'], {}), '(developer_token.created)\n', (14828, 14853), False, 'from h.util.datetime import utc_iso8601\n'), ((15061, 15110), 'h.services.auth_token.auth_token_service_factory', 'auth_token_service_factory', (['None', 'pyramid_request'], {}), '(None, pyramid_request)\n', (15087, 15110), False, 'from h.services.auth_token import auth_token_service_factory\n')] |
import time
from typing import Dict, List, Tuple, Optional
from utils.logger_utils import LogManager
from utils.str_utils import check_is_json
from config import LOG_LEVEL, PROCESS_STATUS_FAIL
from utils.time_utils import datetime_str_change_fmt
from utils.exception_utils import LoginException, ParseDataException
from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils
from utils.str_utils import check_is_phone_number, check_is_email_address
logger = LogManager(__name__).get_logger_and_add_handlers(
formatter_template=5, log_level_int=LOG_LEVEL
)
class JuejinSpider(BaseSpider):
def __init__(self, task_id: str, username: str, password: str):
self._main_url = "https://juejin.im/auth/type"
self._blogs_url = "https://timeline-merger-ms.juejin.im/v1/get_entry_by_self"
self._like_blogs_url = "https://user-like-wrapper-ms.juejin.im/v1/user"
self._task_id = task_id
self._login_username = username
self._login_password = password
self._spider_name: str = f"juejin:{self._login_username}"
self._login_cookies: Optional[str] = None
self._login_token: Optional[str] = None
self._login_uid: Optional[str] = None
self._login_client_id: Optional[str] = None
self._response_data = None
self._blogs_data: List = []
self._like_blogs_data: List = []
self._like_blogs_total_page: int = 0
super().__init__()
self._login_cookies = self.get_cookies(spider_name=self._spider_name)
def _check_username(self) -> Optional[Tuple[str, Dict]]:
"""
        Parse the login username (phone number or email).
        :return: the login URL and payload, or None
"""
phone_login = check_is_phone_number(data=self._login_username)
email_login = check_is_email_address(data=self._login_username)
login_data: Dict = {"password": self._login_password}
if phone_login is None and email_login is None:
raise ValueError("Your login username is illegal!")
if phone_login is not None:
login_data.update(phoneNumber=self._login_username)
return f"{self._main_url}/phoneNumber", login_data
if email_login is not None:
login_data.update(email=self._login_username)
return f"{self._main_url}/email", login_data
return None
def parse_data_with_method(self, method: str):
if method == BaseSpiderParseMethodType.LoginResult:
self._parse_login_data()
elif method == BaseSpiderParseMethodType.PersonalBlogs:
self._parse_personal_blogs()
self._parse_personal_like_blogs()
elif method == BaseSpiderParseMethodType.Finish:
self.send_data()
def login(self):
if self._login_cookies is None:
login_url, login_data = self._check_username()
response = self.make_request(
url=login_url,
headers=self._common_headers,
method="POST",
json=login_data,
)
if response.content.decode() != "":
logger.info("登录成功!")
self._response_data = response.json()
self._login_cookies = CookieUtils(
cookie_list=response.cookies.items()
).to_str()
logger.debug(self._login_cookies)
self.set_cookies(
spider_name=self._spider_name, cookies=self._login_cookies
)
self.parse_data_with_method(
method=BaseSpiderParseMethodType.LoginResult
)
else:
logger.error("登录失败!")
raise LoginException()
else:
get_result: str = self.get_data(spider_name=f"{self._spider_name}:params")
if get_result is None:
self.parse_data_with_method(
method=BaseSpiderParseMethodType.LoginResult
)
else:
try:
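                    # Recover uid/token/device_id from the cached query-string params
                    # previously persisted by _parse_login_data.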
login_params = get_result.split("&")[1:-1]
self._login_uid = [d for d in login_params if "uid" in d][
0
].replace("uid=", "")
self._login_token = [d for d in login_params if "token" in d][
0
].replace("token=", "")
self._login_client_id = [
d for d in login_params if "device_id" in d
][0].replace("device_id=", "")
self.parse_data_with_method(
method=BaseSpiderParseMethodType.PersonalBlogs
)
except Exception as err:
logger.error(f"解析 Redis 返回数据失败! 错误原因: {err}")
self.parse_data_with_method(
method=BaseSpiderParseMethodType.LoginResult
)
def _parse_login_data(self):
        # Common parameters
self._login_token = self._response_data["token"]
self._login_uid = self._response_data["userId"]
self._login_client_id = self._response_data["clientId"]
        # Persist the important parameters
        params: str = f"?src=web&uid={self._login_uid}" f"&token={self._login_token}" f"&device_id={self._login_client_id}" f"&current_uid={self._login_uid}"
        self.set_data(spider_name=f"{self._spider_name}:params", data=params)
        # Personal data
username = self._response_data["user"]["username"]
description = self._response_data["user"]["selfDescription"]
avatar_img = self._response_data["user"]["avatarLarge"]
followee = self._response_data["user"]["followeesCount"]
follower = self._response_data["user"]["followersCount"]
like_blogs = self._response_data["user"]["collectedEntriesCount"]
personal_data: Dict = {
"username": username,
"description": description,
"avatarImg": avatar_img,
"followee": followee,
"follower": follower,
"likeBlogs": like_blogs,
}
logger.debug(personal_data)
self.data_model.set_personal_data(data=personal_data)
self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs)
def _parse_personal_blogs(self, next_params: Optional[str] = None):
req_data: dict = {
"src": "web",
"uid": self._login_uid,
"device_id": self._login_client_id,
"token": self._login_token,
"targetUid": self._login_uid,
"type": "post",
"limit": "20",
"order": "createdAt",
}
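        # Cursor-based pagination: the "before" parameter carries the verifyCreatedAt
        # of the last entry from the previous page (20 entries per request).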
if next_params is not None:
req_data.update(before=next_params)
url_params: str = ""
for index, data in enumerate(req_data.items()):
if index == 0:
url_params += f"?{data[0]}={data[1]}"
else:
url_params += f"&{data[0]}={data[1]}"
blogs_url: str = f"{self._blogs_url}{url_params}"
response = self.make_request(url=blogs_url, headers=self._common_headers)
if response.content.decode() != "":
self._response_data = response.json()
if self._response_data is not None and self._response_data["m"] == "ok":
next_page_variable = None
entry_list = self._response_data["d"]["entrylist"]
if len(entry_list) > 0:
for personal_blog in entry_list:
blog_create_time = datetime_str_change_fmt(
time_str=personal_blog["createdAt"],
prev_fmt="%Y-%m-%dT%H:%M:%S.%fZ",
)
blog_data: Dict = {
"blogId": personal_blog["objectId"],
"blogTitle": personal_blog["title"],
"blogHref": personal_blog["originalUrl"],
"blogViewers": personal_blog["viewsCount"],
"blogCreateTime": blog_create_time,
}
self._blogs_data.append(blog_data)
next_page_variable = personal_blog["verifyCreatedAt"]
if self._response_data["d"]["total"] > 20:
time.sleep(0.5)
self._parse_personal_blogs(next_params=next_page_variable)
else:
logger.debug(self._blogs_data)
self.data_model.set_personal_blogs_data(data=self._blogs_data)
logger.info("获取个人博客数据成功!")
else:
logger.error("查询个人博客失败!")
self.update_task_status(
task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)
)
raise LoginException()
def _parse_personal_like_blogs(self, page_no: int = 0):
like_blogs_url: str = f"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20"
self._common_headers.update(
{
"X-Juejin-Client": str(self._login_client_id),
"X-Juejin-Src": "web",
"X-Juejin-Token": self._login_token,
"X-Juejin-Uid": self._login_uid,
}
)
response = self.make_request(url=like_blogs_url, headers=self._common_headers)
if response.content.decode() != "":
self._response_data = response.json()
if (
self._response_data is not None
and self._response_data["m"] == "success"
):
logger.info(f"当前正在获取第{page_no + 1}页的数据!")
if page_no == 0:
total_count = self._response_data["d"]["total"]
total_pages = total_count // 20
rest_count = total_count % 20
if rest_count != 0:
total_pages += 1
self._like_blogs_total_page = total_pages
entry_list = self._response_data["d"]["entryList"]
if len(entry_list) > 0:
for entry_data in entry_list:
if entry_data is None:
continue
blog_data: Dict = {
"blogId": entry_data["objectId"],
"blogTitle": entry_data["title"],
"blogHref": entry_data["originalUrl"],
"blogViewers": entry_data["viewsCount"],
"blogCreateTime": datetime_str_change_fmt(
time_str=entry_data["createdAt"],
prev_fmt="%Y-%m-%dT%H:%M:%S.%fZ",
),
}
self._like_blogs_data.append(blog_data)
page_no += 1
if page_no <= self._like_blogs_total_page:
                    # TODO: consider splitting this task across multiple threads later to fetch data concurrently
time.sleep(0.5)
self._parse_personal_like_blogs(page_no=page_no)
else:
# logger.debug(self._like_blogs_data)
logger.debug(f"获取到 {len(self._like_blogs_data)} 条个人点赞博客")
self.data_model.set_personal_like_blogs_data(
data=self._like_blogs_data
)
logger.info("获取个人点赞博客成功!")
# 任务末尾
self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish)
else:
logger.error("查询个人点赞博客失败!")
self.update_task_status(
task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)
)
raise ParseDataException()
def _test_cookies(self, cookies: Optional[str] = None) -> bool:
params = self.get_data(spider_name=f"{self._spider_name}:params")
if params is None:
return False
test_user_url: str = f"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}"
test_request_headers: Dict = self.get_default_headers()
test_response = self.make_request(
url=test_user_url, headers=test_request_headers
)
if (
test_response.status_code != 200
or check_is_json(test_response.content.decode()) is not True
):
logger.error(f"当前掘金账号登录状态: 已退出!")
self._async_task.remove_async_scheduler(job_id=self._spider_name)
return False
test_json_response = test_response.json()
if test_json_response["s"] == 1:
logger.info(f"当前掘金账号为: {self._login_username}, 状态: 已登录")
return True
else:
logger.error(f"当前掘金账号登录状态: 已退出!")
return False
| [
"utils.exception_utils.LoginException",
"time.sleep",
"utils.str_utils.check_is_phone_number",
"utils.logger_utils.LogManager",
"utils.str_utils.check_is_email_address",
"utils.exception_utils.ParseDataException",
"utils.time_utils.datetime_str_change_fmt"
] | [((471, 491), 'utils.logger_utils.LogManager', 'LogManager', (['__name__'], {}), '(__name__)\n', (481, 491), False, 'from utils.logger_utils import LogManager\n'), ((1681, 1729), 'utils.str_utils.check_is_phone_number', 'check_is_phone_number', ([], {'data': 'self._login_username'}), '(data=self._login_username)\n', (1702, 1729), False, 'from utils.str_utils import check_is_phone_number, check_is_email_address\n'), ((1752, 1801), 'utils.str_utils.check_is_email_address', 'check_is_email_address', ([], {'data': 'self._login_username'}), '(data=self._login_username)\n', (1774, 1801), False, 'from utils.str_utils import check_is_phone_number, check_is_email_address\n'), ((8826, 8842), 'utils.exception_utils.LoginException', 'LoginException', ([], {}), '()\n', (8840, 8842), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((11766, 11786), 'utils.exception_utils.ParseDataException', 'ParseDataException', ([], {}), '()\n', (11784, 11786), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((3687, 3703), 'utils.exception_utils.LoginException', 'LoginException', ([], {}), '()\n', (3701, 3703), False, 'from utils.exception_utils import LoginException, ParseDataException\n'), ((8338, 8353), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (8348, 8353), False, 'import time\n'), ((11045, 11060), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (11055, 11060), False, 'import time\n'), ((7536, 7635), 'utils.time_utils.datetime_str_change_fmt', 'datetime_str_change_fmt', ([], {'time_str': "personal_blog['createdAt']", 'prev_fmt': '"""%Y-%m-%dT%H:%M:%S.%fZ"""'}), "(time_str=personal_blog['createdAt'], prev_fmt=\n '%Y-%m-%dT%H:%M:%S.%fZ')\n", (7559, 7635), False, 'from utils.time_utils import datetime_str_change_fmt\n'), ((10610, 10706), 'utils.time_utils.datetime_str_change_fmt', 'datetime_str_change_fmt', ([], {'time_str': "entry_data['createdAt']", 'prev_fmt': '"""%Y-%m-%dT%H:%M:%S.%fZ"""'}), "(time_str=entry_data['createdAt'], prev_fmt=\n '%Y-%m-%dT%H:%M:%S.%fZ')\n", (10633, 10706), False, 'from utils.time_utils import datetime_str_change_fmt\n')] |
from paddle.vision.transforms import (
ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose,
HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation
)
from paddle.vision.datasets import Cifar100
from paddle.io import DataLoader
from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup
import random
from resnet20 import *
import paddle
# Supernet training, based on the PaddleSlim model compression toolkit
# https://github.com/PaddlePaddle/PaddleSlim (stars are welcome)
from paddleslim.nas.ofa.convert_super import Convert, supernet
from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig
from paddleslim.nas.ofa.utils import utils
channel_list = []
for i in range(1, 21):
if 0 < i <= 7:
# channel_list.append(random.choice([ 4, 8, 12, 16]))
channel_list.append(16)
elif 7 < i <= 13:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32]))
channel_list.append(32)
elif 13 < i <= 19:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
channel_list.append(64)
else:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
channel_list.append(64)
net = ResNet20(100, channel_list)
net2 = ResNet20(100, channel_list)
net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams'))
channel_optional = []
for i in range(0, 23):
if i <= 7:
channel_optional.append([4, 8, 12, 16])
# channel_optional.append([12, 16])
elif 7 < i <= 14:
channel_optional.append([4, 8, 12, 16, 20, 24, 28, 32])
# channel_optional.append([20, 24, 28, 32])
elif 14 < i <= 21:
channel_optional.append(
[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
else:
channel_optional.append(
[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
distill_config = DistillConfig(teacher_model=net2)
sp_net_config = supernet(channel=channel_optional)
sp_model = Convert(sp_net_config).convert(net)
ofa_net = OFA(sp_model, distill_config=distill_config)
ofa_net.set_task('channel')
model = paddle.Model(ofa_net)
MAX_EPOCH = 300
LR = 0.1
WEIGHT_DECAY = 5e-4
MOMENTUM = 0.9
BATCH_SIZE = 128
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.1942, 0.1918, 0.1958]
DATA_FILE = './data/data76994/cifar-100-python.tar.gz'
model.prepare(
paddle.optimizer.Momentum(
learning_rate=LinearWarmup(
CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR),
momentum=MOMENTUM,
parameters=model.parameters(),
weight_decay=WEIGHT_DECAY),
CrossEntropyLoss(),
paddle.metric.Accuracy(topk=(1, 5)))
transforms = Compose([
RandomCrop(32, padding=4),
RandomApply(BrightnessTransform(0.1)),
RandomApply(ContrastTransform(0.1)),
RandomHorizontalFlip(),
RandomRotation(15),
ToArray(),
Normalize(CIFAR_MEAN, CIFAR_STD),
])
val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)])
train_set = Cifar100(DATA_FILE, mode='train', transform=transforms)
test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms)
callbacks = [LRSchedulerM(), callbacks.VisualDL('vis_logs/ofa_resnet20')]
model.fit(
train_set,
test_set,
epochs=MAX_EPOCH,
batch_size=BATCH_SIZE,
save_dir='checkpoints',
save_freq=100,
shuffle=True,
num_workers=4,
verbose=1,
callbacks=callbacks,
)
| [
"paddle.optimizer.lr.CosineAnnealingDecay",
"paddleslim.nas.ofa.convert_super.supernet",
"paddle.vision.datasets.Cifar100",
"paddle.Model",
"paddle.metric.Accuracy",
"paddleslim.nas.ofa.convert_super.Convert",
"paddle.vision.transforms.RandomCrop",
"paddle.vision.transforms.Normalize",
"paddle.vision.transforms.ContrastTransform",
"paddleslim.nas.ofa.OFA",
"paddle.vision.transforms.RandomRotation",
"paddle.load",
"paddle.vision.transforms.BrightnessTransform",
"paddle.vision.transforms.RandomHorizontalFlip",
"paddleslim.nas.ofa.DistillConfig"
] | [((2136, 2169), 'paddleslim.nas.ofa.DistillConfig', 'DistillConfig', ([], {'teacher_model': 'net2'}), '(teacher_model=net2)\n', (2149, 2169), False, 'from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig\n'), ((2186, 2220), 'paddleslim.nas.ofa.convert_super.supernet', 'supernet', ([], {'channel': 'channel_optional'}), '(channel=channel_optional)\n', (2194, 2220), False, 'from paddleslim.nas.ofa.convert_super import Convert, supernet\n'), ((2278, 2322), 'paddleslim.nas.ofa.OFA', 'OFA', (['sp_model'], {'distill_config': 'distill_config'}), '(sp_model, distill_config=distill_config)\n', (2281, 2322), False, 'from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig\n'), ((2361, 2382), 'paddle.Model', 'paddle.Model', (['ofa_net'], {}), '(ofa_net)\n', (2373, 2382), False, 'import paddle\n'), ((3237, 3292), 'paddle.vision.datasets.Cifar100', 'Cifar100', (['DATA_FILE'], {'mode': '"""train"""', 'transform': 'transforms'}), "(DATA_FILE, mode='train', transform=transforms)\n", (3245, 3292), False, 'from paddle.vision.datasets import Cifar100\n'), ((3304, 3362), 'paddle.vision.datasets.Cifar100', 'Cifar100', (['DATA_FILE'], {'mode': '"""test"""', 'transform': 'val_transforms'}), "(DATA_FILE, mode='test', transform=val_transforms)\n", (3312, 3362), False, 'from paddle.vision.datasets import Cifar100\n'), ((1389, 1440), 'paddle.load', 'paddle.load', (['"""./pretrained_model/resnet20.pdparams"""'], {}), "('./pretrained_model/resnet20.pdparams')\n", (1400, 1440), False, 'import paddle\n'), ((2868, 2903), 'paddle.metric.Accuracy', 'paddle.metric.Accuracy', ([], {'topk': '(1, 5)'}), '(topk=(1, 5))\n', (2890, 2903), False, 'import paddle\n'), ((2232, 2254), 'paddleslim.nas.ofa.convert_super.Convert', 'Convert', (['sp_net_config'], {}), '(sp_net_config)\n', (2239, 2254), False, 'from paddleslim.nas.ofa.convert_super import Convert, supernet\n'), ((2933, 2958), 'paddle.vision.transforms.RandomCrop', 'RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (2943, 2958), False, 'from paddle.vision.transforms import ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation\n'), ((3048, 3070), 'paddle.vision.transforms.RandomHorizontalFlip', 'RandomHorizontalFlip', ([], {}), '()\n', (3068, 3070), False, 'from paddle.vision.transforms import ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation\n'), ((3076, 3094), 'paddle.vision.transforms.RandomRotation', 'RandomRotation', (['(15)'], {}), '(15)\n', (3090, 3094), False, 'from paddle.vision.transforms import ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation\n'), ((3115, 3147), 'paddle.vision.transforms.Normalize', 'Normalize', (['CIFAR_MEAN', 'CIFAR_STD'], {}), '(CIFAR_MEAN, CIFAR_STD)\n', (3124, 3147), False, 'from paddle.vision.transforms import ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation\n'), ((3190, 3222), 'paddle.vision.transforms.Normalize', 'Normalize', (['CIFAR_MEAN', 'CIFAR_STD'], {}), '(CIFAR_MEAN, CIFAR_STD)\n', (3199, 3222), False, 'from paddle.vision.transforms import ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, 
HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation\n'), ((2976, 3000), 'paddle.vision.transforms.BrightnessTransform', 'BrightnessTransform', (['(0.1)'], {}), '(0.1)\n', (2995, 3000), False, 'from paddle.vision.transforms import ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation\n'), ((3019, 3041), 'paddle.vision.transforms.ContrastTransform', 'ContrastTransform', (['(0.1)'], {}), '(0.1)\n', (3036, 3041), False, 'from paddle.vision.transforms import ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose, HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation\n'), ((2686, 2721), 'paddle.optimizer.lr.CosineAnnealingDecay', 'CosineAnnealingDecay', (['LR', 'MAX_EPOCH'], {}), '(LR, MAX_EPOCH)\n', (2706, 2721), False, 'from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup\n')] |
import json
import re
from string import ascii_uppercase
from time import time
from urllib.parse import urljoin
import scrapy
from more_itertools import first
from scrapy import Request
from product_spider.items import JkProduct, JKPackage
from product_spider.utils.functions import strip
class JkPrdSpider(scrapy.Spider):
name = "jk"
allowed_domains = ["jkchemical.com"]
base_url = "http://www.jkchemical.com"
start_urls = map(lambda x: "http://www.jkchemical.com/CH/products/index/ProductName/{0}.html".format(x),
ascii_uppercase)
prd_size_url = "http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}"
def parse(self, response):
for xp_url in response.xpath("//div[@class='yy toa']//a/@href"):
tmp_url = self.base_url + xp_url.extract()
yield Request(tmp_url.replace("EN", "CH"), callback=self.parse_list)
def parse_list(self, response):
xp_boxes = response.xpath("//table[@id]//div[@class='PRODUCT_box']")
for xp_box in xp_boxes:
div = xp_box.xpath(".//div[2][@class='left_right mulu_text']")
brand = strip(div.xpath('.//li[@id="ctl00_cph_Content_li_lt_Brand"]/text()').get(), '')
rel_url = div.xpath('.//a[@class="name"]/@href').get()
img_url = div.xpath('.//img/@src').get()
d = {
'brand': brand.replace('-', '') or None,
"purity": div.xpath(".//li[1]/text()").get('').split(u":")[-1].strip(),
"cas": strip(div.xpath(".//li[2]//a/text()").get()),
"cat_no": div.xpath(".//li[4]/text()").get().split(u":")[-1].strip(),
"en_name": strip(xp_box.xpath(".//a[@class='name']/text()").get()),
"cn_name": strip(xp_box.xpath(".//a[@class='name']//span[1]/text()").get()),
'prd_url': rel_url and urljoin(response.url, rel_url),
'img_url': img_url and urljoin(response.url, img_url),
}
data_jkid = xp_box.xpath(".//div[@data-jkid]/@data-jkid").get()
data_cid = xp_box.xpath(".//div[@data-cid]/@data-cid").get()
yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())),
body=u"",
meta={"prd_data": d},
callback=self.parse_package)
next_page = response.xpath('//a[contains(text(), "下一页")]/@href').get()
if next_page:
yield Request(urljoin(response.url, next_page), callback=self.parse_list)
def parse_package(self, response):
s = re.findall(r"(?<=\().+(?=\))", response.text)[0]
packages = json.loads(s)
d = response.meta.get('prd_data', {})
package = first(packages, {})
if package:
d['brand'] = d['brand'] or package.get('Product', {}).get('BrandName')
yield JkProduct(**d)
for package_obj in packages:
catalog_price = package_obj.get("CatalogPrice", {})
dd = {
'brand': d.get('brand'),
'cat_no': d.get('cat_no'),
'package': package_obj.get("stringFormat"),
'price': catalog_price and catalog_price.get('Value'),
'currency': catalog_price and strip(catalog_price.get('Currency')),
'attrs': json.dumps(package_obj),
}
yield JKPackage(**dd)
| [
"json.loads",
"product_spider.items.JKPackage",
"json.dumps",
"urllib.parse.urljoin",
"time.time",
"re.findall",
"more_itertools.first",
"product_spider.items.JkProduct"
] | [((2746, 2759), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (2756, 2759), False, 'import json\n'), ((2824, 2843), 'more_itertools.first', 'first', (['packages', '{}'], {}), '(packages, {})\n', (2829, 2843), False, 'from more_itertools import first\n'), ((2678, 2724), 're.findall', 're.findall', (['"""(?<=\\\\().+(?=\\\\))"""', 'response.text'], {}), "('(?<=\\\\().+(?=\\\\))', response.text)\n", (2688, 2724), False, 'import re\n'), ((2961, 2975), 'product_spider.items.JkProduct', 'JkProduct', ([], {}), '(**d)\n', (2970, 2975), False, 'from product_spider.items import JkProduct, JKPackage\n'), ((3420, 3443), 'json.dumps', 'json.dumps', (['package_obj'], {}), '(package_obj)\n', (3430, 3443), False, 'import json\n'), ((3477, 3492), 'product_spider.items.JKPackage', 'JKPackage', ([], {}), '(**dd)\n', (3486, 3492), False, 'from product_spider.items import JkProduct, JKPackage\n'), ((1933, 1963), 'urllib.parse.urljoin', 'urljoin', (['response.url', 'rel_url'], {}), '(response.url, rel_url)\n', (1940, 1963), False, 'from urllib.parse import urljoin\n'), ((2004, 2034), 'urllib.parse.urljoin', 'urljoin', (['response.url', 'img_url'], {}), '(response.url, img_url)\n', (2011, 2034), False, 'from urllib.parse import urljoin\n'), ((2566, 2598), 'urllib.parse.urljoin', 'urljoin', (['response.url', 'next_page'], {}), '(response.url, next_page)\n', (2573, 2598), False, 'from urllib.parse import urljoin\n'), ((2289, 2295), 'time.time', 'time', ([], {}), '()\n', (2293, 2295), False, 'from time import time\n')] |
import os
import sys
import random
import datetime
import gym
from gym import spaces
import numpy as np
from env.IDM import IDM
from env.Road import Road
from env.Vehicle import Vehicle
import math
# add sumo/tools into python environment
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
print('success')
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
######################################################################
# simulation environments
class LaneChangeEnv(gym.Env):
def __init__(self, id=None, traffic=1, gui=False, seed=None):
# todo check traffic flow density
if traffic == 0:
# average 9 vehicles
self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapFree.sumo.cfg'
elif traffic == 2:
# average 19 vehicles
self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/mapDense.sumo.cfg'
else:
# average 14 vehicles
self.cfg = 'C:/Users/<NAME>/Desktop/map/ramp3/map.sumo.cfg'
# arguments must be string, if float/int, must be converted to str(float/int), instead of '3.0'
self.sumoBinary = "/usr/local/Cellar/sumo/1.2.0/bin/sumo"
self.sumoCmd = ['-c', self.cfg,
# '--lanechange.duration', str(3), # using 'Simple Continuous lane-change model'
'--lateral-resolution', str(0.8), # using 'Sublane-Model'
'--step-length', str(0.1),
'--default.action-step-length', str(0.1)]
# randomness
if seed is None:
self.sumoCmd += ['--random']
else:
self.sumoCmd += ['--seed', str(seed)]
# gui
if gui is True:
self.sumoBinary += '-gui'
self.sumoCmd = [self.sumoBinary] + self.sumoCmd + ['--quit-on-end', str(True),
'--start', str(True)]
else:
self.sumoCmd = [self.sumoBinary] + self.sumoCmd
traci.start(self.sumoCmd)
self.rd = Road()
self.timestep = 0
self.dt = traci.simulation.getDeltaT()
self.randomseed = None
self.sumoseed = None
self.veh_dict = {}
self.vehID_tuple_all = ()
self.egoID = id
self.ego = None
# self.tgtLane = tgtlane
self.is_success = False
self.collision_num = 0
self.lateral_action = 2
# self.observation = [[0, 0, 0], # ego lane position and speed
# [0, 0, 0], # leader
# [0, 0, 0], # target lane leader
# [0, 0, 0]] # target lane follower
self.observation = np.empty(20)
self.reward = None # (float) : amount of reward returned after previous action
self.done = True # (bool): whether the episode has ended, in which case further step() calls will return undefined results
self.info = {
'resetFlag': 0} # (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
self.action_space = spaces.Discrete(6)
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,))
def update_veh_dict(self, veh_id_tuple):
for veh_id in veh_id_tuple:
if veh_id not in self.veh_dict.keys():
self.veh_dict[veh_id] = Vehicle(veh_id, self.rd)
for veh_id in list(self.veh_dict.keys()):
if veh_id not in veh_id_tuple:
self.veh_dict.pop(veh_id)
for veh_id in list(self.veh_dict.keys()):
self.veh_dict[veh_id].update_info(self.rd, self.veh_dict)
def _updateObservationSingle(self, name, veh):
"""
:param name: 0:ego; 1:leader; 2:target leader; 3:target follower
:param id: vehicle id corresponding to name
:return:
"""
if veh is not None:
self.observation[name * 4 + 0] = veh.lanePos
self.observation[name * 4 + 1] = veh.speed
self.observation[name * 4 + 2] = veh.pos_lat
self.observation[name * 4 + 3] = veh.acce
else:
self.observation[name * 4 + 0] = self.observation[0] + 300.
self.observation[name * 4 + 1] = self.observation[1]
self.observation[name * 4 + 2] = 4.8
self.observation[name * 4 + 3] = 0
# todo check if rational
def updateObservation(self):
self.observation[0] = self.ego.lanePos
self.observation[1] = self.ego.speed
self.observation[2] = self.ego.pos_lat
self.observation[3] = self.ego.acce
self._updateObservationSingle(1, self.ego.orig_leader)
self._updateObservationSingle(2, self.ego.orig_follower)
self._updateObservationSingle(3, self.ego.trgt_leader)
self._updateObservationSingle(4, self.ego.trgt_follower)
# self.observation = np.array(self.observation).flatten()
# print(self.observation.shape)
def updateReward(self):
return -self.ego.dis2tgtLane
def updateReward2(self):
wc1 = 1
wc2 = 1
wt = 1
ws = 1
we = 1
# reward related to comfort
r_comf = wc1 * self.ego.acce ** 2 + wc2 * self.ego.delta_acce ** 2
# reward related to efficiency
r_time = - wt * self.timestep
r_speed = ws * (self.ego.speed - self.ego_speedLimit)
r_effi = we * self.ego.dis2tgtLane / self.ego.dis2entrance
r_effi_all = r_time + r_speed + r_effi
# reward related to safety
w_lateral = 1
w_longi = 1
if self.ego.leaderID is not None:
# compute longitudinal time gap
delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed
delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce
if delta_A == 0:
TTC = - abs(self.ego.leaderDis)/delta_V
else:
TTC = -delta_V - math.sqrt(delta_V**2 + 2*delta_A * self.ego.leaderDis)
TTC = TTC/delta_A
if self.lateral_action != 1 and 0 < TTC < 2:
r_long_c = - math.exp(-2*TTC+5)
else:
r_long_c = 0
if self.lateral_action == 0: #abort lane change
alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2
assert 0 <= alpha <= 1.1
r_lat_c = -math.exp(-4*alpha+5)
else:
r_lat_c = 0
if self.ego.targetLeaderID is not None:
# compute longitudinal time gap
delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed
delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce
delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos
if delta_A2 == 0:
TTC2 = - abs(delta_D2) / delta_V2
else:
TTC2 = -delta_V2 - math.sqrt(delta_V2 ** 2 + 2 * delta_A2 * delta_D2)
TTC2 = TTC2 / delta_A2
if self.lateral_action == 1 and 0 < TTC2 < 2:
r_long_t = - math.exp(-2 * TTC2 + 5)
else:
r_long_t = 0
if self.lateral_action == 1: # lane change
alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2
assert 0 <= alpha <= 1.1
r_lat_t = -math.exp(-4*alpha+5)
else:
r_lat_t = 0
r_safe = w_lateral * (r_lat_c + r_lat_t) + w_longi * (r_long_c+ r_long_t)
#
# if self.ego.leaderID is not None:
# # ('lateralPos2leader', abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat))
# alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2
# assert 0 <= alpha <= 1.1
# r_safe_leader = w_lateral * alpha + w_longi * (1 - alpha) * abs(self.ego.leaderDis)
# else:
# r_safe_leader = 0
# if self.ego.targetLeaderID is not None:
# # print('lateralPos2tgtleader', abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat))
# alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2
# # print('alpha', alpha)
# assert 0 <= alpha <= 1.1
#
# r_safe_tgtleader = w_lateral * alpha + w_longi * (1 - alpha) * abs(
# self.ego.lanePos - self.veh_dict[self.ego.targetLeaderID].lanePos)
# else:
# r_safe_tgtleader = 0
#
#
# r_safe = r_safe_leader + r_safe_tgtleader
# total reward
r_total = r_comf + r_effi_all + r_safe
return r_total
def is_done(self):
# lane change successfully executed, episode ends, reset env
# todo modify
if self.is_success:
self.done = True
# print('reset on: successfully lane change, dis2targetlane:',
# self.ego.dis2tgtLane)
# too close to ramp entrance
if self.ego.dis2entrance < 10.0:
self.done = True
# print('reset on: too close to ramp entrance, dis2targetlane:',
# self.ego.dis2tgtLane)
# ego vehicle out of env
if self.egoID not in self.vehID_tuple_all:
self.done = True
# print('reset on: self.ego not in env:', self.egoID not in self.vehID_tuple_all)
# collision occurs
self.collision_num = traci.simulation.getCollidingVehiclesNumber()
if self.collision_num > 0:
self.done = True
# print('reset on: self.collision_num:', self.collision_num)
def preStep(self):
traci.simulationStep()
self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID)
self.update_veh_dict(self.vehID_tuple_all)
def step(self, action=2):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, call `reset()` outside env!! to reset this
environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): longitudinal0: action[0] = 1: accelerate
action[0] = -1: decelerate
action[0] = 0: use SUMO default
action[0] = others: acce = 0.0
longitudinal1: action[0] = 0: follow original lane leader
action[0] = 1: follow closer leader
longitudinal2: action[0] = 0: follow original lane leader
action[0] = 1: follow target lane leader
**important**: orginal/target lane leader will not change despite the lateral position of
the ego may change
lateral: action[1] = 1: lane change
action[1] = 0: abort lane change, change back to original lane
action[1] = 2: keep in current lateral position
Returns:
described in __init__
"""
action_longi = action // 3
action_lateral = action % 3
self.lateral_action = action_lateral
# action_longi = action[0]
# action_lateral = action[1]
assert self.done is False, 'self.done is not False'
assert action is not None, 'action is None'
assert self.egoID in self.vehID_tuple_all, 'vehicle not in env'
self.timestep += 1
# lateral control-------------------------
# episode in progress; 0:change back to original line; 1:lane change to target lane; 2:keep current
# lane change to target lane
if not self.is_success:
if action_lateral == 1: # and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01:
self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd)
# print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth)
# print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth))
# abort lane change, change back to ego's original lane
if action_lateral == 0: # and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01:
self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd)
# print('left', 1.5 * self.rd.laneWidth - self.ego.pos_lat)
# keep current lateral position
if action_lateral == 2:
self.is_success = self.ego.changeLane(True, -1, self.rd)
# longitudinal control2---------------------
acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi)
# print(acceNext)
vNext = self.ego.speed + acceNext * 0.1
traci.vehicle.setSpeed(self.egoID, vNext)
# update info------------------------------
traci.simulationStep()
self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID)
self.update_veh_dict(self.vehID_tuple_all)
# check if episode ends
self.is_done()
if self.done is True:
self.info['resetFlag'] = True
return self.observation, 0.0, self.done, self.info
else:
self.updateObservation()
self.reward = self.updateReward()
return self.observation, self.reward, self.done, self.info
def seed(self, seed=None):
if seed is None:
self.randomseed = datetime.datetime.now().microsecond
else:
self.randomseed = seed
random.seed(self.randomseed)
def reset(self, egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None):
"""
reset env
:param id: ego vehicle id
:param tfc: int. 0:light; 1:medium; 2:dense
:return: initial observation
"""
self.seed(randomseed)
        if sumoseed is None:
            self.sumoseed = self.randomseed
        else:
            self.sumoseed = sumoseed
traci.close()
self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed)
# continue step until ego appears in env
if self.egoID is not None:
while self.egoID not in self.veh_dict.keys():
# must ensure safety in preStpe
self.preStep()
if self.timestep > 5000:
raise Exception('cannot find ego after 5000 timesteps')
assert self.egoID in self.vehID_tuple_all, "cannot start training while ego is not in env"
self.done = False
self.ego = self.veh_dict[self.egoID]
self.ego.trgt_laneIndex = tlane
self.ego.is_ego = 1
# set ego vehicle speed mode
traci.vehicle.setSpeedMode(self.ego.veh_id, 0)
self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid)
self.ego_speedLimit = self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID))
self.ego.idm_obj = IDM()
self.ego.idm_obj.__init__(self.ego_speedLimit)
self.ego.update_info(self.rd, self.veh_dict)
self.updateObservation()
return self.observation
return
def close(self):
traci.close()
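# --- Illustrative usage sketch (not part of the original file) --------------
# LaneChangeEnv.step() above decodes its Discrete(6) action into a
# longitudinal component (action // 3) and a lateral component (action % 3).
# The helper and rollout below are illustrative assumptions only: the ego id
# and episode settings depend on the SUMO route files, and a working SUMO
# installation is required, so treat this as a sketch rather than a test.
def decode_action(action: int):
    action_longi = action // 3   # longitudinal mode handed to updateLongitudinalSpeedIDM
    action_lateral = action % 3  # 0: abort lane change, 1: change to target lane, 2: keep position
    return action_longi, action_lateral
def _rollout_sketch(ego_id='ego', max_steps=1000):
    env = LaneChangeEnv(gui=False)
    obs = env.reset(egoid=ego_id, tlane=0, tfc=1, is_gui=False)
    for _ in range(max_steps):
        obs, reward, done, info = env.step(2)  # action 2 -> longitudinal 0, keep lateral position
        if done:
            break
    env.close()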
| [
"math.sqrt",
"env.IDM.IDM",
"traci.vehicle.getSpeedFactor",
"sys.exit",
"env.Vehicle.Vehicle",
"math.exp",
"sys.path.append",
"traci.simulation.getDeltaT",
"numpy.empty",
"traci.vehicle.setSpeed",
"traci.vehicle.getLaneID",
"env.Road.Road",
"traci.vehicle.setSpeedMode",
"gym.spaces.Discrete",
"traci.simulationStep",
"traci.close",
"traci.simulation.getCollidingVehiclesNumber",
"traci.edge.getLastStepVehicleIDs",
"traci.start",
"os.path.join",
"random.seed",
"gym.spaces.Box",
"datetime.datetime.now"
] | [((282, 328), 'os.path.join', 'os.path.join', (["os.environ['SUMO_HOME']", '"""tools"""'], {}), "(os.environ['SUMO_HOME'], 'tools')\n", (294, 328), False, 'import os\n'), ((333, 355), 'sys.path.append', 'sys.path.append', (['tools'], {}), '(tools)\n', (348, 355), False, 'import sys\n'), ((387, 446), 'sys.exit', 'sys.exit', (['"""please declare environment variable \'SUMO_HOME\'"""'], {}), '("please declare environment variable \'SUMO_HOME\'")\n', (395, 446), False, 'import sys\n'), ((2097, 2122), 'traci.start', 'traci.start', (['self.sumoCmd'], {}), '(self.sumoCmd)\n', (2108, 2122), False, 'import traci\n'), ((2142, 2148), 'env.Road.Road', 'Road', ([], {}), '()\n', (2146, 2148), False, 'from env.Road import Road\n'), ((2193, 2221), 'traci.simulation.getDeltaT', 'traci.simulation.getDeltaT', ([], {}), '()\n', (2219, 2221), False, 'import traci\n'), ((2800, 2812), 'numpy.empty', 'np.empty', (['(20)'], {}), '(20)\n', (2808, 2812), True, 'import numpy as np\n'), ((3213, 3231), 'gym.spaces.Discrete', 'spaces.Discrete', (['(6)'], {}), '(6)\n', (3228, 3231), False, 'from gym import spaces\n'), ((3265, 3314), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-np.inf)', 'high': 'np.inf', 'shape': '(20,)'}), '(low=-np.inf, high=np.inf, shape=(20,))\n', (3275, 3314), False, 'from gym import spaces\n'), ((9695, 9740), 'traci.simulation.getCollidingVehiclesNumber', 'traci.simulation.getCollidingVehiclesNumber', ([], {}), '()\n', (9738, 9740), False, 'import traci\n'), ((9910, 9932), 'traci.simulationStep', 'traci.simulationStep', ([], {}), '()\n', (9930, 9932), False, 'import traci\n'), ((9964, 10020), 'traci.edge.getLastStepVehicleIDs', 'traci.edge.getLastStepVehicleIDs', (['self.rd.entranceEdgeID'], {}), '(self.rd.entranceEdgeID)\n', (9996, 10020), False, 'import traci\n'), ((13256, 13297), 'traci.vehicle.setSpeed', 'traci.vehicle.setSpeed', (['self.egoID', 'vNext'], {}), '(self.egoID, vNext)\n', (13278, 13297), False, 'import traci\n'), ((13359, 13381), 'traci.simulationStep', 'traci.simulationStep', ([], {}), '()\n', (13379, 13381), False, 'import traci\n'), ((13413, 13469), 'traci.edge.getLastStepVehicleIDs', 'traci.edge.getLastStepVehicleIDs', (['self.rd.entranceEdgeID'], {}), '(self.rd.entranceEdgeID)\n', (13445, 13469), False, 'import traci\n'), ((14059, 14087), 'random.seed', 'random.seed', (['self.randomseed'], {}), '(self.randomseed)\n', (14070, 14087), False, 'import random\n'), ((14455, 14468), 'traci.close', 'traci.close', ([], {}), '()\n', (14466, 14468), False, 'import traci\n'), ((15703, 15716), 'traci.close', 'traci.close', ([], {}), '()\n', (15714, 15716), False, 'import traci\n'), ((15196, 15242), 'traci.vehicle.setSpeedMode', 'traci.vehicle.setSpeedMode', (['self.ego.veh_id', '(0)'], {}), '(self.ego.veh_id, 0)\n', (15222, 15242), False, 'import traci\n'), ((15278, 15313), 'traci.vehicle.getSpeedFactor', 'traci.vehicle.getSpeedFactor', (['egoid'], {}), '(egoid)\n', (15306, 15313), False, 'import traci\n'), ((15463, 15468), 'env.IDM.IDM', 'IDM', ([], {}), '()\n', (15466, 15468), False, 'from env.IDM import IDM\n'), ((3488, 3512), 'env.Vehicle.Vehicle', 'Vehicle', (['veh_id', 'self.rd'], {}), '(veh_id, self.rd)\n', (3495, 3512), False, 'from env.Vehicle import Vehicle\n'), ((13966, 13989), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (13987, 13989), False, 'import datetime\n'), ((6097, 6155), 'math.sqrt', 'math.sqrt', (['(delta_V ** 2 + 2 * delta_A * self.ego.leaderDis)'], {}), '(delta_V ** 2 + 2 * delta_A * self.ego.leaderDis)\n', (6106, 6155), False, 'import 
math\n'), ((6274, 6296), 'math.exp', 'math.exp', (['(-2 * TTC + 5)'], {}), '(-2 * TTC + 5)\n', (6282, 6296), False, 'import math\n'), ((6564, 6588), 'math.exp', 'math.exp', (['(-4 * alpha + 5)'], {}), '(-4 * alpha + 5)\n', (6572, 6588), False, 'import math\n'), ((7117, 7167), 'math.sqrt', 'math.sqrt', (['(delta_V2 ** 2 + 2 * delta_A2 * delta_D2)'], {}), '(delta_V2 ** 2 + 2 * delta_A2 * delta_D2)\n', (7126, 7167), False, 'import math\n'), ((7295, 7318), 'math.exp', 'math.exp', (['(-2 * TTC2 + 5)'], {}), '(-2 * TTC2 + 5)\n', (7303, 7318), False, 'import math\n'), ((7591, 7615), 'math.exp', 'math.exp', (['(-4 * alpha + 5)'], {}), '(-4 * alpha + 5)\n', (7599, 7615), False, 'import math\n'), ((15394, 15429), 'traci.vehicle.getLaneID', 'traci.vehicle.getLaneID', (['self.egoID'], {}), '(self.egoID)\n', (15417, 15429), False, 'import traci\n')] |
import boto3
import ipaddress
import json
import logging
import os
import requests
import uuid
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
client = boto3.client('ssm')
def downloader(instance, latest, parameter, link):
r = requests.get(link)
cidrs = r.json()
if r.status_code == 200:
for cidr in cidrs:
try:
if len(cidr['ips']) != 0:
for ip in cidr['ips']:
sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip
hostmask = ip.split('/')
iptype = ipaddress.ip_address(hostmask[0])
nametype = 'IPv'+str(iptype.version)+'#'
if nametype == 'IPv4#':
netrange = ipaddress.IPv4Network(ip)
first, last = netrange[0], netrange[-1]
firstip = int(ipaddress.IPv4Address(first))
lastip = int(ipaddress.IPv4Address(last))
elif nametype == 'IPv6#':
netrange = ipaddress.IPv6Network(ip)
first, last = netrange[0], netrange[-1]
firstip = int(ipaddress.IPv6Address(first))
lastip = int(ipaddress.IPv6Address(last))
table.put_item(
Item= {
'pk': nametype,
'sk': sortkey,
'service': cidr['serviceArea'],
'cidr': ip,
'created': latest,
'endpoint': instance,
'firstip': firstip,
'lastip': lastip
}
)
            except Exception:
                # skip malformed endpoint entries instead of aborting the whole update
                pass
logger.info('o365 '+instance+' IP Ranges Updated')
response = client.put_parameter(
Name = parameter,
Value = str(latest),
Type = 'String',
Overwrite = True
)
def handler(event, context):
r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4()))
logger.info('Link Status Code: '+str(r.status_code))
if r.status_code == 200:
versions = r.json()
logger.info(versions)
for version in versions:
if version['instance'] == 'Worldwide':
response = client.get_parameter(Name=os.environ['WORLD_PARAMETER'])
prevtoken = response['Parameter']['Value']
if prevtoken != str(version['latest']):
logger.info('Updating o365 Worldwide IP Ranges')
link = 'https://endpoints.office.com/endpoints/worldwide?clientrequestid='+str(uuid.uuid4())
downloader(version['instance'], version['latest'], os.environ['WORLD_PARAMETER'], link)
elif version['instance'] == 'USGovDoD':
response = client.get_parameter(Name=os.environ['DOD_PARAMETER'])
prevtoken = response['Parameter']['Value']
if prevtoken != str(version['latest']):
logger.info('Updating o365 USGovDoD IP Ranges')
link = 'https://endpoints.office.com/endpoints/USGOVDoD?clientrequestid='+str(uuid.uuid4())
downloader(version['instance'], version['latest'], os.environ['DOD_PARAMETER'], link)
elif version['instance'] == 'USGovGCCHigh':
response = client.get_parameter(Name=os.environ['HIGH_PARAMETER'])
prevtoken = response['Parameter']['Value']
if prevtoken != str(version['latest']):
logger.info('Updating o365 USGovGCCHigh IP Ranges')
link = 'https://endpoints.office.com/endpoints/USGOVGCCHigh?clientrequestid='+str(uuid.uuid4())
downloader(version['instance'], version['latest'], os.environ['HIGH_PARAMETER'], link)
elif version['instance'] == 'China':
response = client.get_parameter(Name=os.environ['CHINA_PARAMETER'])
prevtoken = response['Parameter']['Value']
if prevtoken != str(version['latest']):
logger.info('Updating o365 China IP Ranges')
link = 'https://endpoints.office.com/endpoints/China?clientrequestid='+str(uuid.uuid4())
downloader(version['instance'], version['latest'], os.environ['CHINA_PARAMETER'], link)
elif version['instance'] == 'Germany':
response = client.get_parameter(Name=os.environ['GERMANY_PARAMETER'])
prevtoken = response['Parameter']['Value']
if prevtoken != str(version['latest']):
logger.info('Updating o365 Germany IP Ranges')
link = 'https://endpoints.office.com/endpoints/Germany?clientrequestid='+str(uuid.uuid4())
downloader(version['instance'], version['latest'], os.environ['GERMANY_PARAMETER'], link)
else:
logger.info('No o365 IP Range Updates')
return {
'statusCode': 200,
'body': json.dumps('Download o365 IP Ranges')
}
| [
"logging.getLogger",
"boto3.client",
"ipaddress.IPv6Address",
"ipaddress.IPv4Address",
"ipaddress.IPv4Network",
"json.dumps",
"ipaddress.IPv6Network",
"requests.get",
"uuid.uuid4",
"boto3.resource",
"ipaddress.ip_address"
] | [((105, 124), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (122, 124), False, 'import logging\n'), ((167, 193), 'boto3.resource', 'boto3.resource', (['"""dynamodb"""'], {}), "('dynamodb')\n", (181, 193), False, 'import boto3\n'), ((261, 280), 'boto3.client', 'boto3.client', (['"""ssm"""'], {}), "('ssm')\n", (273, 280), False, 'import boto3\n'), ((346, 364), 'requests.get', 'requests.get', (['link'], {}), '(link)\n', (358, 364), False, 'import requests\n'), ((5422, 5459), 'json.dumps', 'json.dumps', (['"""Download o365 IP Ranges"""'], {}), "('Download o365 IP Ranges')\n", (5432, 5459), False, 'import json\n'), ((2416, 2428), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2426, 2428), False, 'import uuid\n'), ((713, 746), 'ipaddress.ip_address', 'ipaddress.ip_address', (['hostmask[0]'], {}), '(hostmask[0])\n', (733, 746), False, 'import ipaddress\n'), ((899, 924), 'ipaddress.IPv4Network', 'ipaddress.IPv4Network', (['ip'], {}), '(ip)\n', (920, 924), False, 'import ipaddress\n'), ((3031, 3043), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3041, 3043), False, 'import uuid\n'), ((1035, 1063), 'ipaddress.IPv4Address', 'ipaddress.IPv4Address', (['first'], {}), '(first)\n', (1056, 1063), False, 'import ipaddress\n'), ((1106, 1133), 'ipaddress.IPv4Address', 'ipaddress.IPv4Address', (['last'], {}), '(last)\n', (1127, 1133), False, 'import ipaddress\n'), ((1224, 1249), 'ipaddress.IPv6Network', 'ipaddress.IPv6Network', (['ip'], {}), '(ip)\n', (1245, 1249), False, 'import ipaddress\n'), ((3568, 3580), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3578, 3580), False, 'import uuid\n'), ((1360, 1388), 'ipaddress.IPv6Address', 'ipaddress.IPv6Address', (['first'], {}), '(first)\n', (1381, 1388), False, 'import ipaddress\n'), ((1431, 1458), 'ipaddress.IPv6Address', 'ipaddress.IPv6Address', (['last'], {}), '(last)\n', (1452, 1458), False, 'import ipaddress\n'), ((4116, 4128), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4126, 4128), False, 'import uuid\n'), ((4645, 4657), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4655, 4657), False, 'import uuid\n'), ((5183, 5195), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5193, 5195), False, 'import uuid\n')] |
#!/usr/bin/env python
import os
import socket
import subprocess
import argparse
import logging
LOGGER = logging.getLogger(__name__)
class ValidatorError(Exception):
pass
def ping(address):
try:
subprocess.check_call(('ping', '-c 1', '-W 1', address), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
LOGGER.info('Ping server %s - OK', address)
except subprocess.CalledProcessError as e:
LOGGER.error('Ping server %s - Failed', address)
raise ValidatorError(e)
ping.short_name = 'PING'
def port(address, port):
s = socket.socket()
try:
s.connect((address, port))
LOGGER.info('Checking port %s:%d - OK', address, port)
except socket.error as e:
LOGGER.error('Checking port %s:%d - Failed', address, port)
raise ValidatorError(e)
port.short_name = 'PORT'
| [
"logging.getLogger",
"socket.socket",
"subprocess.check_call"
] | [((106, 133), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (123, 133), False, 'import logging\n'), ((569, 584), 'socket.socket', 'socket.socket', ([], {}), '()\n', (582, 584), False, 'import socket\n'), ((216, 325), 'subprocess.check_call', 'subprocess.check_call', (["('ping', '-c 1', '-W 1', address)"], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(('ping', '-c 1', '-W 1', address), stdout=subprocess.\n PIPE, stderr=subprocess.PIPE)\n", (237, 325), False, 'import subprocess\n')] |
from PlayerState import *
from pathFinder import PathFinder
from StateLook4Resources import *
class StateGoHome(PlayerState):
""" State Implementation: has a resource and go back home """
def __init__(self, player):
self.player = player
self.player.setTarget(self.player.playerData.HouseLocation)
def doAction(self):
origin = self.player.playerData.Position
target = self.player.target
moves = PathFinder(self.player.mapView).getPath(origin, target)
# If player just gave the resource home, look 4 resources again
if(not self.player.hasResources()):
self.player.state = StateLook4Resources(self.player)
return create_purchase_action(0)
return create_move_action(moves[0])
def toString():
return "StateGoHome"
| [
"pathFinder.PathFinder"
] | [((450, 481), 'pathFinder.PathFinder', 'PathFinder', (['self.player.mapView'], {}), '(self.player.mapView)\n', (460, 481), False, 'from pathFinder import PathFinder\n')] |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models.models."""
__author__ = [
'<EMAIL> (<NAME>)',
]
import datetime
from models import models
from tests.functional import actions
# Disable complaints about docstrings for self-documenting tests.
# pylint: disable-msg=g-missing-docstring
class EventEntityTestCase(actions.ExportTestBase):
def test_for_export_transforms_correctly(self):
event = models.EventEntity(source='source', user_id='1')
key = event.put()
exported = event.for_export(self.transform)
self.assert_blacklisted_properties_removed(event, exported)
self.assertEqual('source', event.source)
self.assertEqual('transformed_1', exported.user_id)
self.assertEqual(key, models.EventEntity.safe_key(key, self.transform))
class PersonalProfileTestCase(actions.ExportTestBase):
def test_for_export_transforms_correctly_and_sets_safe_key(self):
date_of_birth = datetime.date.today()
email = '<EMAIL>'
legal_name = 'legal_name'
nick_name = 'nick_name'
user_id = '1'
profile = models.PersonalProfile(
date_of_birth=date_of_birth, email=email, key_name=user_id,
legal_name=legal_name, nick_name=nick_name)
profile.put()
exported = profile.for_export(self.transform)
self.assert_blacklisted_properties_removed(profile, exported)
self.assertEqual(
self.transform(user_id), exported.safe_key.name())
class QuestionDAOTestCase(actions.TestBase):
"""Functional tests for QuestionDAO."""
# Name determined by parent. pylint: disable-msg=g-bad-name
def setUp(self):
"""Sets up datastore contents."""
super(QuestionDAOTestCase, self).setUp()
self.used_twice_question_id = 1
self.used_twice_question_dto = models.QuestionDTO(
self.used_twice_question_id, {})
self.used_once_question_id = 2
self.used_once_question_dto = models.QuestionDTO(
self.used_once_question_id, {})
self.unused_question_id = 3
self.unused_question_dto = models.QuestionDTO(
self.unused_question_id, {})
models.QuestionDAO.save_all([
self.used_twice_question_dto, self.used_once_question_dto,
self.unused_question_dto])
# Handcoding the dicts. This is dangerous because they're handcoded
# elsewhere, the implementations could fall out of sync, and these tests
# may then pass erroneously.
self.first_question_group_description = 'first_question_group'
self.first_question_group_id = 4
self.first_question_group_dto = models.QuestionGroupDTO(
self.first_question_group_id,
{'description': self.first_question_group_description,
'items': [{'question': str(self.used_once_question_id)}]})
self.second_question_group_description = 'second_question_group'
self.second_question_group_id = 5
self.second_question_group_dto = models.QuestionGroupDTO(
self.second_question_group_id,
{'description': self.second_question_group_description,
'items': [{'question': str(self.used_twice_question_id)}]})
self.third_question_group_description = 'third_question_group'
self.third_question_group_id = 6
self.third_question_group_dto = models.QuestionGroupDTO(
self.third_question_group_id,
{'description': self.third_question_group_description,
'items': [{'question': str(self.used_twice_question_id)}]})
models.QuestionGroupDAO.save_all([
self.first_question_group_dto, self.second_question_group_dto,
self.third_question_group_dto])
def test_used_by_returns_description_of_single_question_group(self):
self.assertEqual(
[self.first_question_group_description],
models.QuestionDAO.used_by(self.used_once_question_id))
def test_used_by_returns_descriptions_of_multiple_question_groups(self):
self.assertEqual(
[self.second_question_group_description,
self.third_question_group_description],
models.QuestionDAO.used_by(self.used_twice_question_id))
def test_used_by_returns_empty_list_for_unused_question(self):
not_found_id = 7
self.assertFalse(models.QuestionDAO.load(not_found_id))
self.assertEqual([], models.QuestionDAO.used_by(not_found_id))
class StudentTestCase(actions.ExportTestBase):
def test_for_export_transforms_correctly(self):
user_id = '1'
student = models.Student(key_name='name', user_id='1', is_enrolled=True)
key = student.put()
exported = student.for_export(self.transform)
self.assert_blacklisted_properties_removed(student, exported)
self.assertTrue(exported.is_enrolled)
self.assertEqual('transformed_1', exported.user_id)
self.assertEqual(
'transformed_' + user_id, exported.key_by_user_id.name())
self.assertEqual(
models.Student.safe_key(key, self.transform), exported.safe_key)
def test_get_key_does_not_transform_by_default(self):
user_id = 'user_id'
student = models.Student(key_name='name', user_id=user_id)
student.put()
self.assertEqual(user_id, student.get_key().name())
def test_safe_key_transforms_name(self):
key = models.Student(key_name='name').put()
self.assertEqual(
'transformed_name',
models.Student.safe_key(key, self.transform).name())
class StudentAnswersEntityTestCase(actions.ExportTestBase):
def test_safe_key_transforms_name(self):
student_key = models.Student(key_name='name').put()
answers = models.StudentAnswersEntity(key_name=student_key.name())
answers_key = answers.put()
self.assertEqual(
'transformed_name',
models.StudentAnswersEntity.safe_key(
answers_key, self.transform).name())
class StudentPropertyEntityTestCase(actions.ExportTestBase):
def test_safe_key_transforms_user_id_component(self):
user_id = 'user_id'
student = models.Student(key_name='<EMAIL>', user_id=user_id)
student.put()
property_name = 'property-name'
student_property_key = models.StudentPropertyEntity.create(
student, property_name).put()
self.assertEqual(
'transformed_%s-%s' % (user_id, property_name),
models.StudentPropertyEntity.safe_key(
student_property_key, self.transform).name())
| [
"models.models.QuestionDTO",
"models.models.QuestionDAO.save_all",
"models.models.StudentPropertyEntity.safe_key",
"models.models.QuestionDAO.load",
"models.models.StudentAnswersEntity.safe_key",
"models.models.Student",
"models.models.QuestionDAO.used_by",
"models.models.Student.safe_key",
"models.models.EventEntity",
"models.models.EventEntity.safe_key",
"models.models.QuestionGroupDAO.save_all",
"datetime.date.today",
"models.models.StudentPropertyEntity.create",
"models.models.PersonalProfile"
] | [((994, 1042), 'models.models.EventEntity', 'models.EventEntity', ([], {'source': '"""source"""', 'user_id': '"""1"""'}), "(source='source', user_id='1')\n", (1012, 1042), False, 'from models import models\n'), ((1531, 1552), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1550, 1552), False, 'import datetime\n'), ((1685, 1816), 'models.models.PersonalProfile', 'models.PersonalProfile', ([], {'date_of_birth': 'date_of_birth', 'email': 'email', 'key_name': 'user_id', 'legal_name': 'legal_name', 'nick_name': 'nick_name'}), '(date_of_birth=date_of_birth, email=email, key_name=\n user_id, legal_name=legal_name, nick_name=nick_name)\n', (1707, 1816), False, 'from models import models\n'), ((2420, 2471), 'models.models.QuestionDTO', 'models.QuestionDTO', (['self.used_twice_question_id', '{}'], {}), '(self.used_twice_question_id, {})\n', (2438, 2471), False, 'from models import models\n'), ((2563, 2613), 'models.models.QuestionDTO', 'models.QuestionDTO', (['self.used_once_question_id', '{}'], {}), '(self.used_once_question_id, {})\n', (2581, 2613), False, 'from models import models\n'), ((2699, 2746), 'models.models.QuestionDTO', 'models.QuestionDTO', (['self.unused_question_id', '{}'], {}), '(self.unused_question_id, {})\n', (2717, 2746), False, 'from models import models\n'), ((2768, 2887), 'models.models.QuestionDAO.save_all', 'models.QuestionDAO.save_all', (['[self.used_twice_question_dto, self.used_once_question_dto, self.\n unused_question_dto]'], {}), '([self.used_twice_question_dto, self.\n used_once_question_dto, self.unused_question_dto])\n', (2795, 2887), False, 'from models import models\n'), ((4196, 4329), 'models.models.QuestionGroupDAO.save_all', 'models.QuestionGroupDAO.save_all', (['[self.first_question_group_dto, self.second_question_group_dto, self.\n third_question_group_dto]'], {}), '([self.first_question_group_dto, self.\n second_question_group_dto, self.third_question_group_dto])\n', (4228, 4329), False, 'from models import models\n'), ((5220, 5282), 'models.models.Student', 'models.Student', ([], {'key_name': '"""name"""', 'user_id': '"""1"""', 'is_enrolled': '(True)'}), "(key_name='name', user_id='1', is_enrolled=True)\n", (5234, 5282), False, 'from models import models\n'), ((5846, 5894), 'models.models.Student', 'models.Student', ([], {'key_name': '"""name"""', 'user_id': 'user_id'}), "(key_name='name', user_id=user_id)\n", (5860, 5894), False, 'from models import models\n'), ((6806, 6857), 'models.models.Student', 'models.Student', ([], {'key_name': '"""<EMAIL>"""', 'user_id': 'user_id'}), "(key_name='<EMAIL>', user_id=user_id)\n", (6820, 6857), False, 'from models import models\n'), ((1329, 1377), 'models.models.EventEntity.safe_key', 'models.EventEntity.safe_key', (['key', 'self.transform'], {}), '(key, self.transform)\n', (1356, 1377), False, 'from models import models\n'), ((4515, 4569), 'models.models.QuestionDAO.used_by', 'models.QuestionDAO.used_by', (['self.used_once_question_id'], {}), '(self.used_once_question_id)\n', (4541, 4569), False, 'from models import models\n'), ((4793, 4848), 'models.models.QuestionDAO.used_by', 'models.QuestionDAO.used_by', (['self.used_twice_question_id'], {}), '(self.used_twice_question_id)\n', (4819, 4848), False, 'from models import models\n'), ((4968, 5005), 'models.models.QuestionDAO.load', 'models.QuestionDAO.load', (['not_found_id'], {}), '(not_found_id)\n', (4991, 5005), False, 'from models import models\n'), ((5036, 5076), 'models.models.QuestionDAO.used_by', 'models.QuestionDAO.used_by', 
(['not_found_id'], {}), '(not_found_id)\n', (5062, 5076), False, 'from models import models\n'), ((5676, 5720), 'models.models.Student.safe_key', 'models.Student.safe_key', (['key', 'self.transform'], {}), '(key, self.transform)\n', (5699, 5720), False, 'from models import models\n'), ((6037, 6068), 'models.models.Student', 'models.Student', ([], {'key_name': '"""name"""'}), "(key_name='name')\n", (6051, 6068), False, 'from models import models\n'), ((6328, 6359), 'models.models.Student', 'models.Student', ([], {'key_name': '"""name"""'}), "(key_name='name')\n", (6342, 6359), False, 'from models import models\n'), ((6951, 7010), 'models.models.StudentPropertyEntity.create', 'models.StudentPropertyEntity.create', (['student', 'property_name'], {}), '(student, property_name)\n', (6986, 7010), False, 'from models import models\n'), ((6145, 6189), 'models.models.Student.safe_key', 'models.Student.safe_key', (['key', 'self.transform'], {}), '(key, self.transform)\n', (6168, 6189), False, 'from models import models\n'), ((6547, 6612), 'models.models.StudentAnswersEntity.safe_key', 'models.StudentAnswersEntity.safe_key', (['answers_key', 'self.transform'], {}), '(answers_key, self.transform)\n', (6583, 6612), False, 'from models import models\n'), ((7128, 7203), 'models.models.StudentPropertyEntity.safe_key', 'models.StudentPropertyEntity.safe_key', (['student_property_key', 'self.transform'], {}), '(student_property_key, self.transform)\n', (7165, 7203), False, 'from models import models\n')] |
# -*- coding: utf-8 -*-
import io
import math
import warnings
from typing import Optional, Tuple
import torch
from torch import Tensor
from torchaudio._internal import module_utils as _mod_utils
import torchaudio
__all__ = [
"spectrogram",
"griffinlim",
"amplitude_to_DB",
"DB_to_amplitude",
"compute_deltas",
"compute_kaldi_pitch",
"create_fb_matrix",
"create_dct",
"compute_deltas",
"detect_pitch_frequency",
"DB_to_amplitude",
"mu_law_encoding",
"mu_law_decoding",
"complex_norm",
"angle",
"magphase",
"phase_vocoder",
'mask_along_axis',
'mask_along_axis_iid',
'sliding_window_cmn',
"spectral_centroid",
"apply_codec",
]
def spectrogram(
waveform: Tensor,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: Optional[float],
normalized: bool,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True
) -> Tensor:
r"""Create a spectrogram or a batch of spectrograms from a raw audio signal.
The spectrogram can be either magnitude-only or complex.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
power (float or None): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead.
normalized (bool): Whether to normalize by magnitude after stft
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
Default: ``True``
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. Default: ``"reflect"``
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. Default: ``True``
Returns:
Tensor: Dimension (..., freq, time), freq is
``n_fft // 2 + 1`` and ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
if pad > 0:
# TODO add "with torch.no_grad():" back when JIT supports it
waveform = torch.nn.functional.pad(waveform, (pad, pad), "constant")
# pack batch
shape = waveform.size()
waveform = waveform.reshape(-1, shape[-1])
# default values are consistent with librosa.core.spectrum._spectrogram
spec_f = torch.stft(
input=waveform,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
normalized=False,
onesided=onesided,
return_complex=True,
)
# unpack batch
spec_f = spec_f.reshape(shape[:-1] + spec_f.shape[-2:])
if normalized:
spec_f /= window.pow(2.).sum().sqrt()
if power is not None:
if power == 1.0:
return spec_f.abs()
return spec_f.abs().pow(power)
return torch.view_as_real(spec_f)
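# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal example of calling ``spectrogram`` above. The window choice and
# the librosa-style defaults (win_length = n_fft, hop_length = n_fft // 4,
# power = 2.0) are assumptions made for illustration, not values prescribed
# by this module.
def _spectrogram_usage_example(waveform: Tensor, n_fft: int = 400) -> Tensor:
    win_length = n_fft
    hop_length = n_fft // 4
    window = torch.hann_window(win_length, dtype=waveform.dtype, device=waveform.device)
    # power=2.0 yields a power spectrogram of shape (..., n_fft // 2 + 1, n_frame)
    return spectrogram(
        waveform,
        pad=0,
        window=window,
        n_fft=n_fft,
        hop_length=hop_length,
        win_length=win_length,
        power=2.0,
        normalized=False,
    )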
def griffinlim(
specgram: Tensor,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
power: float,
normalized: bool,
n_iter: int,
momentum: float,
length: Optional[int],
rand_init: bool
) -> Tensor:
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
Implementation ported from `librosa`.
* [1] McFee, Brian, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
"librosa: Audio and music signal analysis in python."
In Proceedings of the 14th python in science conference, pp. 18-25. 2015.
* [2] <NAME>., <NAME>., & <NAME>.
"A fast Griffin-Lim algorithm,"
IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (pp. 1-4),
Oct. 2013.
* [3] <NAME> and <NAME>,
"Signal estimation from modified short-time Fourier transform,"
IEEE Trans. ASSP, vol.32, no.2, pp.236–243, Apr. 1984.
Args:
specgram (Tensor): A magnitude-only STFT spectrogram of dimension (..., freq, frames)
where freq is ``n_fft // 2 + 1``.
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT, creates ``n_fft // 2 + 1`` bins
hop_length (int): Length of hop between STFT windows. (
Default: ``win_length // 2``)
win_length (int): Window size. (Default: ``n_fft``)
power (float): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
normalized (bool): Whether to normalize by magnitude after stft.
n_iter (int): Number of iteration for phase recovery process.
momentum (float): The momentum parameter for fast Griffin-Lim.
Setting this to 0 recovers the original Griffin-Lim method.
Values near 1 can lead to faster convergence, but above 1 may not converge.
length (int or None): Array length of the expected output.
rand_init (bool): Initializes phase randomly if True, to zero otherwise.
Returns:
torch.Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
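    Example
        Illustrative round trip through ``spectrogram`` (all parameter values are
        arbitrary choices); positional arguments after ``normalized`` are n_iter,
        momentum, length and rand_init.
        >>> waveform = torch.randn(1, 16000)
        >>> window = torch.hann_window(400)
        >>> specgram = spectrogram(waveform, 0, window, 400, 200, 400, 2.0, False)
        >>> restored = griffinlim(specgram, window, 400, 200, 400, 2.0, False, 32, 0.99, 16000, True)
        >>> restored.shape
        torch.Size([1, 16000])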
"""
assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
assert momentum >= 0, 'momentum={} < 0'.format(momentum)
if normalized:
warnings.warn(
"The argument normalized is not used in Griffin-Lim, "
"and will be removed in v0.9.0 release. To suppress this warning, "
"please use `normalized=False`.")
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
specgram = specgram.pow(1 / power)
# randomly initialize the phase
batch, freq, frames = specgram.size()
if rand_init:
angles = 2 * math.pi * torch.rand(batch, freq, frames)
else:
angles = torch.zeros(batch, freq, frames)
angles = torch.stack([angles.cos(), angles.sin()], dim=-1) \
.to(dtype=specgram.dtype, device=specgram.device)
specgram = specgram.unsqueeze(-1).expand_as(angles)
# And initialize the previous iterate to 0
rebuilt = torch.tensor(0.)
for _ in range(n_iter):
# Store the previous iterate
tprev = rebuilt
# Invert with our current estimate of the phases
inverse = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length).float()
# Rebuild the spectrogram
rebuilt = torch.view_as_real(
torch.stft(
input=inverse,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=True,
pad_mode='reflect',
normalized=False,
onesided=True,
return_complex=True,
)
)
# Update our phase estimates
angles = rebuilt
if momentum:
angles = angles - tprev.mul_(momentum / (1 + momentum))
angles = angles.div(complex_norm(angles).add(1e-16).unsqueeze(-1).expand_as(angles))
# Return the final phase estimates
waveform = torch.istft(specgram * angles,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
length=length)
# unpack batch
waveform = waveform.reshape(shape[:-2] + waveform.shape[-1:])
return waveform
def amplitude_to_DB(
x: Tensor,
multiplier: float,
amin: float,
db_multiplier: float,
top_db: Optional[float] = None
) -> Tensor:
r"""Turn a spectrogram from the power/amplitude scale to the decibel scale.
The output of each tensor in a batch depends on the maximum value of that tensor,
and so may return different values for an audio clip split into snippets vs. a full clip.
Args:
x (Tensor): Input spectrogram(s) before being converted to decibel scale. Input should take
the form `(..., freq, time)`. Batched inputs should include a channel dimension and
have the form `(batch, channel, freq, time)`.
multiplier (float): Use 10. for power and 20. for amplitude
amin (float): Number to clamp ``x``
db_multiplier (float): Log10(max(reference value and amin))
top_db (float or None, optional): Minimum negative cut-off in decibels. A reasonable number
is 80. (Default: ``None``)
Returns:
Tensor: Output tensor in decibel scale
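    Example
        Small numeric sketch for a power spectrogram (``multiplier=10.``) with an
        assumed reference value of 1.0, i.e. ``db_multiplier=0.``.
        >>> power_spec = torch.tensor([[[1.0, 10.0, 100.0]]])
        >>> amplitude_to_DB(power_spec, 10., 1e-10, 0.)  # 0 dB, 10 dB and 20 dB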
"""
x_db = multiplier * torch.log10(torch.clamp(x, min=amin))
x_db -= multiplier * db_multiplier
if top_db is not None:
# Expand batch
shape = x_db.size()
packed_channels = shape[-3] if x_db.dim() > 2 else 1
x_db = x_db.reshape(-1, packed_channels, shape[-2], shape[-1])
x_db = torch.max(x_db, (x_db.amax(dim=(-3, -2, -1)) - top_db).view(-1, 1, 1, 1))
# Repack batch
x_db = x_db.reshape(shape)
return x_db
def DB_to_amplitude(
x: Tensor,
ref: float,
power: float
) -> Tensor:
r"""Turn a tensor from the decibel scale to the power/amplitude scale.
Args:
x (Tensor): Input tensor before being converted to power/amplitude scale.
ref (float): Reference which the output will be scaled by.
power (float): If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude.
Returns:
Tensor: Output tensor in power/amplitude scale.
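    Example
        Inverse sketch of the decibel example above: with ``ref=1.`` and
        ``power=1.`` a value of 20 dB maps back to a power of 100.
        >>> DB_to_amplitude(torch.tensor([0., 10., 20.]), 1., 1.)  # ~[1., 10., 100.]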
"""
return ref * torch.pow(torch.pow(10.0, 0.1 * x), power)
def _hz_to_mel(freq: float, mel_scale: str = "htk") -> float:
r"""Convert Hz to Mels.
Args:
        freq (float): Frequency in Hz
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
mels (float): Frequency in Mels
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 2595.0 * math.log10(1.0 + (freq / 700.0))
# Fill in the linear part
f_min = 0.0
f_sp = 200.0 / 3
mels = (freq - f_min) / f_sp
# Fill in the log-scale part
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
if freq >= min_log_hz:
mels = min_log_mel + math.log(freq / min_log_hz) / logstep
return mels
def _mel_to_hz(mels: Tensor, mel_scale: str = "htk") -> Tensor:
"""Convert mel bin numbers to frequencies.
Args:
mels (Tensor): Mel frequencies
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
freqs (Tensor): Mels converted in Hz
"""
if mel_scale not in ['slaney', 'htk']:
raise ValueError('mel_scale should be one of "htk" or "slaney".')
if mel_scale == "htk":
return 700.0 * (10.0**(mels / 2595.0) - 1.0)
# Fill in the linear scale
f_min = 0.0
f_sp = 200.0 / 3
freqs = f_min + f_sp * mels
# And now the nonlinear scale
min_log_hz = 1000.0
min_log_mel = (min_log_hz - f_min) / f_sp
logstep = math.log(6.4) / 27.0
log_t = (mels >= min_log_mel)
freqs[log_t] = min_log_hz * torch.exp(logstep * (mels[log_t] - min_log_mel))
return freqs
def create_fb_matrix(
n_freqs: int,
f_min: float,
f_max: float,
n_mels: int,
sample_rate: int,
norm: Optional[str] = None,
mel_scale: str = "htk",
) -> Tensor:
r"""Create a frequency bin conversion matrix.
Args:
n_freqs (int): Number of frequencies to highlight/apply
f_min (float): Minimum frequency (Hz)
f_max (float): Maximum frequency (Hz)
n_mels (int): Number of mel filterbanks
sample_rate (int): Sample rate of the audio waveform
norm (Optional[str]): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Returns:
Tensor: Triangular filter banks (fb matrix) of size (``n_freqs``, ``n_mels``)
meaning number of frequencies to highlight/apply to x the number of filterbanks.
Each column is a filterbank so that assuming there is a matrix A of
size (..., ``n_freqs``), the applied result would be
``A * create_fb_matrix(A.size(-1), ...)``.
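    Example
        Sketch of a 40-band filterbank for a 400-point FFT at 16 kHz (so
        ``n_freqs = 201``); these values are arbitrary choices, not defaults.
        >>> fb = create_fb_matrix(201, 0., 8000., 40, 16000)
        >>> fb.shape
        torch.Size([201, 40])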
"""
if norm is not None and norm != "slaney":
raise ValueError("norm must be one of None or 'slaney'")
# freq bins
# Equivalent filterbank construction by Librosa
all_freqs = torch.linspace(0, sample_rate // 2, n_freqs)
# calculate mel freq bins
m_min = _hz_to_mel(f_min, mel_scale=mel_scale)
m_max = _hz_to_mel(f_max, mel_scale=mel_scale)
m_pts = torch.linspace(m_min, m_max, n_mels + 2)
f_pts = _mel_to_hz(m_pts, mel_scale=mel_scale)
# calculate the difference between each mel point and each stft freq point in hertz
f_diff = f_pts[1:] - f_pts[:-1] # (n_mels + 1)
slopes = f_pts.unsqueeze(0) - all_freqs.unsqueeze(1) # (n_freqs, n_mels + 2)
# create overlapping triangles
zero = torch.zeros(1)
down_slopes = (-1.0 * slopes[:, :-2]) / f_diff[:-1] # (n_freqs, n_mels)
up_slopes = slopes[:, 2:] / f_diff[1:] # (n_freqs, n_mels)
fb = torch.max(zero, torch.min(down_slopes, up_slopes))
if norm is not None and norm == "slaney":
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (f_pts[2:n_mels + 2] - f_pts[:n_mels])
fb *= enorm.unsqueeze(0)
if (fb.max(dim=0).values == 0.).any():
warnings.warn(
"At least one mel filterbank has all zero values. "
f"The value for `n_mels` ({n_mels}) may be set too high. "
f"Or, the value for `n_freqs` ({n_freqs}) may be set too low."
)
return fb
def create_dct(
n_mfcc: int,
n_mels: int,
norm: Optional[str]
) -> Tensor:
r"""Create a DCT transformation matrix with shape (``n_mels``, ``n_mfcc``),
normalized depending on norm.
Args:
n_mfcc (int): Number of mfc coefficients to retain
n_mels (int): Number of mel filterbanks
norm (str or None): Norm to use (either 'ortho' or None)
Returns:
Tensor: The transformation matrix, to be right-multiplied to
row-wise data of size (``n_mels``, ``n_mfcc``).
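    Example
        Sketch of the usual MFCC reduction from 40 mel bands to 13 coefficients.
        >>> dct_mat = create_dct(13, 40, 'ortho')
        >>> dct_mat.shape
        torch.Size([40, 13])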
"""
# http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
n = torch.arange(float(n_mels))
k = torch.arange(float(n_mfcc)).unsqueeze(1)
dct = torch.cos(math.pi / float(n_mels) * (n + 0.5) * k) # size (n_mfcc, n_mels)
if norm is None:
dct *= 2.0
else:
assert norm == "ortho"
dct[0] *= 1.0 / math.sqrt(2.0)
dct *= math.sqrt(2.0 / float(n_mels))
return dct.t()
def mu_law_encoding(
x: Tensor,
quantization_channels: int
) -> Tensor:
r"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1.
Args:
x (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law encoding
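    Example
        Sketch with the common 256-channel companding; the input is assumed to
        already lie in [-1, 1].
        >>> x = torch.linspace(-1., 1., 5)
        >>> mu_law_encoding(x, 256)  # values 0, 16, 128, 239, 255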
"""
mu = quantization_channels - 1.0
if not x.is_floating_point():
x = x.to(torch.float)
mu = torch.tensor(mu, dtype=x.dtype)
x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)
x_mu = ((x_mu + 1) / 2 * mu + 0.5).to(torch.int64)
return x_mu
def mu_law_decoding(
x_mu: Tensor,
quantization_channels: int
) -> Tensor:
r"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and quantization_channels - 1
and returns a signal scaled between -1 and 1.
Args:
x_mu (Tensor): Input tensor
quantization_channels (int): Number of channels
Returns:
Tensor: Input after mu-law decoding
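    Example
        Decoding sketch; the extreme codes of a 256-channel encoding map back to
        roughly -1 and 1.
        >>> mu_law_decoding(torch.tensor([0, 255]), 256)  # ~[-1., 1.]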
"""
mu = quantization_channels - 1.0
if not x_mu.is_floating_point():
x_mu = x_mu.to(torch.float)
mu = torch.tensor(mu, dtype=x_mu.dtype)
x = ((x_mu) / mu) * 2 - 1.0
x = torch.sign(x) * (torch.exp(torch.abs(x) * torch.log1p(mu)) - 1.0) / mu
return x
def complex_norm(
complex_tensor: Tensor,
power: float = 1.0
) -> Tensor:
r"""Compute the norm of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`).
Returns:
Tensor: Power of the normed input tensor. Shape of `(..., )`
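    Example
        A 3-4-5 sketch: real part 3 and imaginary part 4 give norm 5.
        >>> complex_norm(torch.tensor([3., 4.]))
        tensor(5.)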
"""
# Replace by torch.norm once issue is fixed
# https://github.com/pytorch/pytorch/issues/34279
return complex_tensor.pow(2.).sum(-1).pow(0.5 * power)
def angle(
complex_tensor: Tensor
) -> Tensor:
r"""Compute the angle of complex tensor input.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
Return:
Tensor: Angle of a complex tensor. Shape of `(..., )`
"""
return torch.atan2(complex_tensor[..., 1], complex_tensor[..., 0])
def magphase(
complex_tensor: Tensor,
power: float = 1.0
) -> Tuple[Tensor, Tensor]:
r"""Separate a complex-valued spectrogram with shape `(..., 2)` into its magnitude and phase.
Args:
complex_tensor (Tensor): Tensor shape of `(..., complex=2)`
power (float): Power of the norm. (Default: `1.0`)
Returns:
(Tensor, Tensor): The magnitude and phase of the complex tensor
"""
mag = complex_norm(complex_tensor, power)
phase = angle(complex_tensor)
return mag, phase
def phase_vocoder(
complex_specgrams: Tensor,
rate: float,
phase_advance: Tensor
) -> Tensor:
r"""Given a STFT tensor, speed up in time without modifying pitch by a
factor of ``rate``.
Args:
complex_specgrams (Tensor): Dimension of `(..., freq, time, complex=2)`
rate (float): Speed-up factor
phase_advance (Tensor): Expected phase advance in each bin. Dimension of (freq, 1)
Returns:
Tensor: Complex Specgrams Stretch with dimension of `(..., freq, ceil(time/rate), complex=2)`
Example
>>> freq, hop_length = 1025, 512
>>> # (channel, freq, time, complex=2)
>>> complex_specgrams = torch.randn(2, freq, 300, 2)
>>> rate = 1.3 # Speed up by 30%
>>> phase_advance = torch.linspace(
>>> 0, math.pi * hop_length, freq)[..., None]
>>> x = phase_vocoder(complex_specgrams, rate, phase_advance)
>>> x.shape # with 231 == ceil(300 / 1.3)
torch.Size([2, 1025, 231, 2])
"""
# pack batch
shape = complex_specgrams.size()
complex_specgrams = complex_specgrams.reshape([-1] + list(shape[-3:]))
time_steps = torch.arange(0,
complex_specgrams.size(-2),
rate,
device=complex_specgrams.device,
dtype=complex_specgrams.dtype)
alphas = time_steps % 1.0
phase_0 = angle(complex_specgrams[..., :1, :])
# Time Padding
complex_specgrams = torch.nn.functional.pad(complex_specgrams, [0, 0, 0, 2])
# (new_bins, freq, 2)
complex_specgrams_0 = complex_specgrams.index_select(-2, time_steps.long())
complex_specgrams_1 = complex_specgrams.index_select(-2, (time_steps + 1).long())
angle_0 = angle(complex_specgrams_0)
angle_1 = angle(complex_specgrams_1)
norm_0 = torch.norm(complex_specgrams_0, p=2, dim=-1)
norm_1 = torch.norm(complex_specgrams_1, p=2, dim=-1)
phase = angle_1 - angle_0 - phase_advance
phase = phase - 2 * math.pi * torch.round(phase / (2 * math.pi))
# Compute Phase Accum
phase = phase + phase_advance
phase = torch.cat([phase_0, phase[..., :-1]], dim=-1)
phase_acc = torch.cumsum(phase, -1)
mag = alphas * norm_1 + (1 - alphas) * norm_0
real_stretch = mag * torch.cos(phase_acc)
imag_stretch = mag * torch.sin(phase_acc)
complex_specgrams_stretch = torch.stack([real_stretch, imag_stretch], dim=-1)
# unpack batch
complex_specgrams_stretch = complex_specgrams_stretch.reshape(shape[:-3] + complex_specgrams_stretch.shape[1:])
return complex_specgrams_stretch
def mask_along_axis_iid(
specgrams: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
Args:
specgrams (Tensor): Real spectrograms (batch, channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (2 -> frequency, 3 -> time)
Returns:
Tensor: Masked spectrograms of dimensions (batch, channel, freq, time)
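    Example
        SpecAugment-style time-masking sketch on a batched input (the sizes and
        the 20-frame mask parameter are arbitrary choices).
        >>> specgrams = torch.randn(4, 2, 128, 400)
        >>> masked = mask_along_axis_iid(specgrams, 20, 0., 3)
        >>> masked.shape
        torch.Size([4, 2, 128, 400])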
"""
if axis != 2 and axis != 3:
raise ValueError('Only Frequency and Time masking are supported')
device = specgrams.device
dtype = specgrams.dtype
value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * mask_param
min_value = torch.rand(specgrams.shape[:2], device=device, dtype=dtype) * (specgrams.size(axis) - value)
# Create broadcastable mask
mask_start = min_value[..., None, None]
mask_end = (min_value + value)[..., None, None]
mask = torch.arange(0, specgrams.size(axis), device=device, dtype=dtype)
# Per batch example masking
specgrams = specgrams.transpose(axis, -1)
specgrams.masked_fill_((mask >= mask_start) & (mask < mask_end), mask_value)
specgrams = specgrams.transpose(axis, -1)
return specgrams
def mask_along_axis(
specgram: Tensor,
mask_param: int,
mask_value: float,
axis: int
) -> Tensor:
r"""
Apply a mask along ``axis``. Mask will be applied from indices ``[v_0, v_0 + v)``, where
``v`` is sampled from ``uniform(0, mask_param)``, and ``v_0`` from ``uniform(0, max_v - v)``.
All examples will have the same mask interval.
Args:
specgram (Tensor): Real spectrogram (channel, freq, time)
mask_param (int): Number of columns to be masked will be uniformly sampled from [0, mask_param]
mask_value (float): Value to assign to the masked columns
axis (int): Axis to apply masking on (1 -> frequency, 2 -> time)
Returns:
Tensor: Masked spectrogram of dimensions (channel, freq, time)
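    Example
        Frequency-masking sketch on a single (channel, freq, time) spectrogram
        (sizes and mask parameter are arbitrary choices).
        >>> specgram = torch.randn(1, 128, 400)
        >>> masked = mask_along_axis(specgram, 27, 0., 1)
        >>> masked.shape
        torch.Size([1, 128, 400])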
"""
# pack batch
shape = specgram.size()
specgram = specgram.reshape([-1] + list(shape[-2:]))
value = torch.rand(1) * mask_param
min_value = torch.rand(1) * (specgram.size(axis) - value)
mask_start = (min_value.long()).squeeze()
mask_end = (min_value.long() + value.long()).squeeze()
assert mask_end - mask_start < mask_param
if axis == 1:
specgram[:, mask_start:mask_end] = mask_value
elif axis == 2:
specgram[:, :, mask_start:mask_end] = mask_value
else:
raise ValueError('Only Frequency and Time masking are supported')
# unpack batch
specgram = specgram.reshape(shape[:-2] + specgram.shape[-2:])
return specgram
def compute_deltas(
specgram: Tensor,
win_length: int = 5,
mode: str = "replicate"
) -> Tensor:
r"""Compute delta coefficients of a tensor, usually a spectrogram:
.. math::
d_t = \frac{\sum_{n=1}^{\text{N}} n (c_{t+n} - c_{t-n})}{2 \sum_{n=1}^{\text{N}} n^2}
where :math:`d_t` is the deltas at time :math:`t`,
    :math:`c_t` is the spectrogram coefficient at time :math:`t`,
:math:`N` is ``(win_length-1)//2``.
Args:
specgram (Tensor): Tensor of audio of dimension (..., freq, time)
win_length (int, optional): The window length used for computing delta (Default: ``5``)
mode (str, optional): Mode parameter passed to padding (Default: ``"replicate"``)
Returns:
Tensor: Tensor of deltas of dimension (..., freq, time)
Example
>>> specgram = torch.randn(1, 40, 1000)
>>> delta = compute_deltas(specgram)
>>> delta2 = compute_deltas(delta)
"""
device = specgram.device
dtype = specgram.dtype
# pack batch
shape = specgram.size()
specgram = specgram.reshape(1, -1, shape[-1])
assert win_length >= 3
n = (win_length - 1) // 2
# twice sum of integer squared
denom = n * (n + 1) * (2 * n + 1) / 3
specgram = torch.nn.functional.pad(specgram, (n, n), mode=mode)
kernel = torch.arange(-n, n + 1, 1, device=device, dtype=dtype).repeat(specgram.shape[1], 1, 1)
output = torch.nn.functional.conv1d(specgram, kernel, groups=specgram.shape[1]) / denom
# unpack batch
output = output.reshape(shape)
return output
def _compute_nccf(
waveform: Tensor,
sample_rate: int,
frame_time: float,
freq_low: int
) -> Tensor:
r"""
Compute Normalized Cross-Correlation Function (NCCF).
.. math::
\phi_i(m) = \frac{\sum_{n=b_i}^{b_i + N-1} w(n) w(m+n)}{\sqrt{E(b_i) E(m+b_i)}},
where
:math:`\phi_i(m)` is the NCCF at frame :math:`i` with lag :math:`m`,
:math:`w` is the waveform,
:math:`N` is the length of a frame,
:math:`b_i` is the beginning of frame :math:`i`,
:math:`E(j)` is the energy :math:`\sum_{n=j}^{j+N-1} w^2(n)`.
"""
EPSILON = 10 ** (-9)
# Number of lags to check
lags = int(math.ceil(sample_rate / freq_low))
frame_size = int(math.ceil(sample_rate * frame_time))
waveform_length = waveform.size()[-1]
num_of_frames = int(math.ceil(waveform_length / frame_size))
p = lags + num_of_frames * frame_size - waveform_length
waveform = torch.nn.functional.pad(waveform, (0, p))
# Compute lags
output_lag = []
for lag in range(1, lags + 1):
s1 = waveform[..., :-lag].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
s2 = waveform[..., lag:].unfold(-1, frame_size, frame_size)[..., :num_of_frames, :]
output_frames = (
(s1 * s2).sum(-1)
/ (EPSILON + torch.norm(s1, p=2, dim=-1)).pow(2)
/ (EPSILON + torch.norm(s2, p=2, dim=-1)).pow(2)
)
output_lag.append(output_frames.unsqueeze(-1))
nccf = torch.cat(output_lag, -1)
return nccf
def _combine_max(
a: Tuple[Tensor, Tensor],
b: Tuple[Tensor, Tensor],
thresh: float = 0.99
) -> Tuple[Tensor, Tensor]:
"""
    Take values (and their indices) from the first pair wherever its value exceeds
    ``thresh`` times the second's value, elementwise; otherwise take them from the second pair.
"""
mask = (a[0] > thresh * b[0])
values = mask * a[0] + ~mask * b[0]
indices = mask * a[1] + ~mask * b[1]
return values, indices
def _find_max_per_frame(
nccf: Tensor,
sample_rate: int,
freq_high: int
) -> Tensor:
r"""
For each frame, take the highest value of NCCF,
apply centered median smoothing, and convert to frequency.
    Note: If the maximum over all lags is very close to the maximum found
    within the first half of the lags, the smaller lag is preferred.
"""
lag_min = int(math.ceil(sample_rate / freq_high))
# Find near enough max that is smallest
best = torch.max(nccf[..., lag_min:], -1)
half_size = nccf.shape[-1] // 2
half = torch.max(nccf[..., lag_min:half_size], -1)
best = _combine_max(half, best)
indices = best[1]
# Add back minimal lag
indices += lag_min
# Add 1 empirical calibration offset
indices += 1
return indices
def _median_smoothing(
indices: Tensor,
win_length: int
) -> Tensor:
r"""
Apply median smoothing to the 1D tensor over the given window.
"""
# Centered windowed
pad_length = (win_length - 1) // 2
# "replicate" padding in any dimension
indices = torch.nn.functional.pad(
indices, (pad_length, 0), mode="constant", value=0.
)
indices[..., :pad_length] = torch.cat(pad_length * [indices[..., pad_length].unsqueeze(-1)], dim=-1)
roll = indices.unfold(-1, win_length, 1)
values, _ = torch.median(roll, -1)
return values
def detect_pitch_frequency(
waveform: Tensor,
sample_rate: int,
frame_time: float = 10 ** (-2),
win_length: int = 30,
freq_low: int = 85,
freq_high: int = 3400,
) -> Tensor:
r"""Detect pitch frequency.
It is implemented using normalized cross-correlation function and median smoothing.
Args:
        waveform (Tensor): Tensor of audio of dimension (..., time)
sample_rate (int): The sample rate of the waveform (Hz)
frame_time (float, optional): Duration of a frame (Default: ``10 ** (-2)``).
win_length (int, optional): The window length for median smoothing (in number of frames) (Default: ``30``).
freq_low (int, optional): Lowest frequency that can be detected (Hz) (Default: ``85``).
freq_high (int, optional): Highest frequency that can be detected (Hz) (Default: ``3400``).
Returns:
Tensor: Tensor of freq of dimension (..., frame)
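    Example
        Sketch on a synthetic 440 Hz tone; the detected values are expected to sit
        near 440 Hz (the exact estimate is limited by the lag resolution).
        >>> sample_rate = 16000
        >>> t = torch.arange(16000) / sample_rate
        >>> tone = torch.sin(2 * math.pi * 440 * t).unsqueeze(0)
        >>> pitch = detect_pitch_frequency(tone, sample_rate)  # roughly 440 Hz per frame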
"""
# pack batch
shape = list(waveform.size())
waveform = waveform.reshape([-1] + shape[-1:])
nccf = _compute_nccf(waveform, sample_rate, frame_time, freq_low)
indices = _find_max_per_frame(nccf, sample_rate, freq_high)
indices = _median_smoothing(indices, win_length)
# Convert indices to frequency
EPSILON = 10 ** (-9)
freq = sample_rate / (EPSILON + indices.to(torch.float))
# unpack batch
freq = freq.reshape(shape[:-1] + list(freq.shape[-1:]))
return freq
def sliding_window_cmn(
waveform: Tensor,
cmn_window: int = 600,
min_cmn_window: int = 100,
center: bool = False,
norm_vars: bool = False,
) -> Tensor:
r"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
waveform (Tensor): Tensor of audio of dimension (..., freq, time)
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center==true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
Returns:
        Tensor: Normalized features with the same shape as the input
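    Example
        Sketch on a single utterance of 300 frames with 13 features; with the
        default 600-frame window the whole utterance is used for every frame.
        >>> feats = torch.randn(300, 13)
        >>> normalized = sliding_window_cmn(feats, center=True)
        >>> normalized.shape
        torch.Size([300, 13])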
"""
input_shape = waveform.shape
num_frames, num_feats = input_shape[-2:]
waveform = waveform.view(-1, num_frames, num_feats)
num_channels = waveform.shape[0]
dtype = waveform.dtype
device = waveform.device
last_window_start = last_window_end = -1
cur_sum = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cur_sumsq = torch.zeros(num_channels, num_feats, dtype=dtype, device=device)
cmn_waveform = torch.zeros(
num_channels, num_frames, num_feats, dtype=dtype, device=device)
for t in range(num_frames):
window_start = 0
window_end = 0
if center:
window_start = t - cmn_window // 2
window_end = window_start + cmn_window
else:
window_start = t - cmn_window
window_end = t + 1
if window_start < 0:
window_end -= window_start
window_start = 0
if not center:
if window_end > t:
window_end = max(t + 1, min_cmn_window)
if window_end > num_frames:
window_start -= (window_end - num_frames)
window_end = num_frames
if window_start < 0:
window_start = 0
if last_window_start == -1:
input_part = waveform[:, window_start: window_end - window_start, :]
cur_sum += torch.sum(input_part, 1)
if norm_vars:
cur_sumsq += torch.cumsum(input_part ** 2, 1)[:, -1, :]
else:
if window_start > last_window_start:
frame_to_remove = waveform[:, last_window_start, :]
cur_sum -= frame_to_remove
if norm_vars:
cur_sumsq -= (frame_to_remove ** 2)
if window_end > last_window_end:
frame_to_add = waveform[:, last_window_end, :]
cur_sum += frame_to_add
if norm_vars:
cur_sumsq += (frame_to_add ** 2)
window_frames = window_end - window_start
last_window_start = window_start
last_window_end = window_end
cmn_waveform[:, t, :] = waveform[:, t, :] - cur_sum / window_frames
if norm_vars:
if window_frames == 1:
cmn_waveform[:, t, :] = torch.zeros(
num_channels, num_feats, dtype=dtype, device=device)
else:
variance = cur_sumsq
variance = variance / window_frames
variance -= ((cur_sum ** 2) / (window_frames ** 2))
variance = torch.pow(variance, -0.5)
cmn_waveform[:, t, :] *= variance
cmn_waveform = cmn_waveform.view(input_shape[:-2] + (num_frames, num_feats))
if len(input_shape) == 2:
cmn_waveform = cmn_waveform.squeeze(0)
return cmn_waveform
def spectral_centroid(
waveform: Tensor,
sample_rate: int,
pad: int,
window: Tensor,
n_fft: int,
hop_length: int,
win_length: int,
) -> Tensor:
r"""
Compute the spectral centroid for each channel along the time axis.
The spectral centroid is defined as the weighted average of the
frequency values, weighted by their magnitude.
Args:
waveform (Tensor): Tensor of audio of dimension (..., time)
sample_rate (int): Sample rate of the audio waveform
pad (int): Two sided padding of signal
window (Tensor): Window tensor that is applied/multiplied to each frame/window
n_fft (int): Size of FFT
hop_length (int): Length of hop between STFT windows
win_length (int): Window size
Returns:
Tensor: Dimension (..., time)
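    Example
        Sketch mirroring the ``spectrogram`` example parameters (arbitrary values,
        not defaults).
        >>> waveform = torch.randn(1, 16000)
        >>> window = torch.hann_window(400)
        >>> centroid = spectral_centroid(waveform, 16000, 0, window, 400, 200, 400)
        >>> centroid.shape
        torch.Size([1, 81])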
"""
specgram = spectrogram(waveform, pad=pad, window=window, n_fft=n_fft, hop_length=hop_length,
win_length=win_length, power=1., normalized=False)
freqs = torch.linspace(0, sample_rate // 2, steps=1 + n_fft // 2,
device=specgram.device).reshape((-1, 1))
freq_dim = -2
return (freqs * specgram).sum(dim=freq_dim) / specgram.sum(dim=freq_dim)
@_mod_utils.requires_sox()
def apply_codec(
waveform: Tensor,
sample_rate: int,
format: str,
channels_first: bool = True,
compression: Optional[float] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
) -> Tensor:
r"""
Apply codecs as a form of augmentation.
Args:
waveform (Tensor): Audio data. Must be 2 dimensional. See also ```channels_first```.
sample_rate (int): Sample rate of the audio waveform.
format (str): File format.
channels_first (bool):
When True, both the input and output Tensor have dimension ``[channel, time]``.
Otherwise, they have dimension ``[time, channel]``.
compression (float): Used for formats other than WAV.
            For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
encoding (str, optional): Changes the encoding for the supported formats.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
bits_per_sample (int, optional): Changes the bit depth for the supported formats.
For more details see :py:func:`torchaudio.backend.sox_io_backend.save`.
Returns:
torch.Tensor: Resulting Tensor.
If ``channels_first=True``, it has ``[channel, time]`` else ``[time, channel]``.
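    Example
        Sketch of simulating an 8-bit mu-law WAV round trip; this assumes the sox
        backend is available and that ``encoding="ULAW"`` with ``bits_per_sample=8``
        is supported for the chosen format.
        >>> waveform = torch.rand(1, 16000) * 2 - 1
        >>> degraded = apply_codec(waveform, 16000, "wav", encoding="ULAW", bits_per_sample=8)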
"""
bytes = io.BytesIO()
torchaudio.backend.sox_io_backend.save(bytes,
waveform,
sample_rate,
channels_first,
compression,
format,
encoding,
bits_per_sample
)
bytes.seek(0)
augmented, _ = torchaudio.sox_effects.sox_effects.apply_effects_file(
bytes, effects=[["rate", f"{sample_rate}"]], channels_first=channels_first, format=format)
return augmented
@_mod_utils.requires_kaldi()
def compute_kaldi_pitch(
waveform: torch.Tensor,
sample_rate: float,
frame_length: float = 25.0,
frame_shift: float = 10.0,
min_f0: float = 50,
max_f0: float = 400,
soft_min_f0: float = 10.0,
penalty_factor: float = 0.1,
lowpass_cutoff: float = 1000,
resample_frequency: float = 4000,
delta_pitch: float = 0.005,
nccf_ballast: float = 7000,
lowpass_filter_width: int = 1,
upsample_filter_width: int = 5,
max_frames_latency: int = 0,
frames_per_chunk: int = 0,
simulate_first_pass_online: bool = False,
recompute_frame: int = 500,
snip_edges: bool = True,
) -> torch.Tensor:
"""Extract pitch based on method described in [1].
This function computes the equivalent of `compute-kaldi-pitch-feats` from Kaldi.
Args:
waveform (Tensor):
The input waveform of shape `(..., time)`.
sample_rate (float):
Sample rate of `waveform`.
frame_length (float, optional):
Frame length in milliseconds. (default: 25.0)
frame_shift (float, optional):
Frame shift in milliseconds. (default: 10.0)
min_f0 (float, optional):
Minimum F0 to search for (Hz) (default: 50.0)
max_f0 (float, optional):
Maximum F0 to search for (Hz) (default: 400.0)
soft_min_f0 (float, optional):
Minimum f0, applied in soft way, must not exceed min-f0 (default: 10.0)
penalty_factor (float, optional):
            Cost factor for F0 change. (default: 0.1)
lowpass_cutoff (float, optional):
Cutoff frequency for LowPass filter (Hz) (default: 1000)
resample_frequency (float, optional):
Frequency that we down-sample the signal to. Must be more than twice lowpass-cutoff.
(default: 4000)
        delta_pitch (float, optional):
Smallest relative change in pitch that our algorithm measures. (default: 0.005)
nccf_ballast (float, optional):
Increasing this factor reduces NCCF for quiet frames (default: 7000)
lowpass_filter_width (int, optional):
Integer that determines filter width of lowpass filter, more gives sharper filter.
(default: 1)
upsample_filter_width (int, optional):
Integer that determines filter width when upsampling NCCF. (default: 5)
max_frames_latency (int, optional):
Maximum number of frames of latency that we allow pitch tracking to introduce into
the feature processing (affects output only if ``frames_per_chunk > 0`` and
``simulate_first_pass_online=True``) (default: 0)
frames_per_chunk (int, optional):
The number of frames used for energy normalization. (default: 0)
simulate_first_pass_online (bool, optional):
If true, the function will output features that correspond to what an online decoder
would see in the first pass of decoding -- not the final version of the features,
which is the default. (default: False)
Relevant if ``frames_per_chunk > 0``.
recompute_frame (int, optional):
Only relevant for compatibility with online pitch extraction.
A non-critical parameter; the frame at which we recompute some of the forward pointers,
after revising our estimate of the signal energy.
Relevant if ``frames_per_chunk > 0``. (default: 500)
snip_edges (bool, optional):
If this is set to false, the incomplete frames near the ending edge won't be snipped,
so that the number of frames is the file size divided by the frame-shift.
This makes different types of features give the same number of frames. (default: True)
Returns:
        Tensor: Pitch feature. Shape: ``(batch, frames, 2)`` where the last dimension
corresponds to pitch and NCCF.
Reference:
- A pitch extraction algorithm tuned for automatic speech recognition
<NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>
2014 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP),
Florence, 2014, pp. 2494-2498, doi: 10.1109/ICASSP.2014.6854049.
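    Example
        Minimal sketch with the default tracker settings; this assumes torchaudio
        was built with Kaldi support.
        >>> waveform = torch.randn(1, 16000)
        >>> features = compute_kaldi_pitch(waveform, 16000)
        >>> features.shape[-1]
        2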
"""
shape = waveform.shape
waveform = waveform.reshape(-1, shape[-1])
result = torch.ops.torchaudio.kaldi_ComputeKaldiPitch(
waveform, sample_rate, frame_length, frame_shift,
min_f0, max_f0, soft_min_f0, penalty_factor, lowpass_cutoff,
resample_frequency, delta_pitch, nccf_ballast,
lowpass_filter_width, upsample_filter_width, max_frames_latency,
frames_per_chunk, simulate_first_pass_online, recompute_frame,
snip_edges,
)
result = result.reshape(shape[:-1] + result.shape[-2:])
return result
| [
"torch.ops.torchaudio.kaldi_ComputeKaldiPitch",
"torchaudio._internal.module_utils.requires_sox",
"torchaudio.sox_effects.sox_effects.apply_effects_file",
"torch.max",
"io.BytesIO",
"torch.sin",
"torch.exp",
"math.log",
"torch.min",
"torch.nn.functional.conv1d",
"torch.cos",
"torch.pow",
"math.sqrt",
"torch.sum",
"torch.nn.functional.pad",
"math.log10",
"torch.arange",
"torch.istft",
"torch.view_as_real",
"warnings.warn",
"torch.abs",
"torch.stft",
"torch.rand",
"torch.sign",
"torch.norm",
"torch.round",
"torch.cumsum",
"torch.cat",
"torch.clamp",
"torch.median",
"math.ceil",
"torch.atan2",
"torch.stack",
"torch.tensor",
"torchaudio.backend.sox_io_backend.save",
"torchaudio._internal.module_utils.requires_kaldi",
"torch.zeros",
"torch.linspace",
"torch.log1p"
] | [((36079, 36104), 'torchaudio._internal.module_utils.requires_sox', '_mod_utils.requires_sox', ([], {}), '()\n', (36102, 36104), True, 'from torchaudio._internal import module_utils as _mod_utils\n'), ((38148, 38175), 'torchaudio._internal.module_utils.requires_kaldi', '_mod_utils.requires_kaldi', ([], {}), '()\n', (38173, 38175), True, 'from torchaudio._internal import module_utils as _mod_utils\n'), ((2825, 3027), 'torch.stft', 'torch.stft', ([], {'input': 'waveform', 'n_fft': 'n_fft', 'hop_length': 'hop_length', 'win_length': 'win_length', 'window': 'window', 'center': 'center', 'pad_mode': 'pad_mode', 'normalized': '(False)', 'onesided': 'onesided', 'return_complex': '(True)'}), '(input=waveform, n_fft=n_fft, hop_length=hop_length, win_length=\n win_length, window=window, center=center, pad_mode=pad_mode, normalized\n =False, onesided=onesided, return_complex=True)\n', (2835, 3027), False, 'import torch\n'), ((3384, 3410), 'torch.view_as_real', 'torch.view_as_real', (['spec_f'], {}), '(spec_f)\n', (3402, 3410), False, 'import torch\n'), ((6658, 6675), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (6670, 6675), False, 'import torch\n'), ((7876, 7999), 'torch.istft', 'torch.istft', (['(specgram * angles)'], {'n_fft': 'n_fft', 'hop_length': 'hop_length', 'win_length': 'win_length', 'window': 'window', 'length': 'length'}), '(specgram * angles, n_fft=n_fft, hop_length=hop_length,\n win_length=win_length, window=window, length=length)\n', (7887, 7999), False, 'import torch\n'), ((13495, 13539), 'torch.linspace', 'torch.linspace', (['(0)', '(sample_rate // 2)', 'n_freqs'], {}), '(0, sample_rate // 2, n_freqs)\n', (13509, 13539), False, 'import torch\n'), ((13686, 13726), 'torch.linspace', 'torch.linspace', (['m_min', 'm_max', '(n_mels + 2)'], {}), '(m_min, m_max, n_mels + 2)\n', (13700, 13726), False, 'import torch\n'), ((14047, 14061), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (14058, 14061), False, 'import torch\n'), ((16425, 16456), 'torch.tensor', 'torch.tensor', (['mu'], {'dtype': 'x.dtype'}), '(mu, dtype=x.dtype)\n', (16437, 16456), False, 'import torch\n'), ((17251, 17285), 'torch.tensor', 'torch.tensor', (['mu'], {'dtype': 'x_mu.dtype'}), '(mu, dtype=x_mu.dtype)\n', (17263, 17285), False, 'import torch\n'), ((18225, 18284), 'torch.atan2', 'torch.atan2', (['complex_tensor[..., 1]', 'complex_tensor[..., 0]'], {}), '(complex_tensor[..., 1], complex_tensor[..., 0])\n', (18236, 18284), False, 'import torch\n'), ((20353, 20409), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['complex_specgrams', '[0, 0, 0, 2]'], {}), '(complex_specgrams, [0, 0, 0, 2])\n', (20376, 20409), False, 'import torch\n'), ((20700, 20744), 'torch.norm', 'torch.norm', (['complex_specgrams_0'], {'p': '(2)', 'dim': '(-1)'}), '(complex_specgrams_0, p=2, dim=-1)\n', (20710, 20744), False, 'import torch\n'), ((20758, 20802), 'torch.norm', 'torch.norm', (['complex_specgrams_1'], {'p': '(2)', 'dim': '(-1)'}), '(complex_specgrams_1, p=2, dim=-1)\n', (20768, 20802), False, 'import torch\n'), ((20992, 21037), 'torch.cat', 'torch.cat', (['[phase_0, phase[..., :-1]]'], {'dim': '(-1)'}), '([phase_0, phase[..., :-1]], dim=-1)\n', (21001, 21037), False, 'import torch\n'), ((21054, 21077), 'torch.cumsum', 'torch.cumsum', (['phase', '(-1)'], {}), '(phase, -1)\n', (21066, 21077), False, 'import torch\n'), ((21255, 21304), 'torch.stack', 'torch.stack', (['[real_stretch, imag_stretch]'], {'dim': '(-1)'}), '([real_stretch, imag_stretch], dim=-1)\n', (21266, 21304), False, 'import torch\n'), ((25809, 
25861), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['specgram', '(n, n)'], {'mode': 'mode'}), '(specgram, (n, n), mode=mode)\n', (25832, 25861), False, 'import torch\n'), ((27068, 27109), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['waveform', '(0, p)'], {}), '(waveform, (0, p))\n', (27091, 27109), False, 'import torch\n'), ((27627, 27652), 'torch.cat', 'torch.cat', (['output_lag', '(-1)'], {}), '(output_lag, -1)\n', (27636, 27652), False, 'import torch\n'), ((28533, 28567), 'torch.max', 'torch.max', (['nccf[..., lag_min:]', '(-1)'], {}), '(nccf[..., lag_min:], -1)\n', (28542, 28567), False, 'import torch\n'), ((28616, 28659), 'torch.max', 'torch.max', (['nccf[..., lag_min:half_size]', '(-1)'], {}), '(nccf[..., lag_min:half_size], -1)\n', (28625, 28659), False, 'import torch\n'), ((29141, 29218), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['indices', '(pad_length, 0)'], {'mode': '"""constant"""', 'value': '(0.0)'}), "(indices, (pad_length, 0), mode='constant', value=0.0)\n", (29164, 29218), False, 'import torch\n'), ((29400, 29422), 'torch.median', 'torch.median', (['roll', '(-1)'], {}), '(roll, -1)\n', (29412, 29422), False, 'import torch\n'), ((32263, 32327), 'torch.zeros', 'torch.zeros', (['num_channels', 'num_feats'], {'dtype': 'dtype', 'device': 'device'}), '(num_channels, num_feats, dtype=dtype, device=device)\n', (32274, 32327), False, 'import torch\n'), ((32344, 32408), 'torch.zeros', 'torch.zeros', (['num_channels', 'num_feats'], {'dtype': 'dtype', 'device': 'device'}), '(num_channels, num_feats, dtype=dtype, device=device)\n', (32355, 32408), False, 'import torch\n'), ((32428, 32504), 'torch.zeros', 'torch.zeros', (['num_channels', 'num_frames', 'num_feats'], {'dtype': 'dtype', 'device': 'device'}), '(num_channels, num_frames, num_feats, dtype=dtype, device=device)\n', (32439, 32504), False, 'import torch\n'), ((37438, 37450), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (37448, 37450), False, 'import io\n'), ((37455, 37591), 'torchaudio.backend.sox_io_backend.save', 'torchaudio.backend.sox_io_backend.save', (['bytes', 'waveform', 'sample_rate', 'channels_first', 'compression', 'format', 'encoding', 'bits_per_sample'], {}), '(bytes, waveform, sample_rate,\n channels_first, compression, format, encoding, bits_per_sample)\n', (37493, 37591), False, 'import torchaudio\n'), ((37970, 38119), 'torchaudio.sox_effects.sox_effects.apply_effects_file', 'torchaudio.sox_effects.sox_effects.apply_effects_file', (['bytes'], {'effects': "[['rate', f'{sample_rate}']]", 'channels_first': 'channels_first', 'format': 'format'}), "(bytes, effects=[[\n 'rate', f'{sample_rate}']], channels_first=channels_first, format=format)\n", (38023, 38119), False, 'import torchaudio\n'), ((42613, 42971), 'torch.ops.torchaudio.kaldi_ComputeKaldiPitch', 'torch.ops.torchaudio.kaldi_ComputeKaldiPitch', (['waveform', 'sample_rate', 'frame_length', 'frame_shift', 'min_f0', 'max_f0', 'soft_min_f0', 'penalty_factor', 'lowpass_cutoff', 'resample_frequency', 'delta_pitch', 'nccf_ballast', 'lowpass_filter_width', 'upsample_filter_width', 'max_frames_latency', 'frames_per_chunk', 'simulate_first_pass_online', 'recompute_frame', 'snip_edges'], {}), '(waveform, sample_rate,\n frame_length, frame_shift, min_f0, max_f0, soft_min_f0, penalty_factor,\n lowpass_cutoff, resample_frequency, delta_pitch, nccf_ballast,\n lowpass_filter_width, upsample_filter_width, max_frames_latency,\n frames_per_chunk, simulate_first_pass_online, recompute_frame, snip_edges)\n', (42657, 42971), False, 'import 
torch\n'), ((2584, 2641), 'torch.nn.functional.pad', 'torch.nn.functional.pad', (['waveform', '(pad, pad)', '"""constant"""'], {}), "(waveform, (pad, pad), 'constant')\n", (2607, 2641), False, 'import torch\n'), ((5846, 6020), 'warnings.warn', 'warnings.warn', (['"""The argument normalized is not used in Griffin-Lim, and will be removed in v0.9.0 release. To suppress this warning, please use `normalized=False`."""'], {}), "(\n 'The argument normalized is not used in Griffin-Lim, and will be removed in v0.9.0 release. To suppress this warning, please use `normalized=False`.'\n )\n", (5859, 6020), False, 'import warnings\n'), ((6384, 6416), 'torch.zeros', 'torch.zeros', (['batch', 'freq', 'frames'], {}), '(batch, freq, frames)\n', (6395, 6416), False, 'import torch\n'), ((11094, 11107), 'math.log', 'math.log', (['(6.4)'], {}), '(6.4)\n', (11102, 11107), False, 'import math\n'), ((11967, 11980), 'math.log', 'math.log', (['(6.4)'], {}), '(6.4)\n', (11975, 11980), False, 'import math\n'), ((12055, 12103), 'torch.exp', 'torch.exp', (['(logstep * (mels[log_t] - min_log_mel))'], {}), '(logstep * (mels[log_t] - min_log_mel))\n', (12064, 12103), False, 'import torch\n'), ((14228, 14261), 'torch.min', 'torch.min', (['down_slopes', 'up_slopes'], {}), '(down_slopes, up_slopes)\n', (14237, 14261), False, 'import torch\n'), ((14534, 14725), 'warnings.warn', 'warnings.warn', (['f"""At least one mel filterbank has all zero values. The value for `n_mels` ({n_mels}) may be set too high. Or, the value for `n_freqs` ({n_freqs}) may be set too low."""'], {}), "(\n f'At least one mel filterbank has all zero values. The value for `n_mels` ({n_mels}) may be set too high. Or, the value for `n_freqs` ({n_freqs}) may be set too low.'\n )\n", (14547, 14725), False, 'import warnings\n'), ((16517, 16532), 'torch.log1p', 'torch.log1p', (['mu'], {}), '(mu)\n', (16528, 16532), False, 'import torch\n'), ((21155, 21175), 'torch.cos', 'torch.cos', (['phase_acc'], {}), '(phase_acc)\n', (21164, 21175), False, 'import torch\n'), ((21201, 21221), 'torch.sin', 'torch.sin', (['phase_acc'], {}), '(phase_acc)\n', (21210, 21221), False, 'import torch\n'), ((22425, 22484), 'torch.rand', 'torch.rand', (['specgrams.shape[:2]'], {'device': 'device', 'dtype': 'dtype'}), '(specgrams.shape[:2], device=device, dtype=dtype)\n', (22435, 22484), False, 'import torch\n'), ((22514, 22573), 'torch.rand', 'torch.rand', (['specgrams.shape[:2]'], {'device': 'device', 'dtype': 'dtype'}), '(specgrams.shape[:2], device=device, dtype=dtype)\n', (22524, 22573), False, 'import torch\n'), ((23953, 23966), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (23963, 23966), False, 'import torch\n'), ((23996, 24009), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (24006, 24009), False, 'import torch\n'), ((25977, 26047), 'torch.nn.functional.conv1d', 'torch.nn.functional.conv1d', (['specgram', 'kernel'], {'groups': 'specgram.shape[1]'}), '(specgram, kernel, groups=specgram.shape[1])\n', (26003, 26047), False, 'import torch\n'), ((26790, 26823), 'math.ceil', 'math.ceil', (['(sample_rate / freq_low)'], {}), '(sample_rate / freq_low)\n', (26799, 26823), False, 'import math\n'), ((26847, 26882), 'math.ceil', 'math.ceil', (['(sample_rate * frame_time)'], {}), '(sample_rate * frame_time)\n', (26856, 26882), False, 'import math\n'), ((26951, 26990), 'math.ceil', 'math.ceil', (['(waveform_length / frame_size)'], {}), '(waveform_length / frame_size)\n', (26960, 26990), False, 'import math\n'), ((28440, 28474), 'math.ceil', 'math.ceil', (['(sample_rate / 
freq_high)'], {}), '(sample_rate / freq_high)\n', (28449, 28474), False, 'import math\n'), ((6325, 6356), 'torch.rand', 'torch.rand', (['batch', 'freq', 'frames'], {}), '(batch, freq, frames)\n', (6335, 6356), False, 'import torch\n'), ((7204, 7400), 'torch.stft', 'torch.stft', ([], {'input': 'inverse', 'n_fft': 'n_fft', 'hop_length': 'hop_length', 'win_length': 'win_length', 'window': 'window', 'center': '(True)', 'pad_mode': '"""reflect"""', 'normalized': '(False)', 'onesided': '(True)', 'return_complex': '(True)'}), "(input=inverse, n_fft=n_fft, hop_length=hop_length, win_length=\n win_length, window=window, center=True, pad_mode='reflect', normalized=\n False, onesided=True, return_complex=True)\n", (7214, 7400), False, 'import torch\n'), ((9357, 9381), 'torch.clamp', 'torch.clamp', (['x'], {'min': 'amin'}), '(x, min=amin)\n', (9368, 9381), False, 'import torch\n'), ((10341, 10365), 'torch.pow', 'torch.pow', (['(10.0)', '(0.1 * x)'], {}), '(10.0, 0.1 * x)\n', (10350, 10365), False, 'import torch\n'), ((10841, 10871), 'math.log10', 'math.log10', (['(1.0 + freq / 700.0)'], {}), '(1.0 + freq / 700.0)\n', (10851, 10871), False, 'import math\n'), ((15673, 15687), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (15682, 15687), False, 'import math\n'), ((16468, 16481), 'torch.sign', 'torch.sign', (['x'], {}), '(x)\n', (16478, 16481), False, 'import torch\n'), ((17326, 17339), 'torch.sign', 'torch.sign', (['x'], {}), '(x)\n', (17336, 17339), False, 'import torch\n'), ((20884, 20918), 'torch.round', 'torch.round', (['(phase / (2 * math.pi))'], {}), '(phase / (2 * math.pi))\n', (20895, 20918), False, 'import torch\n'), ((25876, 25930), 'torch.arange', 'torch.arange', (['(-n)', '(n + 1)', '(1)'], {'device': 'device', 'dtype': 'dtype'}), '(-n, n + 1, 1, device=device, dtype=dtype)\n', (25888, 25930), False, 'import torch\n'), ((33337, 33361), 'torch.sum', 'torch.sum', (['input_part', '(1)'], {}), '(input_part, 1)\n', (33346, 33361), False, 'import torch\n'), ((35855, 35941), 'torch.linspace', 'torch.linspace', (['(0)', '(sample_rate // 2)'], {'steps': '(1 + n_fft // 2)', 'device': 'specgram.device'}), '(0, sample_rate // 2, steps=1 + n_fft // 2, device=specgram.\n device)\n', (35869, 35941), False, 'import torch\n'), ((6841, 6964), 'torch.istft', 'torch.istft', (['(specgram * angles)'], {'n_fft': 'n_fft', 'hop_length': 'hop_length', 'win_length': 'win_length', 'window': 'window', 'length': 'length'}), '(specgram * angles, n_fft=n_fft, hop_length=hop_length,\n win_length=win_length, window=window, length=length)\n', (6852, 6964), False, 'import torch\n'), ((11172, 11199), 'math.log', 'math.log', (['(freq / min_log_hz)'], {}), '(freq / min_log_hz)\n', (11180, 11199), False, 'import math\n'), ((34252, 34316), 'torch.zeros', 'torch.zeros', (['num_channels', 'num_feats'], {'dtype': 'dtype', 'device': 'device'}), '(num_channels, num_feats, dtype=dtype, device=device)\n', (34263, 34316), False, 'import torch\n'), ((34540, 34565), 'torch.pow', 'torch.pow', (['variance', '(-0.5)'], {}), '(variance, -0.5)\n', (34549, 34565), False, 'import torch\n'), ((16501, 16513), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (16510, 16513), False, 'import torch\n'), ((33417, 33449), 'torch.cumsum', 'torch.cumsum', (['(input_part ** 2)', '(1)'], {}), '(input_part ** 2, 1)\n', (33429, 33449), False, 'import torch\n'), ((17353, 17365), 'torch.abs', 'torch.abs', (['x'], {}), '(x)\n', (17362, 17365), False, 'import torch\n'), ((17368, 17383), 'torch.log1p', 'torch.log1p', (['mu'], {}), '(mu)\n', (17379, 17383), 
False, 'import torch\n'), ((27513, 27540), 'torch.norm', 'torch.norm', (['s2'], {'p': '(2)', 'dim': '(-1)'}), '(s2, p=2, dim=-1)\n', (27523, 27540), False, 'import torch\n'), ((27452, 27479), 'torch.norm', 'torch.norm', (['s1'], {'p': '(2)', 'dim': '(-1)'}), '(s1, p=2, dim=-1)\n', (27462, 27479), False, 'import torch\n')] |
#!/usr/bin/env python3
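"""Curses-based terminal dashboard for a MAVROS-controlled vehicle.
Subscribes to state, battery, extended state, GPS fix, local/global pose,
setpoint, diagnostics and camera image topics, and periodically renders a
one-screen status summary in the terminal."""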
from __future__ import absolute_import, division, print_function
import curses
import sys
from collections import deque
from datetime import datetime
import numpy as np
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import ExtendedState, PositionTarget, State # StatusText
from scipy.spatial.transform import Rotation as R
from sensor_msgs.msg import BatteryState, Image, NavSatFix
GPS_FIX_DICT = {
0: ('No GPS', curses.COLOR_RED),
1: ('No fix', curses.COLOR_RED),
2: ('2D lock', curses.COLOR_BLUE),
3: ('3D lock', curses.COLOR_BLUE),
4: ('DGPS', curses.COLOR_MAGENTA),
5: ('RTK float', curses.COLOR_YELLOW),
6: ('RTK fix', curses.COLOR_GREEN)
}
def get_color(color):
return curses.color_pair(color)
def frequency_from_messages(messages):
durations = []
for i in range(len(messages) - 1):
duration = messages[i + 1].header.stamp - messages[i].header.stamp
durations.append(duration.to_sec())
frequency = 1 / np.mean(durations)
if np.isnan(frequency):
return 0
return frequency
class StatusNode:
def __init__(self, screen):
rospy.init_node('status_node', argv=sys.argv)
self.rate = rospy.get_param('~rate', default=1.0)
# Curses setup
self.screen = curses.initscr()
self.rows, self.cols = self.screen.getmaxyx()
height_status = 15
self.status = curses.newwin(height_status, self.cols, 1, 2)
# self.console = curses.newwin(self.rows - height_status, self.cols, 12, 2)
self.lines = 0
self.text = ''
self.screen.keypad(True)
curses.curs_set(False) # Hide cursor
colors = [curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_CYAN,
curses.COLOR_GREEN, curses.COLOR_MAGENTA, curses.COLOR_RED,
curses.COLOR_WHITE, curses.COLOR_YELLOW]
# Curses color setup
curses.use_default_colors()
for color in colors:
curses.init_pair(color, color, -1)
# Default variables
self.status_battery_perc = None
self.state = State()
self.state_sub = rospy.Subscriber('mavros/state', State,
callback=self.state_callback,
queue_size=1)
self.battery = BatteryState()
self.battery_sub = rospy.Subscriber('mavros/battery', BatteryState,
callback=self.battery_callback,
queue_size=1)
self.extended = ExtendedState()
self.extended_sub = rospy.Subscriber('mavros/extended_state', ExtendedState,
callback=self.extended_callback,
queue_size=1)
# self.statustext = StatusText()
# self.statustext_sub = rospy.Subscriber('mavros/statustext/recv', StatusText,
# callback=self.statustext_callback,
# queue_size=1)
self.gps = NavSatFix()
self.gps_sub = rospy.Subscriber('mavros/global_position/raw/fix', NavSatFix,
callback=self.gps_callback,
queue_size=1)
self.local_pose = PoseStamped()
self.local_pose_sub = rospy.Subscriber('mavros/local_position/pose', PoseStamped,
callback=self.local_pose_callback,
queue_size=1)
self.global_pose = PoseStamped()
self.global_pose_sub = rospy.Subscriber('global_position/pose', PoseStamped,
callback=self.global_pose_callback,
queue_size=1)
self.diagnostics = DiagnosticArray()
self.diagnostic_gps = DiagnosticStatus()
self.diagnostics_sub = rospy.Subscriber('/diagnostics', DiagnosticArray,
callback=self.diagnostics_callback,
queue_size=1)
self.setpoint = PositionTarget()
self.setpoint_sub = rospy.Subscriber('mavros/setpoint_raw/local', PositionTarget,
callback=self.setpoint_callback,
queue_size=1)
self.cameras = ['front', 'right', 'back', 'left']
self.image_subscribers = []
self.images = {c: deque(maxlen=10) for c in self.cameras}
for camera in self.cameras:
topic = f'camera_{camera}/image_raw'
subscriber = rospy.Subscriber(topic, Image, callback=self.image_callback,
callback_args=camera, queue_size=1,
buff_size=2 ** 24)
self.image_subscribers.append(subscriber)
def battery_callback(self, battery_msg):
if battery_msg.location == 'id0':
self.battery = battery_msg
def state_callback(self, state_msg):
self.state = state_msg
def extended_callback(self, extended_msg):
self.extended = extended_msg
def diagnostics_callback(self, diagnostics_msg):
for status in diagnostics_msg.status:
if 'GPS' in status.name:
self.diagnostic_gps = status
def gps_callback(self, gps_msg):
self.gps = gps_msg
def local_pose_callback(self, pose_msg):
self.local_pose = pose_msg
def global_pose_callback(self, pose_msg):
self.global_pose = pose_msg
def setpoint_callback(self, setpoint_msg):
self.setpoint = setpoint_msg
def image_callback(self, image_msg, camera):
self.images[camera].append(image_msg)
def statustext_callback(self, statustext_msg):
screen = self.console
time_str = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# time_str = datetime.datetime.fromtimestamp(unix_time)
text = statustext_msg.text
severity = statustext_msg.severity
msg = statustext_msg
severity_red = [msg.EMERGENCY, msg.ALERT, msg.CRITICAL, msg.ERROR]
severity_yellow = [msg.WARNING, msg.NOTICE]
severity_neutral = [msg.INFO, msg.DEBUG]
color = curses.COLOR_CYAN
if severity in severity_red:
color = curses.COLOR_RED
elif severity in severity_yellow:
color = curses.COLOR_YELLOW
elif severity in severity_neutral:
color = curses.COLOR_WHITE
self.text = f'{time_str}: {text} ({color})'
# screen.addstr(self.lines, 0, log, get_color(color))
self.lines += 1
screen.refresh()
def print_status(self):
screen = self.status
screen.clear()
# rospy.loginfo(status)
# print(status)
x_tab = 0
x_indent = 14
row = 0
# Battery
battery_percentage = int(self.battery.percentage * 100)
color = curses.COLOR_CYAN
if battery_percentage > 50:
color = curses.COLOR_GREEN
elif battery_percentage > 25:
color = curses.COLOR_YELLOW
elif battery_percentage > 0:
color = curses.COLOR_RED
status_battery = str(battery_percentage) + '%'
screen.addstr(row, x_tab, 'Battery: ')
screen.addstr(row, x_indent, status_battery, get_color(color))
row += 1
# Armed
if self.state.armed:
color = curses.COLOR_RED
status_armed = 'Yes'
else:
color = curses.COLOR_GREEN
status_armed = 'No'
screen.addstr(row, x_tab, 'Armed: ')
screen.addstr(row, x_indent, status_armed, get_color(color))
row += 1
# Mode
color = curses.COLOR_CYAN
mode = self.state.mode
if mode.startswith('AUTO'):
mode = mode.split('.')[-1]
mode = mode.capitalize()
if mode == 'Offboard':
color = curses.COLOR_RED
else:
color = curses.COLOR_BLUE
if mode == '':
mode = 'None'
elif mode == 'Posctl':
mode = 'Position'
elif mode == 'Rtl':
mode = 'Return'
status_mode = '{}'.format(mode)
screen.addstr(row, x_tab, 'Mode: ')
screen.addstr(row, x_indent, status_mode, get_color(color))
row += 1
# Extended status
if self.extended.landed_state == self.extended.LANDED_STATE_IN_AIR:
status_extended = 'Air'
color = curses.COLOR_RED
elif self.extended.landed_state == self.extended.LANDED_STATE_LANDING:
status_extended = 'Landed'
color = curses.COLOR_GREEN
elif self.extended.landed_state == self.extended.LANDED_STATE_ON_GROUND:
status_extended = 'Ground'
color = curses.COLOR_GREEN
elif self.extended.landed_state == self.extended.LANDED_STATE_TAKEOFF:
status_extended = 'Takeoff'
color = curses.COLOR_RED
elif self.extended.landed_state == self.extended.LANDED_STATE_UNDEFINED:
status_extended = 'Undefined'
color = curses.COLOR_CYAN
screen.addstr(row, x_tab, 'State: ')
screen.addstr(row, x_indent, status_extended, get_color(color))
row += 1
# GPS info
satellites = 0
fix_type, color = GPS_FIX_DICT[0]
for value in self.diagnostic_gps.values:
if value.key == 'Satellites visible':
satellites = value.value
elif value.key == 'Fix type':
fix_type, color = GPS_FIX_DICT[int(value.value)]
screen.addstr(row, x_tab, 'GPS info: ')
screen.addstr(row, x_indent, f'{fix_type} ({satellites} sat)', get_color(color))
row += 2
# GPS pos
latitude = self.gps.latitude
longitude = self.gps.longitude
altitude = round(self.gps.altitude, 2)
status_gps = f'{latitude:.7f} {longitude:.7f} {altitude:.2f} (LLA)'
screen.addstr(row, x_tab, 'GPS pos: ')
screen.addstr(row, x_indent, status_gps)
row += 1
# Local pose
p = self.local_pose.pose.position
q = self.local_pose.pose.orientation
quaternion = [q.x, q.y, q.z, q.w]
try:
rot = R.from_quat(quaternion)
except ValueError:
rot = R.from_euler('zyx', [0.0, 0.0, 0.0])
yaw, pitch, roll = rot.as_euler('zyx', degrees=True)
x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)
yaw, pitch, roll = int(yaw), int(pitch), int(roll)
screen.addstr(row, x_tab, 'Local pos: ')
screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')
row += 1
# Global pose
p = self.global_pose.pose.position
q = self.global_pose.pose.orientation
quaternion = [q.x, q.y, q.z, q.w]
try:
rot = R.from_quat(quaternion)
except ValueError:
rot = R.from_euler('zyx', [0.0, 0.0, 0.0])
yaw, pitch, roll = rot.as_euler('zyx', degrees=True)
x, y, z = round(p.x, 2), round(p.y, 2), round(p.z, 2)
yaw, pitch, roll = int(yaw), int(pitch), int(roll)
screen.addstr(row, x_tab, 'Global pos: ')
screen.addstr(row, x_indent, f'{x:.2f} {y:.2f} {z:.2f} (XYZ) {roll} {pitch} {yaw} (RPY)')
row += 1
# Setpoint
v = self.setpoint.velocity
vx, vy, vz = round(v.x, 2), round(v.y, 2), round(v.z, 2)
yaw = int(np.rad2deg(self.setpoint.yaw))
screen.addstr(row, x_tab, 'Setpoint: ')
screen.addstr(row, x_indent, f'{vx:.2f} {vy:.2f} {vz:.2f} (XYZ) {yaw} (Y)')
row += 1
# Cameras
freqs = {c: 0 for c in self.cameras}
for cam, messages in self.images.items():
freqs[cam] = frequency_from_messages(messages)
ff, fr, fb, fl = [int(round(v)) for k, v in freqs.items()]
screen.addstr(row, x_tab, 'Cameras: ')
screen.addstr(row, x_indent, f'{ff} {fr} {fb} {fl} (front right back left [Hz])')
row += 1
screen.refresh()
self.screen.refresh()
def run(self):
rate = rospy.Rate(self.rate)
try:
while not rospy.is_shutdown():
self.print_status()
rate.sleep()
except rospy.ROSInterruptException:
curses.nocbreak()
self.screen.keypad(False)
curses.echo()
def curses_main(screen):
StatusNode(screen).run()
def main():
try:
curses.wrapper(curses_main)
except rospy.ROSInterruptException:
pass
if __name__ == '__main__':
main()
| [
"rospy.init_node",
"mavros_msgs.msg.State",
"curses.curs_set",
"rospy.Rate",
"curses.nocbreak",
"numpy.mean",
"collections.deque",
"scipy.spatial.transform.Rotation.from_euler",
"sensor_msgs.msg.NavSatFix",
"curses.init_pair",
"rospy.Subscriber",
"numpy.rad2deg",
"curses.color_pair",
"curses.wrapper",
"mavros_msgs.msg.PositionTarget",
"diagnostic_msgs.msg.DiagnosticStatus",
"rospy.get_param",
"sensor_msgs.msg.BatteryState",
"curses.use_default_colors",
"diagnostic_msgs.msg.DiagnosticArray",
"numpy.isnan",
"curses.newwin",
"curses.initscr",
"mavros_msgs.msg.ExtendedState",
"curses.echo",
"rospy.is_shutdown",
"scipy.spatial.transform.Rotation.from_quat",
"datetime.datetime.now",
"geometry_msgs.msg.PoseStamped"
] | [((831, 855), 'curses.color_pair', 'curses.color_pair', (['color'], {}), '(color)\n', (848, 855), False, 'import curses\n'), ((1120, 1139), 'numpy.isnan', 'np.isnan', (['frequency'], {}), '(frequency)\n', (1128, 1139), True, 'import numpy as np\n'), ((1094, 1112), 'numpy.mean', 'np.mean', (['durations'], {}), '(durations)\n', (1101, 1112), True, 'import numpy as np\n'), ((1241, 1286), 'rospy.init_node', 'rospy.init_node', (['"""status_node"""'], {'argv': 'sys.argv'}), "('status_node', argv=sys.argv)\n", (1256, 1286), False, 'import rospy\n'), ((1308, 1345), 'rospy.get_param', 'rospy.get_param', (['"""~rate"""'], {'default': '(1.0)'}), "('~rate', default=1.0)\n", (1323, 1345), False, 'import rospy\n'), ((1392, 1408), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (1406, 1408), False, 'import curses\n'), ((1514, 1559), 'curses.newwin', 'curses.newwin', (['height_status', 'self.cols', '(1)', '(2)'], {}), '(height_status, self.cols, 1, 2)\n', (1527, 1559), False, 'import curses\n'), ((1732, 1754), 'curses.curs_set', 'curses.curs_set', (['(False)'], {}), '(False)\n', (1747, 1754), False, 'import curses\n'), ((2022, 2049), 'curses.use_default_colors', 'curses.use_default_colors', ([], {}), '()\n', (2047, 2049), False, 'import curses\n'), ((2217, 2224), 'mavros_msgs.msg.State', 'State', ([], {}), '()\n', (2222, 2224), False, 'from mavros_msgs.msg import ExtendedState, PositionTarget, State\n'), ((2250, 2337), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/state"""', 'State'], {'callback': 'self.state_callback', 'queue_size': '(1)'}), "('mavros/state', State, callback=self.state_callback,\n queue_size=1)\n", (2266, 2337), False, 'import rospy\n'), ((2442, 2456), 'sensor_msgs.msg.BatteryState', 'BatteryState', ([], {}), '()\n', (2454, 2456), False, 'from sensor_msgs.msg import BatteryState, Image, NavSatFix\n'), ((2484, 2583), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/battery"""', 'BatteryState'], {'callback': 'self.battery_callback', 'queue_size': '(1)'}), "('mavros/battery', BatteryState, callback=self.\n battery_callback, queue_size=1)\n", (2500, 2583), False, 'import rospy\n'), ((2692, 2707), 'mavros_msgs.msg.ExtendedState', 'ExtendedState', ([], {}), '()\n', (2705, 2707), False, 'from mavros_msgs.msg import ExtendedState, PositionTarget, State\n'), ((2736, 2844), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/extended_state"""', 'ExtendedState'], {'callback': 'self.extended_callback', 'queue_size': '(1)'}), "('mavros/extended_state', ExtendedState, callback=self.\n extended_callback, queue_size=1)\n", (2752, 2844), False, 'import rospy\n'), ((3226, 3237), 'sensor_msgs.msg.NavSatFix', 'NavSatFix', ([], {}), '()\n', (3235, 3237), False, 'from sensor_msgs.msg import BatteryState, Image, NavSatFix\n'), ((3261, 3369), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/global_position/raw/fix"""', 'NavSatFix'], {'callback': 'self.gps_callback', 'queue_size': '(1)'}), "('mavros/global_position/raw/fix', NavSatFix, callback=self\n .gps_callback, queue_size=1)\n", (3277, 3369), False, 'import rospy\n'), ((3472, 3485), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (3483, 3485), False, 'from geometry_msgs.msg import PoseStamped\n'), ((3516, 3629), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/local_position/pose"""', 'PoseStamped'], {'callback': 'self.local_pose_callback', 'queue_size': '(1)'}), "('mavros/local_position/pose', PoseStamped, callback=self.\n local_pose_callback, queue_size=1)\n", (3532, 3629), False, 'import rospy\n'), ((3747, 
3760), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (3758, 3760), False, 'from geometry_msgs.msg import PoseStamped\n'), ((3792, 3900), 'rospy.Subscriber', 'rospy.Subscriber', (['"""global_position/pose"""', 'PoseStamped'], {'callback': 'self.global_pose_callback', 'queue_size': '(1)'}), "('global_position/pose', PoseStamped, callback=self.\n global_pose_callback, queue_size=1)\n", (3808, 3900), False, 'import rospy\n'), ((4020, 4037), 'diagnostic_msgs.msg.DiagnosticArray', 'DiagnosticArray', ([], {}), '()\n', (4035, 4037), False, 'from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus\n'), ((4068, 4086), 'diagnostic_msgs.msg.DiagnosticStatus', 'DiagnosticStatus', ([], {}), '()\n', (4084, 4086), False, 'from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus\n'), ((4118, 4222), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/diagnostics"""', 'DiagnosticArray'], {'callback': 'self.diagnostics_callback', 'queue_size': '(1)'}), "('/diagnostics', DiagnosticArray, callback=self.\n diagnostics_callback, queue_size=1)\n", (4134, 4222), False, 'import rospy\n'), ((4339, 4355), 'mavros_msgs.msg.PositionTarget', 'PositionTarget', ([], {}), '()\n', (4353, 4355), False, 'from mavros_msgs.msg import ExtendedState, PositionTarget, State\n'), ((4384, 4497), 'rospy.Subscriber', 'rospy.Subscriber', (['"""mavros/setpoint_raw/local"""', 'PositionTarget'], {'callback': 'self.setpoint_callback', 'queue_size': '(1)'}), "('mavros/setpoint_raw/local', PositionTarget, callback=self\n .setpoint_callback, queue_size=1)\n", (4400, 4497), False, 'import rospy\n'), ((12475, 12496), 'rospy.Rate', 'rospy.Rate', (['self.rate'], {}), '(self.rate)\n', (12485, 12496), False, 'import rospy\n'), ((12843, 12870), 'curses.wrapper', 'curses.wrapper', (['curses_main'], {}), '(curses_main)\n', (12857, 12870), False, 'import curses\n'), ((2091, 2125), 'curses.init_pair', 'curses.init_pair', (['color', 'color', '(-1)'], {}), '(color, color, -1)\n', (2107, 2125), False, 'import curses\n'), ((4704, 4720), 'collections.deque', 'deque', ([], {'maxlen': '(10)'}), '(maxlen=10)\n', (4709, 4720), False, 'from collections import deque\n'), ((4854, 4974), 'rospy.Subscriber', 'rospy.Subscriber', (['topic', 'Image'], {'callback': 'self.image_callback', 'callback_args': 'camera', 'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), '(topic, Image, callback=self.image_callback, callback_args=\n camera, queue_size=1, buff_size=2 ** 24)\n', (4870, 4974), False, 'import rospy\n'), ((10572, 10595), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['quaternion'], {}), '(quaternion)\n', (10583, 10595), True, 'from scipy.spatial.transform import Rotation as R\n'), ((11212, 11235), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['quaternion'], {}), '(quaternion)\n', (11223, 11235), True, 'from scipy.spatial.transform import Rotation as R\n'), ((11806, 11835), 'numpy.rad2deg', 'np.rad2deg', (['self.setpoint.yaw'], {}), '(self.setpoint.yaw)\n', (11816, 11835), True, 'import numpy as np\n'), ((6087, 6101), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6099, 6101), False, 'from datetime import datetime\n'), ((10641, 10677), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""zyx"""', '[0.0, 0.0, 0.0]'], {}), "('zyx', [0.0, 0.0, 0.0])\n", (10653, 10677), True, 'from scipy.spatial.transform import Rotation as R\n'), ((11281, 11317), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""zyx"""', '[0.0, 0.0, 0.0]'], {}), "('zyx', [0.0, 0.0, 0.0])\n", 
(11293, 11317), True, 'from scipy.spatial.transform import Rotation as R\n'), ((12532, 12551), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (12549, 12551), False, 'import rospy\n'), ((12674, 12691), 'curses.nocbreak', 'curses.nocbreak', ([], {}), '()\n', (12689, 12691), False, 'import curses\n'), ((12742, 12755), 'curses.echo', 'curses.echo', ([], {}), '()\n', (12753, 12755), False, 'import curses\n')] |
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from . import images
from gym import Env, spaces
from time import time
import numpy as np
from copy import copy
import colorsys
import pygame
from pygame.transform import scale
class MinesweeperEnv(Env):
def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False):
self.grid_shape = grid_shape
self.grid_size = np.prod(grid_shape)
self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs
self.n_bombs = min(self.grid_size - 1, self.n_bombs)
self.flaged_bombs = 0
self.flaged_empty = 0
self.max_time = max_time
if impact_size % 2 == 0:
            raise ValueError('Impact_size must be an odd number!')
self.impact_size = impact_size
# Define constants
self.HIDDEN = 0
self.REVEAL = 1
self.FLAG = 2
self.BOMB = self.impact_size ** 2
# Setting up gym Env conventions
nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape)
self.observation_space = spaces.MultiDiscrete(nvec_observation)
nvec_action = np.array(self.grid_shape + (2,))
self.action_space = spaces.MultiDiscrete(nvec_action)
        # Initialize state
self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8)
## Setup bombs places
idx = np.indices(self.grid_shape).reshape(2, -1)
bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False)
self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids]
## Place numbers
self.semi_impact_size = (self.impact_size-1)//2
bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8)
for bombs_id in bombs_ids:
bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id]
x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0)
y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1)
bomb_region = self.state[x_min:x_max, y_min:y_max, 0]
bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max]
## Place bombs
self.state[self.bombs_positions + (0,)] = self.BOMB
self.start_time = time()
self.time_left = int(time() - self.start_time)
# Setup rendering
self.pygame_is_init = False
self.chicken = chicken
self.done = False
self.score = 0
def get_observation(self):
observation = copy(self.state[:, :, 1])
revealed = observation == 1
flaged = observation == 2
observation += self.impact_size ** 2 + 1
observation[revealed] = copy(self.state[:, :, 0][revealed])
observation[flaged] -= 1
return observation
def reveal_around(self, coords, reward, done, without_loss=False):
if not done:
x_min, x_max, _, _ = self.clip_index(coords[0], 0)
y_min, y_max, _, _ = self.clip_index(coords[1], 1)
region = self.state[x_min:x_max, y_min:y_max, :]
unseen_around = np.sum(region[..., 1] == 0)
if unseen_around == 0:
if not without_loss:
reward -= 0.001
return
flags_around = np.sum(region[..., 1] == 2)
if flags_around == self.state[coords + (0,)]:
unrevealed_zeros_around = np.logical_and(region[..., 0] == 0, region[..., 1] == self.HIDDEN)
if np.any(unrevealed_zeros_around):
zeros_coords = np.argwhere(unrevealed_zeros_around)
for zero in zeros_coords:
coord = (x_min + zero[0], y_min + zero[1])
self.state[coord + (1,)] = 1
self.reveal_around(coord, reward, done, without_loss=True)
self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] = 1
unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG)
if np.any(unflagged_bombs_around):
self.done = True
reward, done = -1, True
else:
if not without_loss:
reward -= 0.001
def clip_index(self, x, axis):
max_idx = self.grid_shape[axis]
x_min, x_max = max(0, x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size + 1)
dx_min, dx_max = x_min - (x - self.semi_impact_size), x_max - (x + self.semi_impact_size + 1) + self.impact_size
return x_min, x_max, dx_min, dx_max
def step(self, action):
coords = action[:2]
action_type = action[2] + 1 # 0 -> 1 = reveal; 1 -> 2 = toggle_flag
case_state = self.state[coords + (1,)]
case_content = self.state[coords + (0,)]
NO_BOMBS_AROUND = 0
reward, done = 0, False
self.time_left = self.max_time - time() + self.start_time
if self.time_left <= 0:
score = -(self.n_bombs - self.flaged_bombs + self.flaged_empty)/self.n_bombs
reward, done = score, True
return self.get_observation(), reward, done, {'passed':False}
if action_type == self.REVEAL:
if case_state == self.HIDDEN:
self.state[coords + (1,)] = action_type
if case_content == self.BOMB:
if self.pygame_is_init: self.done = True
reward, done = -1, True
return self.get_observation(), reward, done, {'passed':False}
elif case_content == NO_BOMBS_AROUND:
self.reveal_around(coords, reward, done)
elif case_state == self.REVEAL:
self.reveal_around(coords, reward, done)
reward -= 0.01
else:
reward -= 0.001
self.score += reward
return self.get_observation(), reward, done, {'passed':True}
elif action_type == self.FLAG:
if case_state == self.REVEAL:
reward -= 0.001
else:
flaging = 1
if case_state == self.FLAG:
flaging = -1
self.state[coords + (1,)] = self.HIDDEN
else:
self.state[coords + (1,)] = self.FLAG
if case_content == self.BOMB:
self.flaged_bombs += flaging
else:
self.flaged_empty += flaging
if self.flaged_bombs == self.n_bombs and self.flaged_empty == 0:
reward, done = 2 + self.time_left/self.max_time, True
        if np.any(np.logical_and(self.state[..., 0]==self.BOMB, self.state[..., 1]==1)) or self.done:
reward, done = -1 + self.time_left/self.max_time + (self.flaged_bombs - self.flaged_empty)/self.n_bombs, True
self.score += reward
return self.get_observation(), reward, done, {'passed':False}
def reset(self):
self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken)
return self.get_observation()
def render(self):
if not self.pygame_is_init:
self._init_pygame()
self.pygame_is_init = True
for event in pygame.event.get():
if event.type == pygame.QUIT: # pylint: disable=E1101
pygame.quit() # pylint: disable=E1101
# Plot background
pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height, self.width))
# Plot grid
for index, state in np.ndenumerate(self.state[..., 1]):
self._plot_block(index, state)
# Plot infos
## Score
score_text = self.score_font.render("SCORE", 1, (255, 10, 10))
score = self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10))
self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width))
self.window.blit(score, (0.1*self.header_size, 0.8*self.width))
## Time left
time_text = self.num_font.render("TIME", 1, (255, 10, 10))
self.time_left = self.max_time - time() + self.start_time
time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10))
self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width))
self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width))
## Bombs left
bombs_text = self.num_font.render("BOMBS", 1, (255, 255, 10))
left_text = self.num_font.render("LEFT", 1, (255, 255, 10))
potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty
potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10))
self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width))
self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width))
self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width))
pygame.display.flip()
pygame.time.wait(10)
if self.done:
pygame.time.wait(3000)
@staticmethod
def _get_color(n, max_n):
BLUE_HUE = 0.6
RED_HUE = 0.0
HUE = RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n - n) / max_n)**3
color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7))
return color
def _plot_block(self, index, state):
position = tuple(self.origin + self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0])))
label = None
if state == self.HIDDEN and not self.done:
img_key = 'hidden'
elif state == self.FLAG:
if not self.done:
img_key = 'flag'
else:
content = self.state[index][0]
if content == self.BOMB:
img_key = 'disabled_mine' if not self.chicken else 'disabled_chicken'
else:
img_key = 'misplaced_flag'
else:
content = self.state[index][0]
if content == self.BOMB:
if state == self.HIDDEN:
img_key = 'mine' if not self.chicken else 'chicken'
else:
img_key = 'exploded_mine' if not self.chicken else 'exploded_chicken'
else:
img_key = 'revealed'
label = self.num_font.render(str(content), 1, self._get_color(content, self.BOMB))
self.window.blit(self.images[img_key], position)
if label: self.window.blit(label, position + self.font_offset - (content > 9) * self.decimal_font_offset)
def _init_pygame(self):
pygame.init() # pylint: disable=E1101
# Open Pygame window
self.scale_factor = 2 * min(12 / self.grid_shape[0], 25 / self.grid_shape[1])
self.BLOCK_SIZE = 32
self.header_size = self.scale_factor * 100
self.origin = np.array([self.header_size, 0])
self.width = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0])
self.height = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1] + self.header_size)
self.window = pygame.display.set_mode((self.height, self.width))
# Setup font for numbers
num_font_size = 20
self.num_font = pygame.font.SysFont("monospace", int(self.scale_factor * num_font_size))
self.font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.325, 0.15])
self.decimal_font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.225, 0])
self.score_font = pygame.font.SysFont("monospace", int(self.scale_factor * 12))
# Load images
def scale_image(img, scale_factor=self.scale_factor):
return scale(img, (int(scale_factor*img.get_width()), int(scale_factor*img.get_height())))
images_names = ['hidden', 'revealed', 'flag', 'misplaced_flag']
if self.chicken:
images_names += ['chicken', 'exploded_chicken', 'disabled_chicken']
else:
images_names += ['mine', 'exploded_mine', 'disabled_mine']
self.images = {}
for img_name in images_names:
with pkg_resources.path(images, img_name + '.png') as path:
img = pygame.image.load(str(path)).convert()
self.images[img_name] = scale_image(img)
| [
"numpy.prod",
"pygame.init",
"pygame.quit",
"colorsys.hsv_to_rgb",
"numpy.array",
"copy.copy",
"importlib_resources.path",
"pygame.display.set_mode",
"pygame.display.flip",
"numpy.ndenumerate",
"pygame.draw.rect",
"numpy.ones",
"gym.spaces.MultiDiscrete",
"numpy.indices",
"numpy.any",
"time.time",
"numpy.logical_and",
"pygame.event.get",
"pygame.time.wait",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere"
] | [((567, 586), 'numpy.prod', 'np.prod', (['grid_shape'], {}), '(grid_shape)\n', (574, 586), True, 'import numpy as np\n'), ((1274, 1312), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['nvec_observation'], {}), '(nvec_observation)\n', (1294, 1312), False, 'from gym import Env, spaces\n'), ((1336, 1368), 'numpy.array', 'np.array', (['(self.grid_shape + (2,))'], {}), '(self.grid_shape + (2,))\n', (1344, 1368), True, 'import numpy as np\n'), ((1397, 1430), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['nvec_action'], {}), '(nvec_action)\n', (1417, 1430), False, 'from gym import Env, spaces\n'), ((1479, 1527), 'numpy.zeros', 'np.zeros', (['(self.grid_shape + (2,))'], {'dtype': 'np.uint8'}), '(self.grid_shape + (2,), dtype=np.uint8)\n', (1487, 1527), True, 'import numpy as np\n'), ((1882, 1943), 'numpy.ones', 'np.ones', (['(self.impact_size, self.impact_size)'], {'dtype': 'np.uint8'}), '((self.impact_size, self.impact_size), dtype=np.uint8)\n', (1889, 1943), True, 'import numpy as np\n'), ((2428, 2434), 'time.time', 'time', ([], {}), '()\n', (2432, 2434), False, 'from time import time\n'), ((2687, 2712), 'copy.copy', 'copy', (['self.state[:, :, 1]'], {}), '(self.state[:, :, 1])\n', (2691, 2712), False, 'from copy import copy\n'), ((2866, 2901), 'copy.copy', 'copy', (['self.state[:, :, 0][revealed]'], {}), '(self.state[:, :, 0][revealed])\n', (2870, 2901), False, 'from copy import copy\n'), ((7579, 7597), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7595, 7597), False, 'import pygame\n'), ((7762, 7838), 'pygame.draw.rect', 'pygame.draw.rect', (['self.window', '(60, 56, 53)', '(0, 0, self.height, self.width)'], {}), '(self.window, (60, 56, 53), (0, 0, self.height, self.width))\n', (7778, 7838), False, 'import pygame\n'), ((7888, 7922), 'numpy.ndenumerate', 'np.ndenumerate', (['self.state[..., 1]'], {}), '(self.state[..., 1])\n', (7902, 7922), True, 'import numpy as np\n'), ((9316, 9337), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (9335, 9337), False, 'import pygame\n'), ((9346, 9366), 'pygame.time.wait', 'pygame.time.wait', (['(10)'], {}), '(10)\n', (9362, 9366), False, 'import pygame\n'), ((10977, 10990), 'pygame.init', 'pygame.init', ([], {}), '()\n', (10988, 10990), False, 'import pygame\n'), ((11233, 11264), 'numpy.array', 'np.array', (['[self.header_size, 0]'], {}), '([self.header_size, 0])\n', (11241, 11264), True, 'import numpy as np\n'), ((11474, 11524), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(self.height, self.width)'], {}), '((self.height, self.width))\n', (11497, 11524), False, 'import pygame\n'), ((1216, 1240), 'numpy.ones', 'np.ones', (['self.grid_shape'], {}), '(self.grid_shape)\n', (1223, 1240), True, 'import numpy as np\n'), ((3272, 3299), 'numpy.sum', 'np.sum', (['(region[..., 1] == 0)'], {}), '(region[..., 1] == 0)\n', (3278, 3299), True, 'import numpy as np\n'), ((3460, 3487), 'numpy.sum', 'np.sum', (['(region[..., 1] == 2)'], {}), '(region[..., 1] == 2)\n', (3466, 3487), True, 'import numpy as np\n'), ((9401, 9423), 'pygame.time.wait', 'pygame.time.wait', (['(3000)'], {}), '(3000)\n', (9417, 9423), False, 'import pygame\n'), ((11748, 11771), 'numpy.array', 'np.array', (['[0.325, 0.15]'], {}), '([0.325, 0.15])\n', (11756, 11771), True, 'import numpy as np\n'), ((11845, 11865), 'numpy.array', 'np.array', (['[0.225, 0]'], {}), '([0.225, 0])\n', (11853, 11865), True, 'import numpy as np\n'), ((1573, 1600), 'numpy.indices', 'np.indices', (['self.grid_shape'], {}), '(self.grid_shape)\n', (1583, 1600), True, 
'import numpy as np\n'), ((2464, 2470), 'time.time', 'time', ([], {}), '()\n', (2468, 2470), False, 'from time import time\n'), ((3588, 3654), 'numpy.logical_and', 'np.logical_and', (['(region[..., 0] == 0)', '(region[..., 1] == self.HIDDEN)'], {}), '(region[..., 0] == 0, region[..., 1] == self.HIDDEN)\n', (3602, 3654), True, 'import numpy as np\n'), ((3674, 3705), 'numpy.any', 'np.any', (['unrevealed_zeros_around'], {}), '(unrevealed_zeros_around)\n', (3680, 3705), True, 'import numpy as np\n'), ((4183, 4255), 'numpy.logical_and', 'np.logical_and', (['(region[..., 0] == self.BOMB)', '(region[..., 1] != self.FLAG)'], {}), '(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG)\n', (4197, 4255), True, 'import numpy as np\n'), ((4275, 4305), 'numpy.any', 'np.any', (['unflagged_bombs_around'], {}), '(unflagged_bombs_around)\n', (4281, 4305), True, 'import numpy as np\n'), ((5152, 5158), 'time.time', 'time', ([], {}), '()\n', (5156, 5158), False, 'from time import time\n'), ((6920, 6984), 'numpy.logical_and', 'np.logical_and', (['(self.state[..., 0] == 9)', '(self.state[..., 1] == 1)'], {}), '(self.state[..., 0] == 9, self.state[..., 1] == 1)\n', (6934, 6984), True, 'import numpy as np\n'), ((7681, 7694), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (7692, 7694), False, 'import pygame\n'), ((8451, 8457), 'time.time', 'time', ([], {}), '()\n', (8455, 8457), False, 'from time import time\n'), ((9625, 9657), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['HUE', '(1)', '(0.7)'], {}), '(HUE, 1, 0.7)\n', (9644, 9657), False, 'import colorsys\n'), ((12495, 12540), 'importlib_resources.path', 'pkg_resources.path', (['images', "(img_name + '.png')"], {}), "(images, img_name + '.png')\n", (12513, 12540), True, 'import importlib_resources as pkg_resources\n'), ((3742, 3778), 'numpy.argwhere', 'np.argwhere', (['unrevealed_zeros_around'], {}), '(unrevealed_zeros_around)\n', (3753, 3778), True, 'import numpy as np\n'), ((9799, 9829), 'numpy.array', 'np.array', (['(index[1], index[0])'], {}), '((index[1], index[0]))\n', (9807, 9829), True, 'import numpy as np\n')] |
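A minimal interaction sketch for the environment above (illustrative only: it assumes MinesweeperEnv is importable from this module and skips render(), which needs the bundled pygame image assets):

env = MinesweeperEnv(grid_shape=(8, 8), n_bombs=5)
obs = env.reset()  # (8, 8) grid of encoded cell states
# an action is (row, col, kind): kind 0 reveals the cell, kind 1 toggles a flag
obs, reward, done, info = env.step((3, 4, 0))
obs, reward, done, info = env.step((0, 0, 1))
print(reward, done, info['passed'])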
import tables
import numpy as np
import matplotlib.pyplot as plt
# Reading the file.
fileh = tables.open_file('development.hdf5', mode='r')
# Dimensionality of the data structure.
print(fileh.root.utterance_test.shape)
print(fileh.root.utterance_train.shape)
print(fileh.root.label_train.shape)
print(fileh.root.label_test.shape)
| [
"tables.open_file"
] | [((94, 140), 'tables.open_file', 'tables.open_file', (['"""development.hdf5"""'], {'mode': '"""r"""'}), "('development.hdf5', mode='r')\n", (110, 140), False, 'import tables\n')] |
#!/usr/bin/env python
from distutils.core import setup
setup(name='Mimik',
version='1.0',
description='Python framework for markov models',
author='<NAME>',
author_email='<EMAIL>',
url='https://www.python.org/sigs/distutils-sig/',
packages=['distutils', 'distutils.command'],
)
| [
"distutils.core.setup"
] | [((57, 292), 'distutils.core.setup', 'setup', ([], {'name': '"""Mimik"""', 'version': '"""1.0"""', 'description': '"""Python framework for markov models"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://www.python.org/sigs/distutils-sig/"""', 'packages': "['distutils', 'distutils.command']"}), "(name='Mimik', version='1.0', description=\n 'Python framework for markov models', author='<NAME>', author_email=\n '<EMAIL>', url='https://www.python.org/sigs/distutils-sig/', packages=[\n 'distutils', 'distutils.command'])\n", (62, 292), False, 'from distutils.core import setup\n')] |
import os
import shutil
import sqlite3
import tarfile
from datetime import datetime
import bagit
def create_package(images, batch_dir):
    package_threshold = 838860800  # 800 MiB; the next power of 2 is 1 GiB
print("Package threshold: " + get_human_readable_file_size(package_threshold))
abs_path = os.getcwd()
try:
package_size = 0
for image in images:
package_size += image[1]
print("Total batch size: " + get_human_readable_file_size(package_size))
if package_size < package_threshold:
print("Not enough images yet to make a package from this batch.")
return()
else:
try:
# create new batch directory
split = os.path.split(batch_dir)
new_dir_number = int(split[1]) + 1
new_batch_dir = os.path.join(split[0], str(new_dir_number))
os.makedirs(new_batch_dir)
# move all related files for the last image that's getting removed from batch to keep within threshold
last_image = images[-1]
path, dirs, files = next(os.walk(batch_dir))
for file in files:
if file.find(last_image[0]) != -1:
filepath = os.path.join(path, file)
shutil.move(filepath, os.path.join(
new_batch_dir, file))
# drop the last image from the list (convert tuple) to get the package size back under threshold
images.pop(-1)
except Exception as e:
print("Unable to separate batch to make a package.")
print(e)
return()
# Convert batch directory into a Bagit directory
external_identifier = "deplatformr-open-images-" + split[1]
bagit.make_bag(batch_dir,
{'Source-Organization': 'Deplatformr Project', 'Organization-Address': 'https://open-images.deplatformr.com', 'External-Description': 'This package contains a subset of the Google Open Images dataset used for machine learning training. The image files have been downloaded from their Flickr server source, verified for fixity, had EXIF metadata extracted, and are now bundled here with their annotation data, segmentation files and newly generated sha512 checksums. This content and context is described in a sidecar metadata files using schema.org/ImageObject and JSON-LD format.', 'External-Identifier': external_identifier, 'License': 'https://creativecommons.org/licenses/by/2.0/'}, checksums=["sha512"])
print("Created a Bagit directory.")
try:
# Create the tar package
packages_dir = os.path.join(
os.getcwd(), "source_data/packages/")
tarball_name = external_identifier + ".tar"
tarball = tarfile.open(os.path.join(
packages_dir, tarball_name), "w")
tarball.add(batch_dir, arcname=external_identifier)
tarball.close()
print("Created tarball " + tarball_name + ".")
except Exception as e:
print("Unable to create a tarball package from batch.")
print(e)
return()
try:
shutil.rmtree(batch_dir)
print("Deleted the batch source directory.")
except OSError as e:
print("Unable to delete the source directory.")
print(e)
# record the tarball package name for each image
db_path = os.path.join(
abs_path, "source_data/deplatformr_open_images_v6.sqlite")
images_db = sqlite3.connect(db_path)
cursor = images_db.cursor()
for image in images:
cursor.execute("UPDATE open_images SET package_name = ? WHERE ImageID = ?",
(tarball_name, image[0],),)
images_db.commit()
images_db.close()
# add tarball name, size, and timestamp to the workflow dbase
utctime = datetime.utcnow()
tarball_size = os.path.getsize(
os.path.join(packages_dir, tarball_name))
print("Tarball size is: " + get_human_readable_file_size(tarball_size))
db_path = os.path.join(
abs_path, "deplatformr_open_images_workflow.sqlite")
workflow_db = sqlite3.connect(db_path)
cursor = workflow_db.cursor()
for image in images:
print("Linking image " +
image[0] + " to " + tarball_name + " in SQLite.")
cursor.execute(
"UPDATE images SET package_name = ? WHERE image_id = ?", (tarball_name, image[0],),)
cursor.execute("INSERT INTO packages (name, size, timestamp) VALUES (?,?,?)",
(tarball_name, tarball_size, utctime,),)
workflow_db.commit()
workflow_db.close()
except Exception as e:
print("Unable to create a package for batch directory " + batch_dir)
print(e)
def get_human_readable_file_size(size, precision=2):
suffixes = ["B", "KiB", "MiB", "GiB", "TiB"]
suffixIndex = 0
while size > 1024 and suffixIndex < 4:
suffixIndex += 1 # increment the index of the suffix
size = size / 1024.0 # apply the division
return "%.*f %s" % (precision, size, suffixes[suffixIndex])
| [
"sqlite3.connect",
"os.makedirs",
"datetime.datetime.utcnow",
"os.path.join",
"os.getcwd",
"os.path.split",
"shutil.rmtree",
"bagit.make_bag",
"os.walk"
] | [((313, 324), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (322, 324), False, 'import os\n'), ((1870, 2625), 'bagit.make_bag', 'bagit.make_bag', (['batch_dir', "{'Source-Organization': 'Deplatformr Project', 'Organization-Address':\n 'https://open-images.deplatformr.com', 'External-Description':\n 'This package contains a subset of the Google Open Images dataset used for machine learning training. The image files have been downloaded from their Flickr server source, verified for fixity, had EXIF metadata extracted, and are now bundled here with their annotation data, segmentation files and newly generated sha512 checksums. This content and context is described in a sidecar metadata files using schema.org/ImageObject and JSON-LD format.'\n , 'External-Identifier': external_identifier, 'License':\n 'https://creativecommons.org/licenses/by/2.0/'}"], {'checksums': "['sha512']"}), "(batch_dir, {'Source-Organization': 'Deplatformr Project',\n 'Organization-Address': 'https://open-images.deplatformr.com',\n 'External-Description':\n 'This package contains a subset of the Google Open Images dataset used for machine learning training. The image files have been downloaded from their Flickr server source, verified for fixity, had EXIF metadata extracted, and are now bundled here with their annotation data, segmentation files and newly generated sha512 checksums. This content and context is described in a sidecar metadata files using schema.org/ImageObject and JSON-LD format.'\n , 'External-Identifier': external_identifier, 'License':\n 'https://creativecommons.org/licenses/by/2.0/'}, checksums=['sha512'])\n", (1884, 2625), False, 'import bagit\n'), ((3655, 3726), 'os.path.join', 'os.path.join', (['abs_path', '"""source_data/deplatformr_open_images_v6.sqlite"""'], {}), "(abs_path, 'source_data/deplatformr_open_images_v6.sqlite')\n", (3667, 3726), False, 'import os\n'), ((3768, 3792), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (3783, 3792), False, 'import sqlite3\n'), ((4175, 4192), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4190, 4192), False, 'from datetime import datetime\n'), ((4401, 4466), 'os.path.join', 'os.path.join', (['abs_path', '"""deplatformr_open_images_workflow.sqlite"""'], {}), "(abs_path, 'deplatformr_open_images_workflow.sqlite')\n", (4413, 4466), False, 'import os\n'), ((4510, 4534), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (4525, 4534), False, 'import sqlite3\n'), ((751, 775), 'os.path.split', 'os.path.split', (['batch_dir'], {}), '(batch_dir)\n', (764, 775), False, 'import os\n'), ((919, 945), 'os.makedirs', 'os.makedirs', (['new_batch_dir'], {}), '(new_batch_dir)\n', (930, 945), False, 'import os\n'), ((3363, 3387), 'shutil.rmtree', 'shutil.rmtree', (['batch_dir'], {}), '(batch_dir)\n', (3376, 3387), False, 'import shutil\n'), ((4253, 4293), 'os.path.join', 'os.path.join', (['packages_dir', 'tarball_name'], {}), '(packages_dir, tarball_name)\n', (4265, 4293), False, 'import os\n'), ((1146, 1164), 'os.walk', 'os.walk', (['batch_dir'], {}), '(batch_dir)\n', (1153, 1164), False, 'import os\n'), ((2804, 2815), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2813, 2815), False, 'import os\n'), ((2941, 2981), 'os.path.join', 'os.path.join', (['packages_dir', 'tarball_name'], {}), '(packages_dir, tarball_name)\n', (2953, 2981), False, 'import os\n'), ((1291, 1315), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (1303, 1315), False, 'import os\n'), ((1362, 1395), 'os.path.join', 
'os.path.join', (['new_batch_dir', 'file'], {}), '(new_batch_dir, file)\n', (1374, 1395), False, 'import os\n')] |
import sklearn.metrics
import sklearn.preprocessing
import sklearn.utils.multiclass
import pandas
import seaborn as sns
import matplotlib.pyplot as pyplot
from functools import reduce
# import numpy as np
def metrics_from_prediction_and_label(labels, predictions, verbose=False):
measures = {
"accuracy": sklearn.metrics.accuracy_score(labels, predictions),
"balanced_accuracy": sklearn.metrics.balanced_accuracy_score(labels, predictions),
"precision_micro": sklearn.metrics.precision_score(labels, predictions, average='micro'),
"precision_macro": sklearn.metrics.precision_score(labels, predictions, average='macro'),
"precision_weighted": sklearn.metrics.precision_score(labels, predictions, average='weighted'),
"recall_micro": sklearn.metrics.recall_score(labels, predictions, average='micro'),
"recall_macro": sklearn.metrics.recall_score(labels, predictions, average='macro'),
"recall_weighted": sklearn.metrics.recall_score(labels, predictions, average='weighted'),
"f1_score_micro": sklearn.metrics.f1_score(labels, predictions, average='micro'),
"f1_score_macro": sklearn.metrics.f1_score(labels, predictions, average='macro'),
"f1_score_weighted": sklearn.metrics.f1_score(labels, predictions, average='weighted')
}
try:
measures["roc_auc_weighted"] = multi_class_roc_auc_score(labels, predictions, 'weighted')
measures["roc_auc_macro"] = multi_class_roc_auc_score(labels, predictions, 'macro')
measures["roc_auc_micro"] = multi_class_roc_auc_score(labels, predictions, 'micro')
except ValueError:
print("Warning: Roc auc score can not be calculated ...")
try:
# note we use the average precision at different threshold values as the auc of the pr-curve
# and not the auc-pr-curve with the trapezoidal rule / linear interpolation because it could be too optimistic
measures["auc_prc_weighted"] = multi_class_prc_auc_score(labels, predictions, 'weighted')
measures["auc_prc_macro"] = multi_class_prc_auc_score(labels, predictions, 'macro')
measures["auc_prc_micro"] = multi_class_prc_auc_score(labels, predictions, 'micro')
except ValueError:
print("Warning: Auc prc score can not be calculated ...")
save_confusion_matrix(labels, predictions)
report = save_classification_report(labels, predictions)
classes = list(sorted(set(labels)))
for pos_class in classes:
measures[str(pos_class) + "_precision"] = report[str(pos_class)]['precision']
measures[str(pos_class) + "_recall"] = report[str(pos_class)]['recall']
measures[str(pos_class) + "_f1-score"] = report[str(pos_class)]['f1-score']
measures[str(pos_class) + "_support"] = report[str(pos_class)]['support']
if pos_class == 1:
neg_class = 0
else:
neg_class = 1
tp, fp, tn, fn = calculate_cm_states(labels, predictions, pos_class, neg_class)
measures[str(pos_class) + "_tp"] = tp
measures[str(pos_class) + "_fp"] = fp
measures[str(pos_class) + "_tn"] = tn
measures[str(pos_class) + "_fn"] = fn
if tn + fp == 0:
pass
else:
# Specificity or true negative rate
measures[str(pos_class) + "_tnr"] = tn / (tn + fp)
# Fall out or false positive rate
measures[str(pos_class) + "_fpr"] = fp / (fp + tn)
if tn + fn == 0:
pass
else:
# Negative predictive value
measures[str(pos_class) + "_npv"] = tn / (tn + fn)
if tp + fn == 0:
pass
else:
# False negative rate
measures[str(pos_class) + "_fnr"] = fn / (tp + fn)
if tp + fp == 0:
pass
else:
# False discovery rate
measures[str(pos_class) + "_fdr"] = fp / (tp + fp)
return measures
def calculate_cm_states(labels, predictions, pos_class, neg_class):
tp = 0
fp = 0
tn = 0
fn = 0
for i in range(len(predictions)):
if labels[i] == predictions[i] == pos_class:
tp += 1
if predictions[i] == pos_class and labels[i] != predictions[i]:
fp += 1
if labels[i] == predictions[i] == neg_class:
tn += 1
if predictions[i] == neg_class and labels[i] != predictions[i]:
fn += 1
return tp, fp, tn, fn
def save_classification_report(labels, predictions):
return sklearn.metrics.classification_report(y_true=labels, y_pred=predictions, output_dict=True)
def multi_class_roc_auc_score(label, predict, average):
label_binarizer = sklearn.preprocessing.LabelBinarizer()
label_binarizer.fit(label)
label = label_binarizer.transform(label)
predict = label_binarizer.transform(predict)
return sklearn.metrics.roc_auc_score(label, predict, average=average)
def multi_class_prc_auc_score(label, predict, average):
label_binarizer = sklearn.preprocessing.LabelBinarizer()
label_binarizer.fit(label)
label = label_binarizer.transform(label)
predict = label_binarizer.transform(predict)
return sklearn.metrics.average_precision_score(label, predict, average=average)
def label_binarizer(labels):
for index in range(0, len(labels)):
if labels[index] >= 0.5:
labels[index] = 1.0
else:
labels[index] = 0.0
return labels
def save_confusion_matrix(labels, predictions, path="../../../results/cm.pdf"):
classes = sklearn.utils.multiclass.unique_labels(labels, predictions)
cms = []
cm = sklearn.metrics.confusion_matrix(labels, predictions)
cm_df = pandas.DataFrame(cm, index=classes, columns=classes)
cms.append(cm_df)
def prettify(n):
"""
if n > 1000000:
return str(np.round(n / 1000000, 1)) + 'M'
elif n > 1000:
return str(np.round(n / 1000, 1)) + 'K'
else:
return str(n)
"""
return str(n)
cm = reduce(lambda x, y: x.add(y, fill_value=0), cms)
annot = cm.applymap(prettify)
cm = (cm.T / cm.sum(axis=1)).T
fig, g = pyplot.subplots(figsize=(7, 4.5))
g = sns.heatmap(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=True, linewidths=0.1)
_ = g.set(ylabel='Actual', xlabel='Prediction')
for _, spine in g.spines.items():
spine.set_visible(True)
pyplot.xticks(rotation=45)
fig.tight_layout()
fig.savefig(path)
    pyplot.close()
| [
"sklearn.metrics.accuracy_score",
"sklearn.preprocessing.LabelBinarizer",
"sklearn.metrics.f1_score",
"matplotlib.pyplot.xticks",
"sklearn.metrics.balanced_accuracy_score",
"sklearn.metrics.classification_report",
"sklearn.metrics.average_precision_score",
"seaborn.heatmap",
"sklearn.metrics.roc_auc_score",
"matplotlib.pyplot.close",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"pandas.DataFrame",
"sklearn.utils.multiclass.unique_labels",
"matplotlib.pyplot.subplots",
"sklearn.metrics.confusion_matrix"
] | [((4466, 4560), 'sklearn.metrics.classification_report', 'sklearn.metrics.classification_report', ([], {'y_true': 'labels', 'y_pred': 'predictions', 'output_dict': '(True)'}), '(y_true=labels, y_pred=predictions,\n output_dict=True)\n', (4503, 4560), False, 'import sklearn\n'), ((4637, 4675), 'sklearn.preprocessing.LabelBinarizer', 'sklearn.preprocessing.LabelBinarizer', ([], {}), '()\n', (4673, 4675), False, 'import sklearn\n'), ((4813, 4875), 'sklearn.metrics.roc_auc_score', 'sklearn.metrics.roc_auc_score', (['label', 'predict'], {'average': 'average'}), '(label, predict, average=average)\n', (4842, 4875), False, 'import sklearn\n'), ((4956, 4994), 'sklearn.preprocessing.LabelBinarizer', 'sklearn.preprocessing.LabelBinarizer', ([], {}), '()\n', (4992, 4994), False, 'import sklearn\n'), ((5132, 5204), 'sklearn.metrics.average_precision_score', 'sklearn.metrics.average_precision_score', (['label', 'predict'], {'average': 'average'}), '(label, predict, average=average)\n', (5171, 5204), False, 'import sklearn\n'), ((5504, 5563), 'sklearn.utils.multiclass.unique_labels', 'sklearn.utils.multiclass.unique_labels', (['labels', 'predictions'], {}), '(labels, predictions)\n', (5542, 5563), False, 'import sklearn\n'), ((5586, 5639), 'sklearn.metrics.confusion_matrix', 'sklearn.metrics.confusion_matrix', (['labels', 'predictions'], {}), '(labels, predictions)\n', (5618, 5639), False, 'import sklearn\n'), ((5652, 5704), 'pandas.DataFrame', 'pandas.DataFrame', (['cm'], {'index': 'classes', 'columns': 'classes'}), '(cm, index=classes, columns=classes)\n', (5668, 5704), False, 'import pandas\n'), ((6130, 6163), 'matplotlib.pyplot.subplots', 'pyplot.subplots', ([], {'figsize': '(7, 4.5)'}), '(figsize=(7, 4.5))\n', (6145, 6163), True, 'import matplotlib.pyplot as pyplot\n'), ((6172, 6272), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': 'annot', 'fmt': '""""""', 'cmap': '"""Blues"""', 'cbar': '(False)', 'rasterized': '(True)', 'linewidths': '(0.1)'}), "(cm, annot=annot, fmt='', cmap='Blues', cbar=False, rasterized=\n True, linewidths=0.1)\n", (6183, 6272), True, 'import seaborn as sns\n'), ((6396, 6422), 'matplotlib.pyplot.xticks', 'pyplot.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (6409, 6422), True, 'import matplotlib.pyplot as pyplot\n'), ((6472, 6486), 'matplotlib.pyplot.close', 'pyplot.close', ([], {}), '()\n', (6484, 6486), True, 'import matplotlib.pyplot as pyplot\n'), ((251, 302), 'sklearn.metrics.accuracy_score', 'sklearn.metrics.accuracy_score', (['labels', 'predictions'], {}), '(labels, predictions)\n', (281, 302), False, 'import sklearn\n'), ((333, 393), 'sklearn.metrics.balanced_accuracy_score', 'sklearn.metrics.balanced_accuracy_score', (['labels', 'predictions'], {}), '(labels, predictions)\n', (372, 393), False, 'import sklearn\n'), ((422, 491), 'sklearn.metrics.precision_score', 'sklearn.metrics.precision_score', (['labels', 'predictions'], {'average': '"""micro"""'}), "(labels, predictions, average='micro')\n", (453, 491), False, 'import sklearn\n'), ((520, 589), 'sklearn.metrics.precision_score', 'sklearn.metrics.precision_score', (['labels', 'predictions'], {'average': '"""macro"""'}), "(labels, predictions, average='macro')\n", (551, 589), False, 'import sklearn\n'), ((621, 693), 'sklearn.metrics.precision_score', 'sklearn.metrics.precision_score', (['labels', 'predictions'], {'average': '"""weighted"""'}), "(labels, predictions, average='weighted')\n", (652, 693), False, 'import sklearn\n'), ((719, 785), 'sklearn.metrics.recall_score', 
'sklearn.metrics.recall_score', (['labels', 'predictions'], {'average': '"""micro"""'}), "(labels, predictions, average='micro')\n", (747, 785), False, 'import sklearn\n'), ((811, 877), 'sklearn.metrics.recall_score', 'sklearn.metrics.recall_score', (['labels', 'predictions'], {'average': '"""macro"""'}), "(labels, predictions, average='macro')\n", (839, 877), False, 'import sklearn\n'), ((906, 975), 'sklearn.metrics.recall_score', 'sklearn.metrics.recall_score', (['labels', 'predictions'], {'average': '"""weighted"""'}), "(labels, predictions, average='weighted')\n", (934, 975), False, 'import sklearn\n'), ((1003, 1065), 'sklearn.metrics.f1_score', 'sklearn.metrics.f1_score', (['labels', 'predictions'], {'average': '"""micro"""'}), "(labels, predictions, average='micro')\n", (1027, 1065), False, 'import sklearn\n'), ((1093, 1155), 'sklearn.metrics.f1_score', 'sklearn.metrics.f1_score', (['labels', 'predictions'], {'average': '"""macro"""'}), "(labels, predictions, average='macro')\n", (1117, 1155), False, 'import sklearn\n'), ((1186, 1251), 'sklearn.metrics.f1_score', 'sklearn.metrics.f1_score', (['labels', 'predictions'], {'average': '"""weighted"""'}), "(labels, predictions, average='weighted')\n", (1210, 1251), False, 'import sklearn\n')] |
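A small usage sketch for the metric helpers above (illustrative; note that metrics_from_prediction_and_label() also writes a confusion-matrix figure, so the default output directory ../../../results/ must exist):

labels = [0, 1, 1, 0, 1, 1]
predictions = [0, 1, 0, 0, 1, 1]
measures = metrics_from_prediction_and_label(labels, predictions)
print(measures['accuracy'], measures['f1_score_macro'], measures['1_recall'])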
import warnings
from unittest.mock import patch
from django.apps import apps
from django.core import management
from django.core.management.base import CommandError
from django.db import models
from django.db.utils import ProgrammingError
from django.test import TransactionTestCase, tag
from django_pgschemas.checks import check_schema_names
from django_pgschemas.models import TenantMixin
from django_pgschemas.utils import get_tenant_model
TenantModel = get_tenant_model()
def patched_get_tenant_model(*args, **kwargs):
class TenantModel(TenantMixin):
dummy = models.TextField()
class Meta:
app_label = get_tenant_model()._meta.app_label
return TenantModel
@tag("bug")
class MigrationZeroRoundTripTestCase(TransactionTestCase):
"""
    Provoke a handled ProgrammingError by migrating models from an empty database.
"""
def test_database_checks_with_zero_migrations(self):
management.call_command("migrate", "shared_public", "zero", verbosity=0)
# The goal is that the next line doesn't raise ProgrammingError
check_schema_names(apps.get_app_config("django_pgschemas"))
management.call_command("migrate", verbosity=0)
@tag("bug")
class UnappliedMigrationTestCase(TransactionTestCase):
"""
    Provoke a handled ProgrammingError by running a tenant command with pending model changes.
"""
@classmethod
def setUpClass(cls):
tenant1 = TenantModel(schema_name="tenant1")
tenant1.save(verbosity=0)
@classmethod
def tearDownClass(cls):
for tenant in TenantModel.objects.all():
tenant.delete(force_drop=True)
@patch("django_pgschemas.management.commands.get_tenant_model", patched_get_tenant_model)
def test_whowill_with_pending_migrations(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore") # Avoid warnings about model being registered twice
with self.assertRaises(CommandError) as ctx:
management.call_command("whowill", all_schemas=True, verbosity=0)
self.assertEqual(
str(ctx.exception),
"Error while attempting to retrieve dynamic schemas. "
"Perhaps you need to migrate the 'public' schema first?",
)
@tag("bug")
class MigrateIgnoringExcludedSchemasTestCase(TransactionTestCase):
@classmethod
def setUpClass(cls):
tenant1 = TenantModel(schema_name="tenant1")
tenant1.save(verbosity=0)
@classmethod
def tearDownClass(cls):
for tenant in TenantModel.objects.all():
tenant.delete(force_drop=True)
def test_migrate_with_exclusions(self):
# We first unapply a migration with fake so we can reapply it without fake
# This should work without errors
management.call_command("migrate", "app_tenants", "0001_initial", fake=True, schemas=["tenant1"], verbosity=0)
# We then migrate on all schemas except for tenant1, THIS IS THE CASE WE WANT TO TEST
# This should work without errors
management.call_command("migrate", all_schemas=True, excluded_schemas=["tenant1"], verbosity=0)
# If we try to global migrate now, we should get a ProgrammingError
with self.assertRaises(ProgrammingError):
management.call_command("migrate", all_schemas=True, verbosity=0)
# We finally apply the migration again with fake
# This should work without errors
management.call_command("migrate", fake=True, all_schemas=True, verbosity=0)
| [
"django.db.models.TextField",
"django.core.management.call_command",
"django_pgschemas.utils.get_tenant_model",
"warnings.catch_warnings",
"django.apps.apps.get_app_config",
"warnings.simplefilter",
"django.test.tag",
"unittest.mock.patch"
] | [((460, 478), 'django_pgschemas.utils.get_tenant_model', 'get_tenant_model', ([], {}), '()\n', (476, 478), False, 'from django_pgschemas.utils import get_tenant_model\n'), ((706, 716), 'django.test.tag', 'tag', (['"""bug"""'], {}), "('bug')\n", (709, 716), False, 'from django.test import TransactionTestCase, tag\n'), ((1210, 1220), 'django.test.tag', 'tag', (['"""bug"""'], {}), "('bug')\n", (1213, 1220), False, 'from django.test import TransactionTestCase, tag\n'), ((2304, 2314), 'django.test.tag', 'tag', (['"""bug"""'], {}), "('bug')\n", (2307, 2314), False, 'from django.test import TransactionTestCase, tag\n'), ((1659, 1751), 'unittest.mock.patch', 'patch', (['"""django_pgschemas.management.commands.get_tenant_model"""', 'patched_get_tenant_model'], {}), "('django_pgschemas.management.commands.get_tenant_model',\n patched_get_tenant_model)\n", (1664, 1751), False, 'from unittest.mock import patch\n'), ((580, 598), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (596, 598), False, 'from django.db import models\n'), ((938, 1010), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""', '"""shared_public"""', '"""zero"""'], {'verbosity': '(0)'}), "('migrate', 'shared_public', 'zero', verbosity=0)\n", (961, 1010), False, 'from django.core import management\n'), ((1159, 1206), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""'], {'verbosity': '(0)'}), "('migrate', verbosity=0)\n", (1182, 1206), False, 'from django.core import management\n'), ((2827, 2941), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""', '"""app_tenants"""', '"""0001_initial"""'], {'fake': '(True)', 'schemas': "['tenant1']", 'verbosity': '(0)'}), "('migrate', 'app_tenants', '0001_initial', fake=True,\n schemas=['tenant1'], verbosity=0)\n", (2850, 2941), False, 'from django.core import management\n'), ((3082, 3182), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""'], {'all_schemas': '(True)', 'excluded_schemas': "['tenant1']", 'verbosity': '(0)'}), "('migrate', all_schemas=True, excluded_schemas=[\n 'tenant1'], verbosity=0)\n", (3105, 3182), False, 'from django.core import management\n'), ((3489, 3565), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""'], {'fake': '(True)', 'all_schemas': '(True)', 'verbosity': '(0)'}), "('migrate', fake=True, all_schemas=True, verbosity=0)\n", (3512, 3565), False, 'from django.core import management\n'), ((1110, 1149), 'django.apps.apps.get_app_config', 'apps.get_app_config', (['"""django_pgschemas"""'], {}), "('django_pgschemas')\n", (1129, 1149), False, 'from django.apps import apps\n'), ((1813, 1838), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1836, 1838), False, 'import warnings\n'), ((1852, 1883), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1873, 1883), False, 'import warnings\n'), ((3316, 3381), 'django.core.management.call_command', 'management.call_command', (['"""migrate"""'], {'all_schemas': '(True)', 'verbosity': '(0)'}), "('migrate', all_schemas=True, verbosity=0)\n", (3339, 3381), False, 'from django.core import management\n'), ((2010, 2075), 'django.core.management.call_command', 'management.call_command', (['"""whowill"""'], {'all_schemas': '(True)', 'verbosity': '(0)'}), "('whowill', all_schemas=True, verbosity=0)\n", (2033, 2075), False, 'from django.core import management\n'), ((644, 662), 
'django_pgschemas.utils.get_tenant_model', 'get_tenant_model', ([], {}), '()\n', (660, 662), False, 'from django_pgschemas.utils import get_tenant_model\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 13 14:47:13 2021
@author: huzongxiang
"""
import tensorflow as tf
from tensorflow.keras import layers
class PartitionPadding(layers.Layer):
def __init__(self, batch_size, **kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
def call(self, inputs):
features, graph_indices = inputs
# Obtain subgraphs
features = tf.dynamic_partition(
features, graph_indices, self.batch_size
)
# Pad and stack subgraphs
num_features = [tf.shape(f)[0] for f in features]
max_num = tf.reduce_max(num_features)
features_padded = tf.stack(
[
tf.pad(f, [(0, max_num - n), (0, 0)])
for f, n in zip(features, num_features)
],
axis=0,
)
# Remove empty subgraphs (usually for last batch)
nonempty_examples = tf.where(tf.reduce_sum(features_padded, (1, 2)) != 0)
nonempty_examples = tf.squeeze(nonempty_examples, axis=-1)
features_batch = tf.gather(features_padded, nonempty_examples, axis=0)
return features_batch
def get_config(self):
config = super().get_config()
config.update({"batch": self.batch_size})
return config
class PartitionPaddingPair(layers.Layer):
def __init__(self, batch_size, **kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
def call(self, inputs):
features, graph_indices = inputs
# Obtain subgraphs
features = tf.dynamic_partition(
features, graph_indices, self.batch_size
)
# Pad and stack subgraphs
num_features = [tf.shape(f)[0] for f in features]
max_num = tf.reduce_max(num_features)
features_padded = tf.stack(
[
tf.pad(f, [(0, max_num - n), (0, 0)])
for f, n in zip(features, num_features)
],
axis=0,
)
# Remove empty subgraphs (usually for last batch)
nonempty_examples = tf.unique(graph_indices)[0]
features_batch = tf.gather(features_padded, nonempty_examples, axis=0)
return features_batch
def get_config(self):
config = super().get_config()
config.update({"batch_size": self.batch_size})
        return config
| [
"tensorflow.unique",
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.reduce_sum",
"tensorflow.reduce_max",
"tensorflow.gather",
"tensorflow.dynamic_partition",
"tensorflow.squeeze"
] | [((428, 490), 'tensorflow.dynamic_partition', 'tf.dynamic_partition', (['features', 'graph_indices', 'self.batch_size'], {}), '(features, graph_indices, self.batch_size)\n', (448, 490), True, 'import tensorflow as tf\n'), ((624, 651), 'tensorflow.reduce_max', 'tf.reduce_max', (['num_features'], {}), '(num_features)\n', (637, 651), True, 'import tensorflow as tf\n'), ((1034, 1072), 'tensorflow.squeeze', 'tf.squeeze', (['nonempty_examples'], {'axis': '(-1)'}), '(nonempty_examples, axis=-1)\n', (1044, 1072), True, 'import tensorflow as tf\n'), ((1100, 1153), 'tensorflow.gather', 'tf.gather', (['features_padded', 'nonempty_examples'], {'axis': '(0)'}), '(features_padded, nonempty_examples, axis=0)\n', (1109, 1153), True, 'import tensorflow as tf\n'), ((1615, 1677), 'tensorflow.dynamic_partition', 'tf.dynamic_partition', (['features', 'graph_indices', 'self.batch_size'], {}), '(features, graph_indices, self.batch_size)\n', (1635, 1677), True, 'import tensorflow as tf\n'), ((1811, 1838), 'tensorflow.reduce_max', 'tf.reduce_max', (['num_features'], {}), '(num_features)\n', (1824, 1838), True, 'import tensorflow as tf\n'), ((2202, 2255), 'tensorflow.gather', 'tf.gather', (['features_padded', 'nonempty_examples'], {'axis': '(0)'}), '(features_padded, nonempty_examples, axis=0)\n', (2211, 2255), True, 'import tensorflow as tf\n'), ((2147, 2171), 'tensorflow.unique', 'tf.unique', (['graph_indices'], {}), '(graph_indices)\n', (2156, 2171), True, 'import tensorflow as tf\n'), ((572, 583), 'tensorflow.shape', 'tf.shape', (['f'], {}), '(f)\n', (580, 583), True, 'import tensorflow as tf\n'), ((718, 755), 'tensorflow.pad', 'tf.pad', (['f', '[(0, max_num - n), (0, 0)]'], {}), '(f, [(0, max_num - n), (0, 0)])\n', (724, 755), True, 'import tensorflow as tf\n'), ((961, 999), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['features_padded', '(1, 2)'], {}), '(features_padded, (1, 2))\n', (974, 999), True, 'import tensorflow as tf\n'), ((1759, 1770), 'tensorflow.shape', 'tf.shape', (['f'], {}), '(f)\n', (1767, 1770), True, 'import tensorflow as tf\n'), ((1905, 1942), 'tensorflow.pad', 'tf.pad', (['f', '[(0, max_num - n), (0, 0)]'], {}), '(f, [(0, max_num - n), (0, 0)])\n', (1911, 1942), True, 'import tensorflow as tf\n')] |
from django.urls import path
from . import views
app_name = 'reservation'
urlpatterns = [
path('', views.reserve_table, name = 'reserve_table'),
] | [
"django.urls.path"
] | [((96, 147), 'django.urls.path', 'path', (['""""""', 'views.reserve_table'], {'name': '"""reserve_table"""'}), "('', views.reserve_table, name='reserve_table')\n", (100, 147), False, 'from django.urls import path\n')] |
#!/usr/bin/env python3
"""
Author : <NAME> <<EMAIL>>
Date : 2021-12-15
Purpose: Working with lists
"""
import argparse
# --------------------------------------------------
def get_args():
"""Get command-line arguments"""
parser = argparse.ArgumentParser(
description="Working with lists",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("items",
type=str,
nargs="+",
metavar="str",
help="item(s) to bring")
parser.add_argument("-s",
"--sorted",
help="a boolean flag",
action="store_true")
return parser.parse_args()
# --------------------------------------------------
def main():
"""The main function: formatting and printing the output"""
args = get_args()
sort_flag = args.sorted
items = args.items
if sort_flag:
items = sorted(items)
if len(items) == 1:
print(f"You are bringing {items[0]}.")
elif len(items) < 3:
items.insert(-1, "and")
print(f"You are bringing {' '.join(items)}.")
else:
# print(items)
last = items[-1]
and_last = "and " + last
items[-1] = and_last
# print(items)
print(f"You are bringing {', '.join(items)}.")
# --------------------------------------------------
if __name__ == "__main__":
main()
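# Example run (sketch; assumes this file is saved as picnic.py and is executable):
# $ ./picnic.py soda chips --sorted
# You are bringing chips and soda.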
| [
"argparse.ArgumentParser"
] | [((244, 362), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Working with lists"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Working with lists', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (267, 362), False, 'import argparse\n')] |
import numpy as np
import cv2
import os
import json
import glob
from PIL import Image, ImageDraw
plate_diameter = 25 #cm
plate_depth = 1.5 #cm
plate_thickness = 0.2 #cm
def Max(x, y):
if (x >= y):
return x
else:
return y
def polygons_to_mask(img_shape, polygons):
mask = np.zeros(img_shape, dtype=np.uint8)
mask = Image.fromarray(mask)
xy = list(map(tuple, polygons))
ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
mask = np.array(mask, dtype=bool)
return mask
def mask2box(mask):
index = np.argwhere(mask == 1)
rows = index[:, 0]
clos = index[:, 1]
left_top_r = np.min(rows)
left_top_c = np.min(clos)
right_bottom_r = np.max(rows)
right_bottom_c = np.max(clos)
return [left_top_c, left_top_r, right_bottom_c, right_bottom_r]
def get_bbox(points, h, w):
polygons = points
mask = polygons_to_mask([h,w], polygons)
return mask2box(mask)
def get_scale(points, img, lowest):
bbox = get_bbox(points, img.shape[0], img.shape[1])
diameter = (bbox[2]-bbox[0]+1+bbox[3]-bbox[1]+1)/2
len_per_pix = plate_diameter/float(diameter)
avg = 0
k = 0
for point in points:
avg += img[point[1]][point[0]]
k += 1
avg = avg/float(k)
depth = lowest - avg
depth_per_pix = plate_depth/depth
return len_per_pix, depth_per_pix
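# cal_volume (below) integrates depth over the labelled region: for each pixel
# inside the polygon, (lowest - pixel value) * depth_per_pix is the food height in
# cm; after subtracting the plate thickness and clamping at zero it is multiplied
# by len_per_pix**2, the pixel footprint in cm^2, so the accumulated total is a
# volume in cm^3.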
def cal_volume(points, img, len_per_pix, depth_per_pix, lowest):
volume = 0.0
bbox = get_bbox(points, img.shape[0], img.shape[1])
points = np.array(points)
shape = points.shape
points = points.reshape(shape[0], 1, shape[1])
for i in range(bbox[0], bbox[2]+1):
for j in range(bbox[1], bbox[3]+1):
if (cv2.pointPolygonTest(points, (i,j), False) >= 0):
volume += Max(0, (lowest - img[j][i]) * depth_per_pix - plate_thickness) * len_per_pix * len_per_pix
return volume
def get_volume(img, json_path):
lowest = np.max(img)
vol_dict = {}
#print(lowest)
len_per_pix = 0.0
depth_per_pix = 0.0
with open(json_path, 'r') as json_file:
data = json.load(json_file)
for shape in data['shapes']:
if (shape['label'] == "plate"):
len_per_pix, depth_per_pix = get_scale(shape['points'], img, lowest)
#print(len_per_pix, depth_per_pix)
break
for shape in data['shapes']:
label = shape['label']
if (label == "plate"):
continue
points = shape['points']
volume = cal_volume(points, img, len_per_pix, depth_per_pix, lowest)
if (label in vol_dict):
vol_dict[label] += volume
else:
vol_dict[label] = volume
return vol_dict
img = cv2.imread("out.png",0)
print(get_volume(img,"test.json")) | [
"PIL.Image.fromarray",
"cv2.pointPolygonTest",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.argwhere",
"PIL.ImageDraw.Draw",
"numpy.min",
"json.load",
"cv2.imread"
] | [((2774, 2798), 'cv2.imread', 'cv2.imread', (['"""out.png"""', '(0)'], {}), "('out.png', 0)\n", (2784, 2798), False, 'import cv2\n'), ((302, 337), 'numpy.zeros', 'np.zeros', (['img_shape'], {'dtype': 'np.uint8'}), '(img_shape, dtype=np.uint8)\n', (310, 337), True, 'import numpy as np\n'), ((349, 370), 'PIL.Image.fromarray', 'Image.fromarray', (['mask'], {}), '(mask)\n', (364, 370), False, 'from PIL import Image, ImageDraw\n'), ((477, 503), 'numpy.array', 'np.array', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (485, 503), True, 'import numpy as np\n'), ((554, 576), 'numpy.argwhere', 'np.argwhere', (['(mask == 1)'], {}), '(mask == 1)\n', (565, 576), True, 'import numpy as np\n'), ((640, 652), 'numpy.min', 'np.min', (['rows'], {}), '(rows)\n', (646, 652), True, 'import numpy as np\n'), ((670, 682), 'numpy.min', 'np.min', (['clos'], {}), '(clos)\n', (676, 682), True, 'import numpy as np\n'), ((704, 716), 'numpy.max', 'np.max', (['rows'], {}), '(rows)\n', (710, 716), True, 'import numpy as np\n'), ((738, 750), 'numpy.max', 'np.max', (['clos'], {}), '(clos)\n', (744, 750), True, 'import numpy as np\n'), ((1520, 1536), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1528, 1536), True, 'import numpy as np\n'), ((1945, 1956), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1951, 1956), True, 'import numpy as np\n'), ((2099, 2119), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (2108, 2119), False, 'import json\n'), ((411, 431), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['mask'], {}), '(mask)\n', (425, 431), False, 'from PIL import Image, ImageDraw\n'), ((1713, 1756), 'cv2.pointPolygonTest', 'cv2.pointPolygonTest', (['points', '(i, j)', '(False)'], {}), '(points, (i, j), False)\n', (1733, 1756), False, 'import cv2\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cntk as C
import numpy as np
from .common import floatx, epsilon, image_dim_ordering, image_data_format
from collections import defaultdict
from contextlib import contextmanager
import warnings
C.set_global_option('align_axis', 1)
b_any = any
dev = C.device.use_default_device()
if dev.type() == 0:
warnings.warn(
'CNTK backend warning: GPU is not detected. '
        'CNTK\'s CPU version is not fully optimized, '
        'please run with GPU to get better performance.')
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
# LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase
_LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')
# static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor.
_LEARNING_PHASE = -1
_UID_PREFIXES = defaultdict(int)
# cntk doesn't support gradients as symbolic ops; to hook up with the keras model
# we create each gradient as a constant placeholder, and use this global map to
# keep the mapping from grad placeholder to parameter
grad_parameter_dict = {}
NAME_SCOPE_STACK = []
@contextmanager
def name_scope(name):
global NAME_SCOPE_STACK
NAME_SCOPE_STACK.append(name)
yield
NAME_SCOPE_STACK.pop()
def get_uid(prefix=''):
_UID_PREFIXES[prefix] += 1
return _UID_PREFIXES[prefix]
def learning_phase():
# If _LEARNING_PHASE is not 0 or 1, return dynamic learning phase tensor
return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER
def set_learning_phase(value):
global _LEARNING_PHASE
if value not in {0, 1}:
raise ValueError('CNTK Backend: Set learning phase '
'with value %s is not supported, '
'expected 0 or 1.' % value)
_LEARNING_PHASE = value
def clear_session():
"""Reset learning phase flag for cntk backend.
"""
global _LEARNING_PHASE
global _LEARNING_PHASE_PLACEHOLDER
_LEARNING_PHASE = -1
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0)
def in_train_phase(x, alt, training=None):
global _LEARNING_PHASE
if training is None:
training = learning_phase()
uses_learning_phase = True
else:
uses_learning_phase = False
    # CNTK currently doesn't support a cond op, so here we use the
    # element_select approach as a workaround. It may have a
    # perf issue; it will be replaced with the cntk cond op later.
if callable(x) and isinstance(x, C.cntk_py.Function) is False:
x = x()
if callable(alt) and isinstance(alt, C.cntk_py.Function) is False:
alt = alt()
if training is True:
x._uses_learning_phase = uses_learning_phase
return x
else:
# if _LEARNING_PHASE is static
if isinstance(training, int) or isinstance(training, bool):
result = x if training == 1 or training is True else alt
else:
result = C.element_select(training, x, alt)
result._uses_learning_phase = uses_learning_phase
return result
def in_test_phase(x, alt, training=None):
return in_train_phase(alt, x, training=training)
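# Example (sketch; `noisy_x` and `x` are illustrative tensors): by default the
# learning phase is a dynamic tensor, so the result is an element_select between
# the two branches; after set_learning_phase(1) the training branch is returned
# directly:
#   y = in_train_phase(noisy_x, x)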
def _convert_string_dtype(dtype):
# cntk only support float32 and float64
if dtype == 'float32':
return np.float32
elif dtype == 'float64':
return np.float64
else:
# cntk only running with float,
# try to cast to float to run the model
return np.float32
def _convert_dtype_string(dtype):
if dtype == np.float32:
return 'float32'
elif dtype == np.float64:
return 'float64'
else:
raise ValueError('CNTK Backend: Unsupported dtype: %s. '
'CNTK only supports float32 and '
'float64.' % dtype)
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
# Arguments
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
# Returns
A variable instance (with Keras metadata included).
"""
if dtype is None:
dtype = floatx()
if name is None:
name = ''
if isinstance(
value,
C.variables.Constant) or isinstance(
value,
C.variables.Parameter):
value = value.value
    # initializing a parameter from a symbolic op is not supported, so eval it
    # first as a workaround
if isinstance(value, C.cntk_py.Function):
value = eval(value)
shape = value.shape if hasattr(value, 'shape') else ()
if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0:
value = value.astype(dtype)
# TODO: remove the conversion when cntk supports int32, int64
# https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter
dtype = 'float32' if 'int' in str(dtype) else dtype
v = C.parameter(shape=shape,
init=value,
dtype=dtype,
name=_prepare_name(name, 'variable'))
v._keras_shape = v.shape
v._uses_learning_phase = False
v.constraint = constraint
return v
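# Example (sketch; names are illustrative): wrap a NumPy array as a backend
# variable and read it back with `get_value`, defined later in this module:
#   w = variable(np.zeros((3, 4)), dtype='float32', name='kernel')
#   w_np = get_value(w)   # -> numpy array of shape (3, 4)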
def bias_add(x, bias, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
dims = len(x.shape)
if dims > 0 and x.shape[0] == C.InferredDimension:
dims -= 1
bias_dims = len(bias.shape)
if bias_dims != 1 and bias_dims != dims:
raise ValueError('Unexpected bias dimensions %d, '
'expected 1 or %d dimensions' % (bias_dims, dims))
if dims == 4:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1, 1)
else:
shape = (bias.shape[3],) + bias.shape[:3]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 3:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1, 1)
else:
shape = (bias.shape[2],) + bias.shape[:2]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, 1, bias.shape[0])
else:
shape = bias.shape
elif dims == 2:
if data_format == 'channels_first':
if bias_dims == 1:
shape = (bias.shape[0], 1)
else:
shape = (bias.shape[1],) + bias.shape[:1]
elif data_format == 'channels_last':
if bias_dims == 1:
shape = (1, bias.shape[0])
else:
shape = bias.shape
else:
shape = bias.shape
return x + reshape(bias, shape)
def eval(x):
if isinstance(x, C.cntk_py.Function):
return x.eval()
elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter):
return x.value
else:
raise ValueError('CNTK Backend: `eval` method on '
'`%s` type is not supported. '
'CNTK only supports `eval` with '
'`Function`, `Constant` or '
'`Parameter`.' % type(x))
def placeholder(
shape=None,
ndim=None,
dtype=None,
sparse=False,
name=None,
dynamic_axis_num=1):
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension
cntk_shape = [dynamic_dimension if s is None else s for s in shape]
cntk_shape = tuple(cntk_shape)
if dynamic_axis_num > len(cntk_shape):
raise ValueError('CNTK backend: creating placeholder with '
'%d dimension is not supported, at least '
'%d dimensions are needed.'
                         % (len(cntk_shape), dynamic_axis_num))
if name is None:
name = ''
cntk_shape = cntk_shape[dynamic_axis_num:]
x = C.input(
shape=cntk_shape,
dtype=_convert_string_dtype(dtype),
is_sparse=sparse,
name=name)
x._keras_shape = shape
x._uses_learning_phase = False
x._cntk_placeholder = True
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
# Arguments
x: A candidate placeholder.
# Returns
Boolean.
"""
return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder
def is_keras_tensor(x):
if not is_tensor(x):
raise ValueError('Unexpectedly found an instance of type `' +
str(type(x)) + '`. '
'Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
def is_tensor(x):
return isinstance(x, (C.variables.Constant,
C.variables.Variable,
C.variables.Parameter,
C.ops.functions.Function))
def shape(x):
shape = list(int_shape(x))
num_dynamic = _get_dynamic_axis_num(x)
non_dyn_shape = []
for i in range(len(x.shape)):
if shape[i + num_dynamic] is None:
non_dyn_shape.append(x.shape[i])
else:
non_dyn_shape.append(shape[i + num_dynamic])
return shape[:num_dynamic] + non_dyn_shape
def is_sparse(tensor):
return tensor.is_sparse
def int_shape(x):
if hasattr(x, '_keras_shape'):
return x._keras_shape
shape = x.shape
if hasattr(x, 'dynamic_axes'):
dynamic_shape = [None for a in x.dynamic_axes]
shape = tuple(dynamic_shape) + shape
return shape
def ndim(x):
shape = int_shape(x)
return len(shape)
def _prepare_name(name, default):
prefix = '_'.join(NAME_SCOPE_STACK)
if name is None or name == '':
return prefix + '/' + default
return prefix + '/' + name
def constant(value, dtype=None, shape=None, name=None):
if dtype is None:
dtype = floatx()
if shape is None:
shape = ()
np_value = value * np.ones(shape)
const = C.constant(np_value,
dtype=dtype,
name=_prepare_name(name, 'constant'))
const._keras_shape = const.shape
const._uses_learning_phase = False
return const
def random_binomial(shape, p=0.0, dtype=None, seed=None):
# use numpy workaround now
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
np.random.seed(seed)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
size = 1
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
size *= _
binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape)
return variable(value=binomial, dtype=dtype)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
return random_uniform_variable(shape, minval, maxval, dtype, seed)
def random_uniform_variable(shape, low, high,
dtype=None, name=None, seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e3)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
scale = (high - low) / 2
p = C.parameter(
shape,
init=C.initializer.uniform(
scale,
seed=seed),
dtype=dtype,
name=name)
return variable(value=p.value + low + scale)
def random_normal_variable(
shape,
mean,
scale,
dtype=None,
name=None,
seed=None):
if dtype is None:
dtype = floatx()
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e7)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
if name is None:
name = ''
return C.parameter(
shape=shape,
init=C.initializer.normal(
scale=scale,
seed=seed),
dtype=dtype,
name=name)
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if dtype is None:
dtype = floatx()
for _ in shape:
if _ is None:
raise ValueError('CNTK Backend: randomness op with '
'dynamic shape is not supported now. '
'Please provide fixed dimension '
'instead of `None`.')
    # TODO: mean and stddev are not actually applied yet
    # (random_normal_variable ignores them; scale is fixed to 1.0)
return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
if seed is None:
seed = np.random.randint(1, 10e6)
if dtype is None:
dtype = np.float32
else:
dtype = _convert_string_dtype(dtype)
return C.parameter(
shape, init=C.initializer.truncated_normal(
stddev, seed=seed), dtype=dtype)
def dtype(x):
return _convert_dtype_string(x.dtype)
def zeros(shape, dtype=None, name=None):
if dtype is None:
dtype = floatx()
ctype = _convert_string_dtype(dtype)
return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name)
def ones(shape, dtype=None, name=None):
if dtype is None:
dtype = floatx()
ctype = _convert_string_dtype(dtype)
return variable(value=np.ones(shape, ctype), dtype=dtype, name=name)
def eye(size, dtype=None, name=None):
if dtype is None:
dtype = floatx()
return variable(np.eye(size), dtype, name)
def zeros_like(x, dtype=None, name=None):
return x * 0
def ones_like(x, dtype=None, name=None):
return zeros_like(x) + 1
def count_params(x):
for _ in x.shape:
if _ == C.InferredDimension or _ == C.FreeDimension:
raise ValueError('CNTK backend: `count_params` with dynamic '
'shape is not supported. Please provide '
'fixed dimension instead of `None`.')
return np.prod(int_shape(x))
def cast(x, dtype):
# cntk calculate everything in float, so don't need case from bool / int
return x
def dot(x, y):
if len(x.shape) > 2 or len(y.shape) > 2:
y_shape = int_shape(y)
if len(y_shape) > 2:
permutation = [len(y_shape) - 2]
permutation += list(range(len(y_shape) - 2))
permutation += [len(y_shape) - 1]
y = C.transpose(y, perm=permutation)
return C.times(x, y, len(y_shape) - 1)
else:
return C.times(x, y)
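# Example (sketch; names are illustrative): dense-layer style product of a
# batched input and a weight matrix:
#   x = placeholder(shape=(None, 64))         # dynamic batch axis
#   w = variable(np.random.random((64, 10)))
#   y = dot(x, w)                              # int_shape(y) -> (None, 10)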
def batch_dot(x, y, axes=None):
x_shape = int_shape(x)
y_shape = int_shape(y)
if isinstance(axes, int):
axes = (axes, axes)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [len(x_shape) - 1, len(y_shape) - 2]
if b_any([isinstance(a, (list, tuple)) for a in axes]):
raise ValueError('Multiple target dimensions are not supported. ' +
'Expected: None, int, (int, int), ' +
'Provided: ' + str(axes))
if len(x_shape) == 2 and len(y_shape) == 2:
if axes[0] == axes[1]:
result = sum(x * y, axis=axes[0], keepdims=True)
return result if axes[0] == 1 else transpose(result)
else:
return sum(x * transpose(y), axis=axes[0], keepdims=True)
else:
if len(y_shape) == 2:
y = expand_dims(y)
normalized_axis = []
normalized_axis.append(_normalize_axis(axes[0], x)[0])
normalized_axis.append(_normalize_axis(axes[1], y)[0])
# transpose
i = normalized_axis[0]
while i < len(x.shape) - 1:
x = C.swapaxes(x, i, i + 1)
i += 1
i = normalized_axis[1]
while i > 0:
y = C.swapaxes(y, i, i - 1)
i -= 1
result = C.times(x, y, output_rank=(len(y.shape) - 1)
if len(y.shape) > 1 else 1)
if len(y_shape) == 2:
result = squeeze(result, -1)
return result
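# Example (sketch): batched matrix multiply; with x of shape (batch, 2, 3) and
# y of shape (batch, 3, 4), batch_dot(x, y) has shape (batch, 2, 4), matching
# the Keras batch_dot contract with the default axes.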
def transpose(x):
return C.swapaxes(x, 0, 1)
def gather(reference, indices):
    # There is a bug in the cntk gather op which may cause a crash.
    # We have made a fix, but it did not make it into the CNTK 2.1 release.
    # Will switch to the gather op in the next release.
if _get_cntk_version() >= 2.2:
return C.ops.gather(reference, indices)
else:
num_classes = reference.shape[0]
one_hot_matrix = C.ops.one_hot(indices, num_classes)
return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1)
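# Example (sketch; names are illustrative): embedding-style lookup, where `emb`
# is a (vocab_size, dim) variable and `ids` holds integer indices:
#   vectors = gather(emb, ids)   # one row of `emb` per index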
def _remove_dims(x, axis, keepdims=False):
if keepdims is False and isinstance(axis, list):
# sequence axis is removed by default, so don't need reshape on it
reduce_axes = []
for a in axis:
if isinstance(a, C.Axis) is False:
reduce_axes.append(a)
return _reshape_dummy_dim(x, reduce_axes)
else:
if isinstance(axis, list):
has_seq = False
for a in axis:
if isinstance(a, C.Axis):
has_seq = True
break
if has_seq:
nones = _get_dynamic_axis_num(x)
x = expand_dims(x, nones)
return x
def max(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_max')
return _remove_dims(output, axis, keepdims)
def min(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_min')
return _remove_dims(output, axis, keepdims)
def sum(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_sum')
return _remove_dims(output, axis, keepdims)
def prod(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_prod')
return _remove_dims(output, axis, keepdims)
def logsumexp(x, axis=None, keepdims=False):
return log(sum(exp(x), axis=axis, keepdims=keepdims))
def var(x, axis=None, keepdims=False):
m = mean(x, axis, keepdims=True)
devs_squared = C.square(x - m)
return mean(devs_squared, axis=axis, keepdims=keepdims)
def std(x, axis=None, keepdims=False):
return C.sqrt(var(x, axis=axis, keepdims=keepdims))
def expand_dims(x, axis=-1):
shape = list(int_shape(x))
nones = _get_dynamic_axis_num(x)
index = axis if axis >= 0 else len(shape) + 1
shape.insert(index, 1)
new_shape = shape[nones:]
new_shape = tuple(
[C.InferredDimension if _ is None else _ for _ in new_shape])
result = C.reshape(x, new_shape)
if index < nones:
result._keras_shape = shape
return result
def squeeze(x, axis):
if isinstance(axis, tuple):
axis = list(axis)
if not isinstance(axis, list):
axis = [axis]
shape = list(int_shape(x))
_axis = []
for _ in axis:
if isinstance(_, int):
_axis.append(_ if _ >= 0 else _ + len(shape))
if len(_axis) == 0:
return x
nones = _get_dynamic_axis_num(x)
for _ in sorted(_axis, reverse=True):
del shape[_]
new_shape = shape[nones:]
new_shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in new_shape])
return C.reshape(x, new_shape)
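# Example (sketch): expand_dims / squeeze round-trip on the static axes; with x
# of keras shape (batch, 3):
#   y = expand_dims(x, axis=-1)   # -> (batch, 3, 1)
#   z = squeeze(y, axis=-1)       # -> (batch, 3)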
def tile(x, n):
if isinstance(n, int):
n = (n,)
elif isinstance(n, list):
n = tuple(n)
shape = int_shape(x)
num_dynamic_axis = _get_dynamic_axis_num(x)
# Padding the axis
if len(n) < len(shape):
n = tuple([1 for _ in range(len(shape) - len(n))]) + n
if len(n) != len(shape):
raise NotImplementedError
i = num_dynamic_axis
for i, rep in enumerate(n):
if i >= num_dynamic_axis and shape[i] is not None:
tmp = [x] * rep
x = C.splice(*tmp, axis=i - num_dynamic_axis)
i += 1
return x
def _normalize_axis(axis, x):
shape = int_shape(x)
ndim = len(shape)
nones = _get_dynamic_axis_num(x)
if nones > ndim:
raise ValueError('CNTK Backend: tensor with keras shape: `%s` has '
'%d cntk dynamic axis, this is not expected, please '
'double check the keras shape history.' % (str(shape), nones))
    # Current cntk does not support shapes like (1, batch), so we use this workaround
    # to map to the correct axis. This trick will be removed once support is added
    # to the native cntk op.
cntk_axis = []
dynamic_axis_index = 0
for i in range(ndim):
if shape[i] is None and dynamic_axis_index < nones:
cntk_axis.append(x.dynamic_axes[dynamic_axis_index])
dynamic_axis_index += 1
else:
cntk_axis.append(i - dynamic_axis_index)
if dynamic_axis_index < nones:
i = 0
while dynamic_axis_index < nones:
cntk_axis[i] = x.dynamic_axes[dynamic_axis_index]
i += 1
dynamic_axis_index += 1
while i < len(cntk_axis):
cntk_axis[i] -= nones
i += 1
if isinstance(axis, tuple):
_axis = list(axis)
elif isinstance(axis, int):
_axis = [axis]
elif isinstance(axis, list):
_axis = list(axis)
else:
_axis = axis
if isinstance(_axis, list):
for i, a in enumerate(_axis):
if a is not None and a < 0:
_axis[i] = (a % ndim)
if _axis[i] is not None:
_axis[i] = cntk_axis[_axis[i]]
else:
if _axis is None:
_axis = C.Axis.all_axes()
return _axis
def _reshape_dummy_dim(x, axis):
shape = list(x.shape)
_axis = [_ + len(shape) if _ < 0 else _ for _ in axis]
if shape.count(C.InferredDimension) > 1 or shape.count(C.FreeDimension) > 1:
result = x
for index in sorted(_axis, reverse=True):
result = C.reshape(result,
shape=(),
begin_axis=index,
end_axis=index + 1)
return result
else:
for index in sorted(_axis, reverse=True):
del shape[index]
shape = [C.InferredDimension if _ == C.FreeDimension else _ for _ in shape]
return C.reshape(x, shape)
def mean(x, axis=None, keepdims=False):
axis = _normalize_axis(axis, x)
output = _reduce_on_axis(x, axis, 'reduce_mean')
return _remove_dims(output, axis, keepdims)
def any(x, axis=None, keepdims=False):
reduce_result = sum(x, axis, keepdims=keepdims)
any_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(any_matrix)
else:
return any_matrix
def all(x, axis=None, keepdims=False):
reduce_result = prod(x, axis, keepdims=keepdims)
all_matrix = C.element_select(
reduce_result,
ones_like(reduce_result),
zeros_like(reduce_result))
if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0:
return C.reduce_sum(all_matrix)
else:
return all_matrix
def classification_error(target, output, axis=-1):
return C.ops.reduce_mean(
C.equal(
argmax(
output,
axis=-1),
argmax(
target,
axis=-1)),
axis=C.Axis.all_axes())
def argmax(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmax(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def argmin(x, axis=-1):
axis = [axis]
axis = _normalize_axis(axis, x)
output = C.ops.argmin(x, axis=axis[0])
return _reshape_dummy_dim(output, axis)
def square(x):
return C.square(x)
def abs(x):
return C.abs(x)
def sqrt(x):
return C.sqrt(x)
def exp(x):
return C.exp(x)
def log(x):
return C.log(x)
def round(x):
return C.round(x)
def sigmoid(x):
return C.sigmoid(x)
def sign(x):
return x / C.abs(x)
def pow(x, a):
return C.pow(x, a)
def clip(x, min_value, max_value):
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
if min_value is None:
min_value = -np.inf
return C.clip(x, min_value, max_value)
def binary_crossentropy(target, output, from_logits=False):
if from_logits:
output = C.sigmoid(output)
output = C.clip(output, epsilon(), 1.0 - epsilon())
output = -target * C.log(output) - (1.0 - target) * C.log(1.0 - output)
return output
def get_variable_shape(x):
return int_shape(x)
def update(x, new_x):
return C.assign(x, new_x)
def moving_average_update(variable, value, momentum):
return C.assign(variable, variable * momentum + value * (1. - momentum))
def update_add(x, increment):
result = x + increment
return C.assign(x, result)
def gradients(loss, variables):
    # cntk does not support gradients as symbolic ops;
    # to hook up with the keras model
    # we return a constant as a placeholder, and the cntk learner applies
    # the gradient during training.
global grad_parameter_dict
if isinstance(variables, list) is False:
variables = [variables]
grads = []
for v in variables:
g = C.constant(0, shape=v.shape, name='keras_grad_placeholder')
grads.append(g)
grad_parameter_dict[g] = v
return grads
def equal(x, y):
return C.equal(x, y)
def not_equal(x, y):
return C.not_equal(x, y)
def greater(x, y):
return C.greater(x, y)
def greater_equal(x, y):
return C.greater_equal(x, y)
def less(x, y):
return C.less(x, y)
def less_equal(x, y):
return C.less_equal(x, y)
def maximum(x, y):
return C.element_max(x, y)
def minimum(x, y):
return C.element_min(x, y)
def sin(x):
return C.sin(x)
def cos(x):
return C.cos(x)
def normalize_batch_in_training(x, gamma, beta,
reduction_axes, epsilon=1e-3):
if gamma is None:
if beta is None:
gamma = ones_like(x)
else:
gamma = ones_like(beta)
if beta is None:
if gamma is None:
beta = zeros_like(x)
else:
beta = zeros_like(gamma)
mean, variant = _moments(x, _normalize_axis(reduction_axes, x))
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
normalized = batch_normalization(
x, mean, variant, beta, gamma, epsilon)
else:
# need broadcasting
target_shape = []
x_shape = int_shape(x)
# skip the batch axis
for axis in range(1, ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
if ndim(gamma) > axis:
gamma = C.reduce_mean(gamma, axis - 1)
beta = C.reduce_mean(beta, axis - 1)
else:
target_shape.append(x_shape[axis])
broadcast_mean = C.reshape(mean, target_shape)
broadcast_var = C.reshape(variant, target_shape)
broadcast_gamma = C.reshape(gamma, target_shape)
broadcast_beta = C.reshape(beta, target_shape)
normalized = batch_normalization(
x,
broadcast_mean,
broadcast_var,
broadcast_beta,
broadcast_gamma,
epsilon)
return normalized, mean, variant
def _moments(x, axes=None, shift=None, keep_dims=False):
_axes = tuple(axes)
if shift is None:
shift = x
# Compute true mean while keeping the dims for proper broadcasting.
for axis in _axes:
shift = C.reduce_mean(shift, axis=axis)
shift = C.stop_gradient(shift)
shifted_mean = C.minus(x, shift)
for axis in _axes:
shifted_mean = C.reduce_mean(shifted_mean, axis=axis)
variance_mean = C.square(C.minus(x, shift))
for axis in _axes:
variance_mean = C.reduce_mean(variance_mean, axis=axis)
variance = C.minus(variance_mean, C.square(shifted_mean))
mean = C.plus(shifted_mean, shift)
if not keep_dims:
mean = squeeze(mean, _axes)
variance = squeeze(variance, _axes)
return mean, variance
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
# The mean / var / beta / gamma may be processed by broadcast
# so it may have an extra batch axis with 1, it is not needed
# in cntk, need to remove those dummy axis.
if ndim(mean) == ndim(x) and shape(mean)[0] == 1:
mean = _reshape_dummy_dim(mean, [0])
if ndim(var) == ndim(x) and shape(var)[0] == 1:
var = _reshape_dummy_dim(var, [0])
if gamma is None:
gamma = ones_like(var)
elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1:
gamma = _reshape_dummy_dim(gamma, [0])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) == ndim(x) and shape(beta)[0] == 1:
beta = _reshape_dummy_dim(beta, [0])
return (x - mean) / (C.sqrt(var) + epsilon) * gamma + beta
def concatenate(tensors, axis=-1):
if len(tensors) == 0:
return None
axis = [axis]
axis = _normalize_axis(axis, tensors[0])
return C.splice(*tensors, axis=axis[0])
def flatten(x):
return reshape(x, (-1,))
def reshape(x, shape):
shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in shape])
if isinstance(x, C.variables.Parameter):
return C.reshape(x, shape)
else:
num_dynamic_axis = _get_dynamic_axis_num(x)
if num_dynamic_axis == 1 and len(shape) > 0 and shape[0] == -1:
# collapse axis with batch axis
if b_any(_ == C.InferredDimension for _ in x.shape) or b_any(
_ == C.FreeDimension for _ in x.shape):
warnings.warn(
'Warning: CNTK backend does not support '
'collapse of batch axis with inferred dimension. '
'The reshape did not take place.')
return x
return _reshape_batch(x, shape)
else:
# no collapse, then first need to padding the shape
if num_dynamic_axis >= len(shape):
i = 0
while i < len(shape):
if shape[i] is None or shape[i] == -1:
i += 1
else:
break
shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) + shape
new_shape = list(shape)
new_shape = new_shape[num_dynamic_axis:]
new_shape = [C.InferredDimension if _ is None else _ for _ in new_shape]
return C.reshape(x, new_shape)
def permute_dimensions(x, pattern):
dims = len(int_shape(x))
num_dynamic_axis = _get_dynamic_axis_num(x)
if isinstance(pattern, list):
current_layout = [i for i in range(dims)]
else:
current_layout = tuple([i for i in range(dims)])
if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]:
raise ValueError('CNTK backend: the permute pattern %s '
'requested permute on dynamic axis, '
'which is not supported. Please do permute '
'on static axis.' % pattern)
axis = list(pattern)
axis = axis[num_dynamic_axis:]
axis = _normalize_axis(axis, x)
return C.transpose(x, axis)
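# Example (sketch): only static axes may be permuted; with x of keras shape
# (batch, time, features) the batch axis is dynamic, so a valid pattern keeps it
# in place:
#   y = permute_dimensions(x, (0, 2, 1))   # -> (batch, features, time)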
def resize_images(x, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(x, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, height_factor, axis=1)
output = repeat_elements(output, width_factor, axis=2)
return output
else:
raise ValueError('CNTK Backend: Invalid data_format:', data_format)
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('CNTK Backend: Invalid data_format:', data_format)
def repeat_elements(x, rep, axis):
axis = _normalize_axis(axis, x)
axis = axis[0]
slices = []
shape = x.shape
i = 0
while i < shape[axis]:
tmp = C.ops.slice(x, axis, i, i + 1)
for _ in range(rep):
slices.append(tmp)
i += 1
return C.splice(*slices, axis=axis)
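# Example (sketch): each slice along the axis is repeated `rep` times; with x of
# static shape (2, 3), repeat_elements(x, 2, axis=1) yields shape (2, 6) with
# columns ordered c0 c0 c1 c1 c2 c2.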
def repeat(x, n):
    # this is a workaround for the recurrent layer:
    # if n is an inferred dimension,
    # we can't figure out how to repeat it in cntk now,
    # so return the same x and rely on cntk broadcasting
    # to make the recurrent layer work.
    # needs to be fixed in GA.
if n is C.InferredDimension or n is C.FreeDimension:
return x
index = 1 - _get_dynamic_axis_num(x)
if index < 0 or index > 1:
raise NotImplementedError
new_shape = list(x.shape)
new_shape.insert(index, 1)
new_shape = tuple(new_shape)
x = C.reshape(x, new_shape)
temp = [x] * n
return C.splice(*temp, axis=index)
def tanh(x):
return C.tanh(x)
def _static_rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
uses_learning_phase = False
if dims < 3:
raise ValueError('Input should be at least 3D.')
# if the second axis is static axis, CNTK will do unroll by default
if shape[1] is None:
raise ValueError('CNTK Backend: the input of static rnn '
'has shape `%s`, the second axis '
'is not static. If you want to run '
'rnn with non-static axis, please try '
'dynamic rnn with sequence axis.' % shape)
if constants is None:
constants = []
if mask is not None:
mask_shape = int_shape(mask)
if len(mask_shape) == dims - 1:
mask = expand_dims(mask)
nones = _get_dynamic_axis_num(inputs)
states = tuple(initial_states)
outputs = []
time_axis = 1 - nones if nones > 0 else 1
if go_backwards:
i = shape[1] - 1
while i >= 0:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, time_axis)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, time_axis)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states
i -= 1
else:
i = 0
while i < shape[1]:
current = C.ops.slice(inputs, time_axis, i, i + 1)
# remove dummy dimension
current = squeeze(current, 1)
output, new_states = step_function(
current, tuple(states) + tuple(constants))
if getattr(output, '_uses_learning_phase', False):
uses_learning_phase = True
if mask is not None:
mask_slice = C.ops.slice(mask, time_axis, i, i + 1)
mask_slice = squeeze(mask_slice, 1)
if len(outputs) == 0:
prev_output = zeros_like(output)
else:
prev_output = outputs[-1]
output = C.ops.element_select(mask_slice, output, prev_output)
return_states = []
for s, n_s in zip(states, new_states):
return_states.append(
C.ops.element_select(
mask_slice, n_s, s))
new_states = return_states
outputs.append(output)
states = new_states[:len(states)]
i += 1
i = 1
# add the time_step axis back
final_output = expand_dims(outputs[0], 1)
last_output = outputs[0]
while i < len(outputs):
# add the time_step axis back
output_slice = expand_dims(outputs[i], 1)
final_output = C.splice(final_output, output_slice, axis=time_axis)
last_output = outputs[i]
i += 1
last_output._uses_learning_phase = uses_learning_phase
return last_output, final_output, states
def rnn(step_function, inputs, initial_states,
go_backwards=False, mask=None, constants=None,
unroll=False, input_length=None):
shape = int_shape(inputs)
dims = len(shape)
global uses_learning_phase
uses_learning_phase = False
if dims < 3:
raise ValueError('CNTK Backend: the input of rnn has only rank %d '
'Need at least rank 3 to run RNN.' % dims)
if _get_dynamic_axis_num(inputs) == 0 or unroll:
return _static_rnn(
step_function,
inputs,
initial_states,
go_backwards,
mask,
constants,
unroll,
input_length)
if constants is None:
constants = []
num_time_step = shape[1]
if num_time_step is None and not has_seq_axis(inputs):
num_time_step = inputs.shape[0]
initial = []
for s in initial_states:
if _get_dynamic_axis_num(s) == 0:
if hasattr(C, 'to_batch'):
initial.append(C.to_batch(s))
else:
initial.append(C.user_function(ConvertToBatch(s)))
else:
initial.append(s)
need_convert = not has_seq_axis(inputs)
if go_backwards and need_convert is False:
raise NotImplementedError('CNTK Backend: `go_backwards` is not supported with '
'variable-length sequences. Please specify a '
'static length for your sequences.')
rnn_inputs = inputs
if need_convert:
if go_backwards:
rnn_inputs = reverse(rnn_inputs, 1)
rnn_inputs = C.to_sequence(rnn_inputs)
rnn_constants = []
for constant in constants:
if isinstance(constant, list):
new_c = []
for c in constant:
if _get_dynamic_axis_num(c) == 1:
new_c.append(C.sequence.broadcast_as(c, rnn_inputs))
else:
new_c.append(c)
rnn_constants.append(new_c)
else:
if _get_dynamic_axis_num(constant) == 1:
rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs))
else:
rnn_constants.append(constant)
else:
rnn_constants = constants
if mask is not None and not has_seq_axis(mask):
if go_backwards:
mask = reverse(mask, 1)
if len(int_shape(mask)) == 2:
mask = expand_dims(mask)
mask = C.to_sequence_like(mask, rnn_inputs)
states = tuple(initial)
with C.default_options(axis_offset=1):
def _recurrence(x, states, m):
# create place holder
place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states]
past_values = []
for s, p in zip(states, place_holders):
past_values.append(C.sequence.past_value(p, s))
new_output, new_states = step_function(
x, tuple(past_values) + tuple(rnn_constants))
if getattr(new_output, '_uses_learning_phase', False):
global uses_learning_phase
uses_learning_phase = True
if m is not None:
new_states = [C.element_select(m, n, s) for n, s in zip(new_states, past_values)]
n_s = []
for o, p in zip(new_states, place_holders):
n_s.append(o.replace_placeholders({p: o.output}))
if len(n_s) > 0:
new_output = n_s[0]
return new_output, n_s
final_output, final_states = _recurrence(rnn_inputs, states, mask)
last_output = C.sequence.last(final_output)
last_states = [C.sequence.last(s) for s in final_states]
if need_convert:
final_output = C.sequence.unpack(final_output, 0, no_mask_output=True)
if num_time_step is not None and num_time_step is not C.FreeDimension:
final_output = _reshape_sequence(final_output, num_time_step)
f_stats = []
for l_s, i_s in zip(last_states, initial_states):
if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) == 1:
if hasattr(C, 'unpack_batch'):
f_stats.append(C.unpack_batch(l_s))
else:
f_stats.append(C.user_function(ConvertToStatic(l_s, batch_size=i_s.shape[0])))
else:
f_stats.append(l_s)
last_output._uses_learning_phase = uses_learning_phase
return last_output, final_output, f_stats
def has_seq_axis(x):
return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1
def l2_normalize(x, axis=None):
axis = [axis]
axis = _normalize_axis(axis, x)
norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0]))
return x / norm
def hard_sigmoid(x):
x = (0.2 * x) + 0.5
x = C.clip(x, 0.0, 1.0)
return x
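# Example (sketch): hard_sigmoid is the piecewise-linear clip(0.2 * x + 0.5, 0, 1),
# so hard_sigmoid(-2.5) -> 0.0, hard_sigmoid(0) -> 0.5, hard_sigmoid(2.5) -> 1.0.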
def conv1d(x, kernel, strides=1, padding='valid',
data_format=None, dilation_rate=1):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel.shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
kernel = C.swapaxes(kernel, 0, 2)
padding = _preprocess_border_mode(padding)
strides = [strides]
x = C.convolution(
kernel,
x,
strides=tuple(strides),
auto_padding=[
False,
padding])
if data_format == 'channels_last':
x = C.swapaxes(x, 0, 1)
return x
def conv2d(x, kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding])
else:
assert dilation_rate[0] == dilation_rate[1]
assert strides == (1, 1), 'Invalid strides for dilated convolution'
x = C.convolution(
kernel,
x,
strides=dilation_rate[0],
auto_padding=[
False,
padding,
padding])
return _postprocess_conv2d_output(x, data_format)
def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1,
padding='valid', data_format=None, dilation_rate=1):
raise NotImplementedError
def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1),
padding='valid', data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
(-1, 1) + depthwise_kernel.shape[2:])
pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format)
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(depthwise_kernel, x,
strides=strides,
auto_padding=[False, padding, padding],
groups=x.shape[0])
x = C.convolution(pointwise_kernel, x,
strides=(1, 1, 1),
auto_padding=[False])
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError('CNTK Backend: non-square dilation_rate is '
'not supported.')
if strides != (1, 1):
raise ValueError('Invalid strides for dilated convolution')
x = C.convolution(depthwise_kernel, x,
strides=dilation_rate[0],
auto_padding=[False, padding, padding])
x = C.convolution(pointwise_kernel, x,
strides=(1, 1, 1),
auto_padding=[False])
return _postprocess_conv2d_output(x, data_format)
def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format)
depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)),
(-1, 1) + depthwise_kernel.shape[2:])
padding = _preprocess_border_mode(padding)
if dilation_rate == (1, 1):
strides = (1,) + strides
x = C.convolution(depthwise_kernel, x,
strides=strides,
auto_padding=[False, padding, padding],
groups=x.shape[0])
else:
if dilation_rate[0] != dilation_rate[1]:
raise ValueError('CNTK Backend: non-square dilation_rate is '
'not supported.')
if strides != (1, 1):
raise ValueError('Invalid strides for dilated convolution')
x = C.convolution(depthwise_kernel, x,
strides=dilation_rate[0],
auto_padding=[False, padding, padding],
groups=x.shape[0])
return _postprocess_conv2d_output(x, data_format)
def conv3d(x, kernel, strides=(1, 1, 1), padding='valid',
data_format=None, dilation_rate=(1, 1, 1)):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = strides + (strides[0],)
x = C.convolution(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding])
return _postprocess_conv3d_output(x, data_format)
def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv3d_input(x, data_format)
kernel = _preprocess_conv3d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
    # in keras 2, the output shape needs to be handled in a different format
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[3]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
shape[3] = output_shape[2]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv3d_output(x, data_format)
def pool2d(x, pool_size, strides=(1, 1),
padding='valid', data_format=None,
pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
strides = strides
pool_size = pool_size
x = _preprocess_conv2d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv2d_output(x, data_format)
def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid',
data_format=None, pool_mode='max'):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
padding = _preprocess_border_mode(padding)
x = _preprocess_conv3d_input(x, data_format)
if pool_mode == 'max':
x = C.pooling(
x,
C.MAX_POOLING,
pool_size,
strides,
auto_padding=[padding])
elif pool_mode == 'avg':
x = C.pooling(
x,
C.AVG_POOLING,
pool_size,
strides,
auto_padding=[padding])
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
return _postprocess_conv3d_output(x, data_format)
def relu(x, alpha=0., max_value=None):
if alpha != 0.:
negative_part = C.relu(-x)
x = C.relu(x)
if max_value is not None:
x = C.clip(x, 0.0, max_value)
if alpha != 0.:
x -= alpha * negative_part
return x
def dropout(x, level, noise_shape=None, seed=None):
if level < 0. or level >= 1:
raise ValueError('CNTK Backend: Invalid dropout level %s, '
'must be in interval [0, 1].' % level)
return C.dropout(x, level)
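# Example (sketch): dropout is typically wrapped in `in_train_phase` so that it is
# only applied while training:
#   y = in_train_phase(lambda: dropout(x, 0.2), x)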
def batch_flatten(x):
# cntk's batch axis is not in shape,
    # so just flatten all the dims in x.shape
dim = np.prod(x.shape)
x = C.reshape(x, (-1,))
x._keras_shape = (None, dim)
return x
def softmax(x, axis=-1):
return C.softmax(x, axis=axis)
def softplus(x):
return C.softplus(x)
def softsign(x):
return x / (1 + C.abs(x))
def categorical_crossentropy(target, output, from_logits=False):
if from_logits:
result = C.cross_entropy_with_softmax(output, target)
        # cntk's result shape is (batch, 1), while keras expects (batch,)
return C.reshape(result, ())
else:
# scale preds so that the class probas of each sample sum to 1
output /= C.reduce_sum(output, axis=-1)
# avoid numerical instability with epsilon clipping
output = C.clip(output, epsilon(), 1.0 - epsilon())
return -sum(target * C.log(output), axis=-1)
def sparse_categorical_crossentropy(target, output, from_logits=False):
target = C.one_hot(target, output.shape[-1])
target = C.reshape(target, output.shape)
return categorical_crossentropy(target, output, from_logits)
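# Example (sketch): for the sparse variant the labels are integer class ids, e.g.
# target shape (batch,) with values in [0, num_classes), while output has shape
# (batch, num_classes); the ids are one-hot encoded above and the dense
# cross-entropy is reused.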
class Function(object):
def __init__(self, inputs, outputs, updates=[], **kwargs):
self.placeholders = inputs
self.trainer = None
self.unrelated_updates = None
self.updates = updates
if len(updates) > 0:
assert len(outputs) > 0
self.loss = outputs[0]
            # need to group updates by gradient placeholder
u_ops = []
unrelated_updates = []
for update in updates:
if isinstance(update, tuple):
if len(update) != 2:
raise NotImplementedError
else:
u = C.assign(update[0], update[1])
else:
u = update
if len(u.arguments) == 0:
u_ops.append(u)
else:
unrelated_updates.append(u)
update_func = C.combine([u.output for u in u_ops])
grads = update_func.find_all_with_name('keras_grad_placeholder')
u_list = []
p_list = []
for g in grads:
if g in grad_parameter_dict:
p_list.append(grad_parameter_dict[g])
u_list.append(g)
else:
raise ValueError(
'CNTK backend: when constructing trainer, '
'found gradient node `%s` which is not '
'related to any parameters in the model. '
'Please double check how the gradient node '
'is constructed.' % g)
if len(u_list) > 0:
learner = C.cntk_py.universal_learner(p_list, u_list, update_func)
criterion = (
outputs[0],
outputs[1]) if len(outputs) > 1 else (
outputs[0],
)
self.trainer = C.trainer.Trainer(
outputs[0], criterion, [learner])
self.trainer_output = tuple([f.output for f in criterion])
elif len(u_ops) > 0:
unrelated_updates.extend(u_ops)
if len(unrelated_updates) > 0:
self.unrelated_updates = C.combine([_.output for _ in unrelated_updates])
if self.trainer is None:
self.metrics_outputs = [f.output for f in outputs]
self.metrics_func = C.combine(self.metrics_outputs)
        # cntk can only handle the loss and 1 metric in the trainer; any outputs
        # beyond the first 2 need manual eval
elif len(outputs) > 2:
self.metrics_outputs = [f.output for f in outputs[2:]]
self.metrics_func = C.combine(self.metrics_outputs)
else:
self.metrics_func = None
@staticmethod
def _is_input_shape_compatible(input, placeholder):
if hasattr(input, 'shape') and hasattr(placeholder, 'shape'):
num_dynamic = get_num_dynamic_axis(placeholder)
input_shape = input.shape[num_dynamic:]
placeholder_shape = placeholder.shape
for i, p in zip(input_shape, placeholder_shape):
if i != p and p != C.InferredDimension and p != C.FreeDimension:
return False
return True
def __call__(self, inputs):
global _LEARNING_PHASE_PLACEHOLDER
global _LEARNING_PHASE
assert isinstance(inputs, (list, tuple))
feed_dict = {}
for tensor, value in zip(self.placeholders, inputs):
# cntk only support calculate on float, do auto cast here
if (hasattr(value, 'dtype') and
value.dtype != np.float32 and
value.dtype != np.float64):
value = value.astype(np.float32)
if tensor == _LEARNING_PHASE_PLACEHOLDER:
_LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value)
else:
                # the current version of cntk can't support inputs with variable
                # length. Will support it in the next release.
if not self._is_input_shape_compatible(value, tensor):
raise ValueError('CNTK backend: The placeholder has been resolved '
'to shape `%s`, but input shape is `%s`. Currently '
'CNTK can not take variable length inputs. Please '
'pass inputs that have a static shape.'
% (str(tensor.shape), str(value.shape)))
feed_dict[tensor] = value
updated = []
if self.trainer is not None:
input_dict = {}
for argument in self.loss.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError(
'CNTK backend: argument %s is not found in inputs. '
'Please double check the model and inputs in '
'`train_function`.' % argument.name)
result = self.trainer.train_minibatch(
input_dict, self.trainer_output)
assert(len(result) == 2)
outputs = result[1]
for o in self.trainer_output:
updated.append(outputs[o])
if self.metrics_func is not None:
input_dict = {}
for argument in self.metrics_func.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError('CNTK backend: metrics argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
            # Some ops (like dropout) won't be applied during "eval" in cntk.
            # They are only evaluated in the training phase. To make it work, call
            # the "forward" method to let cntk know we want to evaluate them.
            # But the assign ops won't be executed under this mode, which is why
            # we need this check.
if (self.unrelated_updates is None and
(_LEARNING_PHASE_PLACEHOLDER.value == 1.0 or _LEARNING_PHASE == 1)):
_, output_values = self.metrics_func.forward(
input_dict,
self.metrics_func.outputs,
(self.metrics_func.outputs[0],),
as_numpy=False)
else:
output_values = self.metrics_func.eval(input_dict, as_numpy=False)
if isinstance(output_values, dict):
for o in self.metrics_outputs:
value = output_values[o]
v = value.asarray()
updated.append(v)
else:
v = output_values.asarray()
for o in self.metrics_outputs:
updated.append(v)
if self.unrelated_updates is not None:
input_dict = {}
for argument in self.unrelated_updates.arguments:
if argument in feed_dict:
input_dict[argument] = feed_dict[argument]
else:
raise ValueError(
'CNTK backend: assign ops argument %s '
'is not found in inputs. Please double '
'check the model and inputs.' % argument.name)
self.unrelated_updates.eval(input_dict, as_numpy=False)
return updated
def function(inputs, outputs, updates=[], **kwargs):
return Function(inputs, outputs, updates=updates, **kwargs)
def temporal_padding(x, padding=(1, 1)):
assert len(padding) == 2
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if num_dynamic_axis > 0:
assert len(base_shape) == 2
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[padding, (0, 0)])
else:
x = _padding(x, padding, 0)
else:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[(0, 0), padding, (0, 0)])
else:
x = _padding(x, padding, 1)
return x
def _padding(x, pattern, axis):
base_shape = x.shape
if b_any([dim < 0 for dim in base_shape]):
raise ValueError('CNTK Backend: padding input tensor with '
'shape `%s` contains non-specified dimension, '
'which is not supported. Please give fixed '
'dimension to enable padding.' % base_shape)
if pattern[0] > 0:
prefix_shape = list(base_shape)
prefix_shape[axis] = pattern[0]
prefix_shape = tuple(prefix_shape)
x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis)
base_shape = x.shape
if pattern[1] > 0:
postfix_shape = list(base_shape)
postfix_shape[axis] = pattern[1]
postfix_shape = tuple(postfix_shape)
x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis)
return x
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
else:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 3
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
else:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
return x
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
num_dynamic_axis = _get_dynamic_axis_num(x)
base_shape = x.shape
if data_format == 'channels_first':
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])])
else:
x = _padding(x, padding[0], 2)
x = _padding(x, padding[1], 3)
x = _padding(x, padding[2], 4)
else:
if num_dynamic_axis > 0:
assert len(base_shape) == 4
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 0)
x = _padding(x, padding[1], 1)
x = _padding(x, padding[2], 2)
else:
assert len(base_shape) == 5
if hasattr(C, 'pad'):
x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]])
else:
x = _padding(x, padding[0], 1)
x = _padding(x, padding[1], 2)
x = _padding(x, padding[2], 3)
return x
def one_hot(indices, num_classes):
return C.one_hot(indices, num_classes)
def get_value(x):
if isinstance(
x,
C.variables.Parameter) or isinstance(
x,
C.variables.Constant):
return x.value
else:
return eval(x)
def batch_get_value(xs):
result = []
for x in xs:
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
result.append(x.value)
else:
result.append(eval(x))
return result
def set_value(x, value):
if (isinstance(x, C.variables.Parameter) or
isinstance(x, C.variables.Constant)):
if isinstance(value, (float, int)):
value = np.full(x.shape, value, dtype=floatx())
x.value = value
else:
raise NotImplementedError
def print_tensor(x, message=''):
return C.user_function(
LambdaFunc(x,
when=lambda x: True,
execute=lambda x: print(message)))
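# Usage sketch (hypothetical tensor `x`): the returned tensor must be used
# downstream, otherwise the print side effect never runs during evaluation.
#   x = print_tensor(x, message='activations: ')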
def batch_set_value(tuples):
for t in tuples:
x = t[0]
value = t[1]
if isinstance(value, np.ndarray) is False:
value = np.asarray(value)
if isinstance(x, C.variables.Parameter):
x.value = value
else:
raise NotImplementedError
def stop_gradient(variables):
if isinstance(variables, (list, tuple)):
return map(C.stop_gradient, variables)
else:
return C.stop_gradient(variables)
def switch(condition, then_expression, else_expression):
ndim_cond = ndim(condition)
ndim_expr = ndim(then_expression)
if ndim_cond > ndim_expr:
raise ValueError('Rank of condition should be less'
' than or equal to rank of then and'
' else expressions. ndim(condition)=' +
str(ndim_cond) + ', ndim(then_expression)'
'=' + str(ndim_expr))
elif ndim_cond < ndim_expr:
shape_expr = int_shape(then_expression)
ndim_diff = ndim_expr - ndim_cond
for i in range(ndim_diff):
condition = expand_dims(condition)
condition = tile(condition, shape_expr[ndim_cond + i])
return C.element_select(condition,
then_expression,
else_expression)
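# Usage sketch (hypothetical tensor `x`): a leaky rectifier built from `switch`,
# using the backend's element-wise `greater`.
#   out = switch(greater(x, 0), x, 0.1 * x)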
def elu(x, alpha=1.):
res = C.elu(x)
if alpha == 1:
return res
else:
return C.element_select(C.greater(x, 0), res, alpha * res)
def in_top_k(predictions, targets, k):
_targets = C.one_hot(targets, predictions.shape[-1])
result = C.classification_error(predictions, _targets, topN=k)
return 1 - C.reshape(result, shape=())
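# Usage sketch (hypothetical tensors): in_top_k(preds, targets, 5) yields 1.0 for a
# sample whose true class is among the 5 highest-scoring predictions, else 0.0.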
def conv2d_transpose(x, kernel, output_shape, strides=(1, 1),
padding='valid', data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
x = _preprocess_conv2d_input(x, data_format)
kernel = _preprocess_conv2d_kernel(kernel, data_format)
padding = _preprocess_border_mode(padding)
strides = (1,) + strides
# cntk output_shape does not include batch axis
output_shape = output_shape[1:]
# in keras2, need handle output shape in different format
if data_format == 'channels_last':
shape = list(output_shape)
shape[0] = output_shape[2]
shape[1] = output_shape[0]
shape[2] = output_shape[1]
output_shape = tuple(shape)
x = C.convolution_transpose(
kernel,
x,
strides,
auto_padding=[
False,
padding,
padding],
output_shape=output_shape)
return _postprocess_conv2d_output(x, data_format)
def identity(x, name=None):
if name is None:
name = '%s_alias' % x.name
return C.alias(x, name=name)
def _preprocess_conv2d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, rows, cols)
# TF input shape: (samples, rows, cols, input_depth)
x = C.transpose(x, (2, 0, 1))
return x
def _preprocess_conv2d_kernel(kernel, data_format):
# As of Keras 2.0.0, all kernels are normalized
    # to the format `(rows, cols, input_depth, depth)`,
# independently of `data_format`.
# CNTK expects `(depth, input_depth, rows, cols)`.
kernel = C.transpose(kernel, (3, 2, 0, 1))
return kernel
def _preprocess_border_mode(padding):
if padding == 'same':
padding = True
elif padding == 'valid':
padding = False
else:
raise ValueError('Invalid border mode: ' + str(padding))
return padding
def _postprocess_conv2d_output(x, data_format):
if data_format == 'channels_last':
x = C.transpose(x, (1, 2, 0))
return x
def _preprocess_conv3d_input(x, data_format):
if data_format == 'channels_last':
# TF uses the last dimension as channel dimension,
# instead of the 2nd one.
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
# TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3,
# input_depth)
x = C.transpose(x, (3, 0, 1, 2))
return x
def _preprocess_conv3d_kernel(kernel, dim_ordering):
kernel = C.transpose(kernel, (4, 3, 0, 1, 2))
return kernel
def _postprocess_conv3d_output(x, dim_ordering):
if dim_ordering == 'channels_last':
x = C.transpose(x, (1, 2, 3, 0))
return x
def _get_dynamic_axis_num(x):
if hasattr(x, 'dynamic_axes'):
return len(x.dynamic_axes)
else:
return 0
def _contain_seqence_axis(x):
if _get_dynamic_axis_num(x) > 1:
return x.dynamic_axes[1] == C.Axis.default_dynamic_axis()
else:
return False
def get_num_dynamic_axis(x):
return _get_dynamic_axis_num(x)
def _reduce_on_axis(x, axis, reduce_fun_name):
if isinstance(axis, list):
for a in axis:
if isinstance(a, C.Axis) \
and a != C.Axis.default_batch_axis() \
and hasattr(C.sequence, reduce_fun_name):
x = getattr(C.sequence, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, a)
else:
x = getattr(C, reduce_fun_name)(x, axis)
return x
def _reshape_sequence(x, time_step):
tmp_shape = list(int_shape(x))
tmp_shape[1] = time_step
return reshape(x, tmp_shape)
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride = strides[0]
kernel_shape = int_shape(kernel)
output_length, feature_dim, filters = kernel_shape
xs = []
for i in range(output_length):
slice_length = slice(i * stride,
i * stride + kernel_size[0])
xs.append(reshape(inputs[:, slice_length, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to output_filters first, to apply broadcast
weight = permute_dimensions(kernel, (2, 0, 1))
# Shape: (batch, filters, output_length, input_length * kernel_size)
output = x_aggregate * weight
# Shape: (batch, filters, output_length)
output = sum(output, axis=3)
# Shape: (batch, output_length, filters)
return permute_dimensions(output, (0, 2, 1))
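# Shape sketch (hypothetical values): for inputs of shape (batch, 10, 4) with
# kernel_size=(3,), strides=(1,) and 16 filters, `kernel` has shape (8, 12, 16)
# (output_length=8, feature_dim=3*4=12) and the result has shape (batch, 8, 16).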
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format ' + str(data_format))
stride_row, stride_col = strides
output_row, output_col = output_shape
kernel_shape = int_shape(kernel)
_, feature_dim, filters = kernel_shape
xs = []
for i in range(output_row):
for j in range(output_col):
slice_row = slice(i * stride_row,
i * stride_row + kernel_size[0])
slice_col = slice(j * stride_col,
j * stride_col + kernel_size[1])
if data_format == 'channels_first':
xs.append(reshape(inputs[:, :, slice_row, slice_col],
(-1, 1, feature_dim)))
else:
xs.append(reshape(inputs[:, slice_row, slice_col, :],
(-1, 1, feature_dim)))
x_aggregate = concatenate(xs, axis=1)
# transpose kernel to put filters first
weight = permute_dimensions(kernel, (2, 0, 1))
# shape: batch, filters, output_length, input_length * kernel_size
output = x_aggregate * weight
# shape: batch, filters, output_length
output = sum(output, axis=3)
# shape: batch, filters, row, col
output = reshape(output,
(-1, filters, output_row, output_col))
if data_format == 'channels_last':
# shape: batch, row, col, filters
output = permute_dimensions(output, (0, 2, 3, 1))
return output
def reverse(x, axes):
if isinstance(axes, int):
axes = [axes]
cntk_axes = _normalize_axis(axes, x)
begin_index = [0 for _ in cntk_axes]
end_index = [0 for _ in cntk_axes]
strides = [-1 for _ in cntk_axes]
return C.slice(x, cntk_axes, begin_index, end_index, strides)
def _reshape_batch(x, shape):
    # There is a bug in CNTK 2.1's unpack_batch implementation.
if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2:
const_a = C.unpack_batch(x)
const_a = C.reshape(const_a, shape)
return C.to_batch(const_a)
else:
return C.user_function(ReshapeBatch(x, shape[1:]))
def _get_cntk_version():
version = C.__version__
if version.endswith('+'):
version = version[:-1]
    # As a hot fix, ignore every '.' except the first one.
if len(version) > 2 and version[1] == '.':
version = version[:2] + version[2:].replace('.', '')
try:
return float(version)
except:
warnings.warn(
'CNTK backend warning: CNTK version not detected. '
'Will using CNTK 2.0 GA as default.')
return float(2.0)
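# Parsing examples for the scheme above (values only, not code):
#   '2.2'     -> 2.2
#   '2.3.1+'  -> 2.31   (trailing '+' stripped, every '.' after the first dropped)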
class ReshapeBatch(C.ops.functions.UserFunction):
def __init__(self, input, shape, name='reshape_with_batch'):
super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name)
self.from_shape = input.shape
self.target_shape = shape
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape))
num_static_element = np.prod(np.asarray(self.target_shape))
num_batch = int(num_element / num_static_element)
result = arguments.data().as_shape((num_batch,) + self.target_shape)
return None, C.cntk_py.Value(result)
def backward(self, state, root_gradients):
grad_array_view = root_gradients.data()
num_element = root_gradients.shape()[0] * np.prod(np.asarray(self.target_shape))
num_static_element = np.prod(np.asarray(self.from_shape))
num_old_batch = int(num_element / num_static_element)
return C.cntk_py.Value(
grad_array_view.as_shape(
(num_old_batch,) + self.from_shape))
class ConvertToBatch(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK batch axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk variable (parameter/constant)
name: name of this node
"""
def __init__(self, input, name='convert_to_batch'):
super(ConvertToBatch, self).__init__([input], as_numpy=False, name=name)
def infer_outputs(self):
batch_axis = C.Axis.default_batch_axis()
return [
C.output_variable(
self.inputs[0].shape[1:],
self.inputs[0].dtype,
[batch_axis])]
def forward(self, arguments, device=None, outputs_to_retain=None):
return None, C.cntk_py.Value(arguments.data())
def backward(self, state, root_gradients):
return C.cntk_py.Value(root_gradients.data())
class ConvertToStatic(C.ops.functions.UserFunction):
"""Converts input first axis to CNTK static axis.
We may introduce this operation in CNTK native
implementation later.
# Arguments
inputs: a cntk tensor which has batch axis
batch_size: size of batch axis.
name: name of this node.
"""
def __init__(self, input, batch_size, name='convert_to_static'):
super(ConvertToStatic, self).__init__([input], as_numpy=False, name=name)
self.target_shape = (batch_size,) + input.shape
def infer_outputs(self):
return [
C.output_variable(
self.target_shape,
self.inputs[0].dtype,
[])]
def forward(self, arguments, device=None, outputs_to_retain=None):
return None, C.cntk_py.Value(arguments.data())
def backward(self, state, root_gradients):
return C.cntk_py.Value(root_gradients.data())
class LambdaFunc(C.ops.functions.UserFunction):
def __init__(self,
arg,
when=lambda arg: True,
execute=lambda arg: print(arg),
name=''):
self.when = when
self.execute = execute
super(LambdaFunc, self).__init__([arg], name=name)
def infer_outputs(self):
return [
C.output_variable(
self.inputs[0].shape,
self.inputs[0].dtype,
self.inputs[0].dynamic_axes)]
def forward(self, argument, device=None, outputs_to_retain=None):
if self.when(argument):
self.execute(argument)
return None, argument
def backward(self, state, root_gradients):
return root_gradients
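# Usage sketch (hypothetical tensor `x`): LambdaFunc is meant to be wrapped in
# C.user_function to run arbitrary side effects on the forward pass, exactly as
# print_tensor does above.
#   y = C.user_function(LambdaFunc(x, when=lambda v: True, execute=lambda v: print(v)))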
| [
"cntk.cntk_py.Value",
"cntk.constant",
"cntk.element_select",
"numpy.random.binomial",
"cntk.assign",
"cntk.swapaxes",
"cntk.relu",
"cntk.pooling",
"cntk.initializer.normal",
"cntk.log",
"numpy.random.seed",
"warnings.warn",
"cntk.less_equal",
"cntk.sin",
"cntk.one_hot",
"cntk.sequence.broadcast_as",
"cntk.times",
"cntk.minus",
"cntk.round",
"cntk.ops.argmin",
"numpy.random.randint",
"cntk.dropout",
"cntk.splice",
"cntk.classification_error",
"cntk.ops.slice",
"cntk.placeholder",
"cntk.sqrt",
"cntk.Axis.all_axes",
"cntk.unpack_batch",
"numpy.ones",
"cntk.softmax",
"cntk.abs",
"cntk.transpose",
"cntk.alias",
"cntk.convolution_transpose",
"cntk.sequence.last",
"cntk.sequence.past_value",
"cntk.equal",
"cntk.elu",
"cntk.to_sequence",
"cntk.ops.one_hot",
"cntk.less",
"cntk.softplus",
"cntk.plus",
"numpy.prod",
"cntk.set_global_option",
"cntk.Axis.default_dynamic_axis",
"cntk.greater",
"cntk.cntk_py.universal_learner",
"cntk.output_variable",
"cntk.cross_entropy_with_softmax",
"cntk.to_sequence_like",
"cntk.reshape",
"cntk.clip",
"cntk.element_min",
"cntk.ops.element_select",
"cntk.stop_gradient",
"cntk.tanh",
"cntk.combine",
"cntk.pow",
"cntk.slice",
"cntk.cos",
"cntk.to_batch",
"cntk.exp",
"cntk.square",
"cntk.element_max",
"cntk.sequence.unpack",
"cntk.ops.gather",
"cntk.sigmoid",
"cntk.ops.argmax",
"numpy.zeros",
"cntk.device.use_default_device",
"cntk.default_options",
"cntk.greater_equal",
"cntk.convolution",
"numpy.asarray",
"numpy.eye",
"cntk.initializer.uniform",
"cntk.initializer.truncated_normal",
"cntk.reduce_mean",
"cntk.Axis.default_batch_axis",
"cntk.trainer.Trainer",
"cntk.not_equal",
"cntk.reduce_sum",
"cntk.pad",
"collections.defaultdict"
] | [((313, 349), 'cntk.set_global_option', 'C.set_global_option', (['"""align_axis"""', '(1)'], {}), "('align_axis', 1)\n", (332, 349), True, 'import cntk as C\n'), ((371, 400), 'cntk.device.use_default_device', 'C.device.use_default_device', ([], {}), '()\n', (398, 400), True, 'import cntk as C\n'), ((854, 933), 'cntk.constant', 'C.constant', ([], {'shape': '()', 'dtype': 'np.float32', 'value': '(1.0)', 'name': '"""_keras_learning_phase"""'}), "(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase')\n", (864, 933), True, 'import cntk as C\n'), ((1069, 1085), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1080, 1085), False, 'from collections import defaultdict\n'), ((425, 583), 'warnings.warn', 'warnings.warn', (['"""CNTK backend warning: GPU is not detected. CNTK\'s CPU version is not fully optimized,please run with GPU to get better performance."""'], {}), '(\n "CNTK backend warning: GPU is not detected. CNTK\'s CPU version is not fully optimized,please run with GPU to get better performance."\n )\n', (438, 583), False, 'import warnings\n'), ((2269, 2284), 'numpy.asarray', 'np.asarray', (['(1.0)'], {}), '(1.0)\n', (2279, 2284), True, 'import numpy as np\n'), ((17501, 17520), 'cntk.swapaxes', 'C.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (17511, 17520), True, 'import cntk as C\n'), ((19618, 19633), 'cntk.square', 'C.square', (['(x - m)'], {}), '(x - m)\n', (19626, 19633), True, 'import cntk as C\n'), ((20103, 20126), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (20112, 20126), True, 'import cntk as C\n'), ((20778, 20801), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (20787, 20801), True, 'import cntk as C\n'), ((25068, 25097), 'cntk.ops.argmax', 'C.ops.argmax', (['x'], {'axis': 'axis[0]'}), '(x, axis=axis[0])\n', (25080, 25097), True, 'import cntk as C\n'), ((25235, 25264), 'cntk.ops.argmin', 'C.ops.argmin', (['x'], {'axis': 'axis[0]'}), '(x, axis=axis[0])\n', (25247, 25264), True, 'import cntk as C\n'), ((25337, 25348), 'cntk.square', 'C.square', (['x'], {}), '(x)\n', (25345, 25348), True, 'import cntk as C\n'), ((25374, 25382), 'cntk.abs', 'C.abs', (['x'], {}), '(x)\n', (25379, 25382), True, 'import cntk as C\n'), ((25409, 25418), 'cntk.sqrt', 'C.sqrt', (['x'], {}), '(x)\n', (25415, 25418), True, 'import cntk as C\n'), ((25444, 25452), 'cntk.exp', 'C.exp', (['x'], {}), '(x)\n', (25449, 25452), True, 'import cntk as C\n'), ((25478, 25486), 'cntk.log', 'C.log', (['x'], {}), '(x)\n', (25483, 25486), True, 'import cntk as C\n'), ((25514, 25524), 'cntk.round', 'C.round', (['x'], {}), '(x)\n', (25521, 25524), True, 'import cntk as C\n'), ((25554, 25566), 'cntk.sigmoid', 'C.sigmoid', (['x'], {}), '(x)\n', (25563, 25566), True, 'import cntk as C\n'), ((25634, 25645), 'cntk.pow', 'C.pow', (['x', 'a'], {}), '(x, a)\n', (25639, 25645), True, 'import cntk as C\n'), ((25887, 25918), 'cntk.clip', 'C.clip', (['x', 'min_value', 'max_value'], {}), '(x, min_value, max_value)\n', (25893, 25918), True, 'import cntk as C\n'), ((26274, 26292), 'cntk.assign', 'C.assign', (['x', 'new_x'], {}), '(x, new_x)\n', (26282, 26292), True, 'import cntk as C\n'), ((26360, 26426), 'cntk.assign', 'C.assign', (['variable', '(variable * momentum + value * (1.0 - momentum))'], {}), '(variable, variable * momentum + value * (1.0 - momentum))\n', (26368, 26426), True, 'import cntk as C\n'), ((26496, 26515), 'cntk.assign', 'C.assign', (['x', 'result'], {}), '(x, result)\n', (26504, 26515), True, 'import cntk as C\n'), 
((27076, 27089), 'cntk.equal', 'C.equal', (['x', 'y'], {}), '(x, y)\n', (27083, 27089), True, 'import cntk as C\n'), ((27124, 27141), 'cntk.not_equal', 'C.not_equal', (['x', 'y'], {}), '(x, y)\n', (27135, 27141), True, 'import cntk as C\n'), ((27174, 27189), 'cntk.greater', 'C.greater', (['x', 'y'], {}), '(x, y)\n', (27183, 27189), True, 'import cntk as C\n'), ((27228, 27249), 'cntk.greater_equal', 'C.greater_equal', (['x', 'y'], {}), '(x, y)\n', (27243, 27249), True, 'import cntk as C\n'), ((27279, 27291), 'cntk.less', 'C.less', (['x', 'y'], {}), '(x, y)\n', (27285, 27291), True, 'import cntk as C\n'), ((27327, 27345), 'cntk.less_equal', 'C.less_equal', (['x', 'y'], {}), '(x, y)\n', (27339, 27345), True, 'import cntk as C\n'), ((27378, 27397), 'cntk.element_max', 'C.element_max', (['x', 'y'], {}), '(x, y)\n', (27391, 27397), True, 'import cntk as C\n'), ((27430, 27449), 'cntk.element_min', 'C.element_min', (['x', 'y'], {}), '(x, y)\n', (27443, 27449), True, 'import cntk as C\n'), ((27475, 27483), 'cntk.sin', 'C.sin', (['x'], {}), '(x)\n', (27480, 27483), True, 'import cntk as C\n'), ((27509, 27517), 'cntk.cos', 'C.cos', (['x'], {}), '(x)\n', (27514, 27517), True, 'import cntk as C\n'), ((29326, 29348), 'cntk.stop_gradient', 'C.stop_gradient', (['shift'], {}), '(shift)\n', (29341, 29348), True, 'import cntk as C\n'), ((29368, 29385), 'cntk.minus', 'C.minus', (['x', 'shift'], {}), '(x, shift)\n', (29375, 29385), True, 'import cntk as C\n'), ((29681, 29708), 'cntk.plus', 'C.plus', (['shifted_mean', 'shift'], {}), '(shifted_mean, shift)\n', (29687, 29708), True, 'import cntk as C\n'), ((30817, 30849), 'cntk.splice', 'C.splice', (['*tensors'], {'axis': 'axis[0]'}), '(*tensors, axis=axis[0])\n', (30825, 30849), True, 'import cntk as C\n'), ((33044, 33064), 'cntk.transpose', 'C.transpose', (['x', 'axis'], {}), '(x, axis)\n', (33055, 33064), True, 'import cntk as C\n'), ((34544, 34572), 'cntk.splice', 'C.splice', (['*slices'], {'axis': 'axis'}), '(*slices, axis=axis)\n', (34552, 34572), True, 'import cntk as C\n'), ((35137, 35160), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (35146, 35160), True, 'import cntk as C\n'), ((35191, 35218), 'cntk.splice', 'C.splice', (['*temp'], {'axis': 'index'}), '(*temp, axis=index)\n', (35199, 35218), True, 'import cntk as C\n'), ((35245, 35254), 'cntk.tanh', 'C.tanh', (['x'], {}), '(x)\n', (35251, 35254), True, 'import cntk as C\n'), ((44060, 44079), 'cntk.clip', 'C.clip', (['x', '(0.0)', '(1.0)'], {}), '(x, 0.0, 1.0)\n', (44066, 44079), True, 'import cntk as C\n'), ((50105, 50191), 'cntk.convolution', 'C.convolution', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding, padding]'}), '(kernel, x, strides, auto_padding=[False, padding, padding,\n padding])\n', (50118, 50191), True, 'import cntk as C\n'), ((51244, 51367), 'cntk.convolution_transpose', 'C.convolution_transpose', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding, padding]', 'output_shape': 'output_shape'}), '(kernel, x, strides, auto_padding=[False, padding,\n padding, padding], output_shape=output_shape)\n', (51267, 51367), True, 'import cntk as C\n'), ((53443, 53452), 'cntk.relu', 'C.relu', (['x'], {}), '(x)\n', (53449, 53452), True, 'import cntk as C\n'), ((53819, 53838), 'cntk.dropout', 'C.dropout', (['x', 'level'], {}), '(x, level)\n', (53828, 53838), True, 'import cntk as C\n'), ((53959, 53975), 'numpy.prod', 'np.prod', (['x.shape'], {}), '(x.shape)\n', (53966, 53975), True, 'import numpy as np\n'), ((53984, 54003), 
'cntk.reshape', 'C.reshape', (['x', '(-1,)'], {}), '(x, (-1,))\n', (53993, 54003), True, 'import cntk as C\n'), ((54088, 54111), 'cntk.softmax', 'C.softmax', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (54097, 54111), True, 'import cntk as C\n'), ((54142, 54155), 'cntk.softplus', 'C.softplus', (['x'], {}), '(x)\n', (54152, 54155), True, 'import cntk as C\n'), ((54854, 54889), 'cntk.one_hot', 'C.one_hot', (['target', 'output.shape[-1]'], {}), '(target, output.shape[-1])\n', (54863, 54889), True, 'import cntk as C\n'), ((54903, 54934), 'cntk.reshape', 'C.reshape', (['target', 'output.shape'], {}), '(target, output.shape)\n', (54912, 54934), True, 'import cntk as C\n'), ((67909, 67940), 'cntk.one_hot', 'C.one_hot', (['indices', 'num_classes'], {}), '(indices, num_classes)\n', (67918, 67940), True, 'import cntk as C\n'), ((70112, 70173), 'cntk.element_select', 'C.element_select', (['condition', 'then_expression', 'else_expression'], {}), '(condition, then_expression, else_expression)\n', (70128, 70173), True, 'import cntk as C\n'), ((70264, 70272), 'cntk.elu', 'C.elu', (['x'], {}), '(x)\n', (70269, 70272), True, 'import cntk as C\n'), ((70444, 70485), 'cntk.one_hot', 'C.one_hot', (['targets', 'predictions.shape[-1]'], {}), '(targets, predictions.shape[-1])\n', (70453, 70485), True, 'import cntk as C\n'), ((70499, 70552), 'cntk.classification_error', 'C.classification_error', (['predictions', '_targets'], {'topN': 'k'}), '(predictions, _targets, topN=k)\n', (70521, 70552), True, 'import cntk as C\n'), ((71478, 71592), 'cntk.convolution_transpose', 'C.convolution_transpose', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding]', 'output_shape': 'output_shape'}), '(kernel, x, strides, auto_padding=[False, padding,\n padding], output_shape=output_shape)\n', (71501, 71592), True, 'import cntk as C\n'), ((71818, 71839), 'cntk.alias', 'C.alias', (['x'], {'name': 'name'}), '(x, name=name)\n', (71825, 71839), True, 'import cntk as C\n'), ((72461, 72494), 'cntk.transpose', 'C.transpose', (['kernel', '(3, 2, 0, 1)'], {}), '(kernel, (3, 2, 0, 1))\n', (72472, 72494), True, 'import cntk as C\n'), ((73365, 73401), 'cntk.transpose', 'C.transpose', (['kernel', '(4, 3, 0, 1, 2)'], {}), '(kernel, (4, 3, 0, 1, 2))\n', (73376, 73401), True, 'import cntk as C\n'), ((77621, 77675), 'cntk.slice', 'C.slice', (['x', 'cntk_axes', 'begin_index', 'end_index', 'strides'], {}), '(x, cntk_axes, begin_index, end_index, strides)\n', (77628, 77675), True, 'import cntk as C\n'), ((10726, 10740), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (10733, 10740), True, 'import numpy as np\n'), ((11156, 11186), 'numpy.random.randint', 'np.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (11173, 11186), True, 'import numpy as np\n'), ((11188, 11208), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (11202, 11208), True, 'import numpy as np\n'), ((12455, 12481), 'numpy.random.randint', 'np.random.randint', (['(10000.0)'], {}), '(10000.0)\n', (12472, 12481), True, 'import numpy as np\n'), ((13139, 13169), 'numpy.random.randint', 'np.random.randint', (['(100000000.0)'], {}), '(100000000.0)\n', (13156, 13169), True, 'import numpy as np\n'), ((14113, 14145), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000000.0)'], {}), '(1, 10000000.0)\n', (14130, 14145), True, 'import numpy as np\n'), ((14939, 14951), 'numpy.eye', 'np.eye', (['size'], {}), '(size)\n', (14945, 14951), True, 'import numpy as np\n'), ((15954, 15967), 'cntk.times', 'C.times', (['x', 'y'], {}), '(x, 
y)\n', (15961, 15967), True, 'import cntk as C\n'), ((17778, 17810), 'cntk.ops.gather', 'C.ops.gather', (['reference', 'indices'], {}), '(reference, indices)\n', (17790, 17810), True, 'import cntk as C\n'), ((17887, 17922), 'cntk.ops.one_hot', 'C.ops.one_hot', (['indices', 'num_classes'], {}), '(indices, num_classes)\n', (17900, 17922), True, 'import cntk as C\n'), ((23765, 23784), 'cntk.reshape', 'C.reshape', (['x', 'shape'], {}), '(x, shape)\n', (23774, 23784), True, 'import cntk as C\n'), ((24272, 24296), 'cntk.reduce_sum', 'C.reduce_sum', (['any_matrix'], {}), '(any_matrix)\n', (24284, 24296), True, 'import cntk as C\n'), ((24641, 24665), 'cntk.reduce_sum', 'C.reduce_sum', (['all_matrix'], {}), '(all_matrix)\n', (24653, 24665), True, 'import cntk as C\n'), ((25597, 25605), 'cntk.abs', 'C.abs', (['x'], {}), '(x)\n', (25602, 25605), True, 'import cntk as C\n'), ((26018, 26035), 'cntk.sigmoid', 'C.sigmoid', (['output'], {}), '(output)\n', (26027, 26035), True, 'import cntk as C\n'), ((26910, 26969), 'cntk.constant', 'C.constant', (['(0)'], {'shape': 'v.shape', 'name': '"""keras_grad_placeholder"""'}), "(0, shape=v.shape, name='keras_grad_placeholder')\n", (26920, 26969), True, 'import cntk as C\n'), ((28608, 28637), 'cntk.reshape', 'C.reshape', (['mean', 'target_shape'], {}), '(mean, target_shape)\n', (28617, 28637), True, 'import cntk as C\n'), ((28662, 28694), 'cntk.reshape', 'C.reshape', (['variant', 'target_shape'], {}), '(variant, target_shape)\n', (28671, 28694), True, 'import cntk as C\n'), ((28721, 28751), 'cntk.reshape', 'C.reshape', (['gamma', 'target_shape'], {}), '(gamma, target_shape)\n', (28730, 28751), True, 'import cntk as C\n'), ((28777, 28806), 'cntk.reshape', 'C.reshape', (['beta', 'target_shape'], {}), '(beta, target_shape)\n', (28786, 28806), True, 'import cntk as C\n'), ((29432, 29470), 'cntk.reduce_mean', 'C.reduce_mean', (['shifted_mean'], {'axis': 'axis'}), '(shifted_mean, axis=axis)\n', (29445, 29470), True, 'import cntk as C\n'), ((29501, 29518), 'cntk.minus', 'C.minus', (['x', 'shift'], {}), '(x, shift)\n', (29508, 29518), True, 'import cntk as C\n'), ((29567, 29606), 'cntk.reduce_mean', 'C.reduce_mean', (['variance_mean'], {'axis': 'axis'}), '(variance_mean, axis=axis)\n', (29580, 29606), True, 'import cntk as C\n'), ((29646, 29668), 'cntk.square', 'C.square', (['shifted_mean'], {}), '(shifted_mean)\n', (29654, 29668), True, 'import cntk as C\n'), ((31069, 31088), 'cntk.reshape', 'C.reshape', (['x', 'shape'], {}), '(x, shape)\n', (31078, 31088), True, 'import cntk as C\n'), ((34427, 34457), 'cntk.ops.slice', 'C.ops.slice', (['x', 'axis', 'i', '(i + 1)'], {}), '(x, axis, i, i + 1)\n', (34438, 34457), True, 'import cntk as C\n'), ((38958, 39010), 'cntk.splice', 'C.splice', (['final_output', 'output_slice'], {'axis': 'time_axis'}), '(final_output, output_slice, axis=time_axis)\n', (38966, 39010), True, 'import cntk as C\n'), ((40814, 40839), 'cntk.to_sequence', 'C.to_sequence', (['rnn_inputs'], {}), '(rnn_inputs)\n', (40827, 40839), True, 'import cntk as C\n'), ((41733, 41769), 'cntk.to_sequence_like', 'C.to_sequence_like', (['mask', 'rnn_inputs'], {}), '(mask, rnn_inputs)\n', (41751, 41769), True, 'import cntk as C\n'), ((41809, 41841), 'cntk.default_options', 'C.default_options', ([], {'axis_offset': '(1)'}), '(axis_offset=1)\n', (41826, 41841), True, 'import cntk as C\n'), ((42888, 42917), 'cntk.sequence.last', 'C.sequence.last', (['final_output'], {}), '(final_output)\n', (42903, 42917), True, 'import cntk as C\n'), ((43028, 43083), 'cntk.sequence.unpack', 
'C.sequence.unpack', (['final_output', '(0)'], {'no_mask_output': '(True)'}), '(final_output, 0, no_mask_output=True)\n', (43045, 43083), True, 'import cntk as C\n'), ((44644, 44663), 'cntk.swapaxes', 'C.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (44654, 44663), True, 'import cntk as C\n'), ((44681, 44705), 'cntk.swapaxes', 'C.swapaxes', (['kernel', '(0)', '(2)'], {}), '(kernel, 0, 2)\n', (44691, 44705), True, 'import cntk as C\n'), ((44976, 44995), 'cntk.swapaxes', 'C.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (44986, 44995), True, 'import cntk as C\n'), ((45553, 45626), 'cntk.convolution', 'C.convolution', (['kernel', 'x', 'strides'], {'auto_padding': '[False, padding, padding]'}), '(kernel, x, strides, auto_padding=[False, padding, padding])\n', (45566, 45626), True, 'import cntk as C\n'), ((45875, 45969), 'cntk.convolution', 'C.convolution', (['kernel', 'x'], {'strides': 'dilation_rate[0]', 'auto_padding': '[False, padding, padding]'}), '(kernel, x, strides=dilation_rate[0], auto_padding=[False,\n padding, padding])\n', (45888, 45969), True, 'import cntk as C\n'), ((46816, 46859), 'cntk.transpose', 'C.transpose', (['depthwise_kernel', '(1, 0, 2, 3)'], {}), '(depthwise_kernel, (1, 0, 2, 3))\n', (46827, 46859), True, 'import cntk as C\n'), ((47137, 47251), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'strides', 'auto_padding': '[False, padding, padding]', 'groups': 'x.shape[0]'}), '(depthwise_kernel, x, strides=strides, auto_padding=[False,\n padding, padding], groups=x.shape[0])\n', (47150, 47251), True, 'import cntk as C\n'), ((47338, 47413), 'cntk.convolution', 'C.convolution', (['pointwise_kernel', 'x'], {'strides': '(1, 1, 1)', 'auto_padding': '[False]'}), '(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False])\n', (47351, 47413), True, 'import cntk as C\n'), ((47760, 47865), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'dilation_rate[0]', 'auto_padding': '[False, padding, padding]'}), '(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[\n False, padding, padding])\n', (47773, 47865), True, 'import cntk as C\n'), ((47925, 48000), 'cntk.convolution', 'C.convolution', (['pointwise_kernel', 'x'], {'strides': '(1, 1, 1)', 'auto_padding': '[False]'}), '(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False])\n', (47938, 48000), True, 'import cntk as C\n'), ((48610, 48653), 'cntk.transpose', 'C.transpose', (['depthwise_kernel', '(1, 0, 2, 3)'], {}), '(depthwise_kernel, (1, 0, 2, 3))\n', (48621, 48653), True, 'import cntk as C\n'), ((48850, 48964), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'strides', 'auto_padding': '[False, padding, padding]', 'groups': 'x.shape[0]'}), '(depthwise_kernel, x, strides=strides, auto_padding=[False,\n padding, padding], groups=x.shape[0])\n', (48863, 48964), True, 'import cntk as C\n'), ((49333, 49457), 'cntk.convolution', 'C.convolution', (['depthwise_kernel', 'x'], {'strides': 'dilation_rate[0]', 'auto_padding': '[False, padding, padding]', 'groups': 'x.shape[0]'}), '(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[\n False, padding, padding], groups=x.shape[0])\n', (49346, 49457), True, 'import cntk as C\n'), ((52011, 52082), 'cntk.pooling', 'C.pooling', (['x', 'C.MAX_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding])\n', (52020, 52082), True, 'import cntk as C\n'), ((52899, 52970), 'cntk.pooling', 'C.pooling', (['x', 
'C.MAX_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding])\n', (52908, 52970), True, 'import cntk as C\n'), ((53424, 53434), 'cntk.relu', 'C.relu', (['(-x)'], {}), '(-x)\n', (53430, 53434), True, 'import cntk as C\n'), ((53495, 53520), 'cntk.clip', 'C.clip', (['x', '(0.0)', 'max_value'], {}), '(x, 0.0, max_value)\n', (53501, 53520), True, 'import cntk as C\n'), ((54309, 54353), 'cntk.cross_entropy_with_softmax', 'C.cross_entropy_with_softmax', (['output', 'target'], {}), '(output, target)\n', (54337, 54353), True, 'import cntk as C\n'), ((54443, 54464), 'cntk.reshape', 'C.reshape', (['result', '()'], {}), '(result, ())\n', (54452, 54464), True, 'import cntk as C\n'), ((54564, 54593), 'cntk.reduce_sum', 'C.reduce_sum', (['output'], {'axis': '(-1)'}), '(output, axis=-1)\n', (54576, 54593), True, 'import cntk as C\n'), ((69342, 69368), 'cntk.stop_gradient', 'C.stop_gradient', (['variables'], {}), '(variables)\n', (69357, 69368), True, 'import cntk as C\n'), ((70568, 70595), 'cntk.reshape', 'C.reshape', (['result'], {'shape': '()'}), '(result, shape=())\n', (70577, 70595), True, 'import cntk as C\n'), ((72154, 72179), 'cntk.transpose', 'C.transpose', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (72165, 72179), True, 'import cntk as C\n'), ((72850, 72875), 'cntk.transpose', 'C.transpose', (['x', '(1, 2, 0)'], {}), '(x, (1, 2, 0))\n', (72861, 72875), True, 'import cntk as C\n'), ((73255, 73283), 'cntk.transpose', 'C.transpose', (['x', '(3, 0, 1, 2)'], {}), '(x, (3, 0, 1, 2))\n', (73266, 73283), True, 'import cntk as C\n'), ((73523, 73551), 'cntk.transpose', 'C.transpose', (['x', '(1, 2, 3, 0)'], {}), '(x, (1, 2, 3, 0))\n', (73534, 73551), True, 'import cntk as C\n'), ((77855, 77872), 'cntk.unpack_batch', 'C.unpack_batch', (['x'], {}), '(x)\n', (77869, 77872), True, 'import cntk as C\n'), ((77891, 77916), 'cntk.reshape', 'C.reshape', (['const_a', 'shape'], {}), '(const_a, shape)\n', (77900, 77916), True, 'import cntk as C\n'), ((77932, 77951), 'cntk.to_batch', 'C.to_batch', (['const_a'], {}), '(const_a)\n', (77942, 77951), True, 'import cntk as C\n'), ((78836, 78863), 'cntk.Axis.default_batch_axis', 'C.Axis.default_batch_axis', ([], {}), '()\n', (78861, 78863), True, 'import cntk as C\n'), ((80338, 80365), 'cntk.Axis.default_batch_axis', 'C.Axis.default_batch_axis', ([], {}), '()\n', (80363, 80365), True, 'import cntk as C\n'), ((3166, 3200), 'cntk.element_select', 'C.element_select', (['training', 'x', 'alt'], {}), '(training, x, alt)\n', (3182, 3200), True, 'import cntk as C\n'), ((12703, 12742), 'cntk.initializer.uniform', 'C.initializer.uniform', (['scale'], {'seed': 'seed'}), '(scale, seed=seed)\n', (12724, 12742), True, 'import cntk as C\n'), ((13366, 13410), 'cntk.initializer.normal', 'C.initializer.normal', ([], {'scale': 'scale', 'seed': 'seed'}), '(scale=scale, seed=seed)\n', (13386, 13410), True, 'import cntk as C\n'), ((14289, 14338), 'cntk.initializer.truncated_normal', 'C.initializer.truncated_normal', (['stddev'], {'seed': 'seed'}), '(stddev, seed=seed)\n', (14319, 14338), True, 'import cntk as C\n'), ((14581, 14603), 'numpy.zeros', 'np.zeros', (['shape', 'ctype'], {}), '(shape, ctype)\n', (14589, 14603), True, 'import numpy as np\n'), ((14785, 14806), 'numpy.ones', 'np.ones', (['shape', 'ctype'], {}), '(shape, ctype)\n', (14792, 14806), True, 'import numpy as np\n'), ((15849, 15881), 'cntk.transpose', 'C.transpose', (['y'], {'perm': 'permutation'}), '(y, perm=permutation)\n', (15860, 15881), True, 
'import cntk as C\n'), ((17108, 17131), 'cntk.swapaxes', 'C.swapaxes', (['x', 'i', '(i + 1)'], {}), '(x, i, i + 1)\n', (17118, 17131), True, 'import cntk as C\n'), ((17219, 17242), 'cntk.swapaxes', 'C.swapaxes', (['y', 'i', '(i - 1)'], {}), '(y, i, i - 1)\n', (17229, 17242), True, 'import cntk as C\n'), ((21328, 21369), 'cntk.splice', 'C.splice', (['*tmp'], {'axis': '(i - num_dynamic_axis)'}), '(*tmp, axis=i - num_dynamic_axis)\n', (21336, 21369), True, 'import cntk as C\n'), ((23066, 23083), 'cntk.Axis.all_axes', 'C.Axis.all_axes', ([], {}), '()\n', (23081, 23083), True, 'import cntk as C\n'), ((23395, 23460), 'cntk.reshape', 'C.reshape', (['result'], {'shape': '()', 'begin_axis': 'index', 'end_axis': '(index + 1)'}), '(result, shape=(), begin_axis=index, end_axis=index + 1)\n', (23404, 23460), True, 'import cntk as C\n'), ((24956, 24973), 'cntk.Axis.all_axes', 'C.Axis.all_axes', ([], {}), '()\n', (24971, 24973), True, 'import cntk as C\n'), ((26115, 26128), 'cntk.log', 'C.log', (['output'], {}), '(output)\n', (26120, 26128), True, 'import cntk as C\n'), ((26148, 26167), 'cntk.log', 'C.log', (['(1.0 - output)'], {}), '(1.0 - output)\n', (26153, 26167), True, 'import cntk as C\n'), ((29281, 29312), 'cntk.reduce_mean', 'C.reduce_mean', (['shift'], {'axis': 'axis'}), '(shift, axis=axis)\n', (29294, 29312), True, 'import cntk as C\n'), ((32296, 32319), 'cntk.reshape', 'C.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (32305, 32319), True, 'import cntk as C\n'), ((36429, 36469), 'cntk.ops.slice', 'C.ops.slice', (['inputs', 'time_axis', 'i', '(i + 1)'], {}), '(inputs, time_axis, i, i + 1)\n', (36440, 36469), True, 'import cntk as C\n'), ((37602, 37642), 'cntk.ops.slice', 'C.ops.slice', (['inputs', 'time_axis', 'i', '(i + 1)'], {}), '(inputs, time_axis, i, i + 1)\n', (37613, 37642), True, 'import cntk as C\n'), ((42941, 42959), 'cntk.sequence.last', 'C.sequence.last', (['s'], {}), '(s)\n', (42956, 42959), True, 'import cntk as C\n'), ((43957, 43968), 'cntk.square', 'C.square', (['x'], {}), '(x)\n', (43965, 43968), True, 'import cntk as C\n'), ((52185, 52256), 'cntk.pooling', 'C.pooling', (['x', 'C.AVG_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding])\n', (52194, 52256), True, 'import cntk as C\n'), ((53073, 53144), 'cntk.pooling', 'C.pooling', (['x', 'C.AVG_POOLING', 'pool_size', 'strides'], {'auto_padding': '[padding]'}), '(x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding])\n', (53082, 53144), True, 'import cntk as C\n'), ((54195, 54203), 'cntk.abs', 'C.abs', (['x'], {}), '(x)\n', (54200, 54203), True, 'import cntk as C\n'), ((55923, 55959), 'cntk.combine', 'C.combine', (['[u.output for u in u_ops]'], {}), '([u.output for u in u_ops])\n', (55932, 55959), True, 'import cntk as C\n'), ((57442, 57473), 'cntk.combine', 'C.combine', (['self.metrics_outputs'], {}), '(self.metrics_outputs)\n', (57451, 57473), True, 'import cntk as C\n'), ((63010, 63045), 'cntk.pad', 'C.pad', (['x'], {'pattern': '[padding, (0, 0)]'}), '(x, pattern=[padding, (0, 0)])\n', (63015, 63045), True, 'import cntk as C\n'), ((63192, 63235), 'cntk.pad', 'C.pad', (['x'], {'pattern': '[(0, 0), padding, (0, 0)]'}), '(x, pattern=[(0, 0), padding, (0, 0)])\n', (63197, 63235), True, 'import cntk as C\n'), ((63857, 63896), 'cntk.constant', 'C.constant', ([], {'value': '(0)', 'shape': 'prefix_shape'}), '(value=0, shape=prefix_shape)\n', (63867, 63896), True, 'import cntk as C\n'), ((64115, 64155), 'cntk.constant', 'C.constant', ([], 
{'value': '(0)', 'shape': 'postfix_shape'}), '(value=0, shape=postfix_shape)\n', (64125, 64155), True, 'import cntk as C\n'), ((69046, 69063), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (69056, 69063), True, 'import numpy as np\n'), ((70353, 70368), 'cntk.greater', 'C.greater', (['x', '(0)'], {}), '(x, 0)\n', (70362, 70368), True, 'import cntk as C\n'), ((73799, 73828), 'cntk.Axis.default_dynamic_axis', 'C.Axis.default_dynamic_axis', ([], {}), '()\n', (73826, 73828), True, 'import cntk as C\n'), ((78362, 78472), 'warnings.warn', 'warnings.warn', (['"""CNTK backend warning: CNTK version not detected. Will using CNTK 2.0 GA as default."""'], {}), "(\n 'CNTK backend warning: CNTK version not detected. Will using CNTK 2.0 GA as default.'\n )\n", (78375, 78472), False, 'import warnings\n'), ((78893, 78965), 'cntk.output_variable', 'C.output_variable', (['self.target_shape', 'self.inputs[0].dtype', '[batch_axis]'], {}), '(self.target_shape, self.inputs[0].dtype, [batch_axis])\n', (78910, 78965), True, 'import cntk as C\n'), ((79207, 79236), 'numpy.asarray', 'np.asarray', (['self.target_shape'], {}), '(self.target_shape)\n', (79217, 79236), True, 'import numpy as np\n'), ((79394, 79417), 'cntk.cntk_py.Value', 'C.cntk_py.Value', (['result'], {}), '(result)\n', (79409, 79417), True, 'import cntk as C\n'), ((79640, 79667), 'numpy.asarray', 'np.asarray', (['self.from_shape'], {}), '(self.from_shape)\n', (79650, 79667), True, 'import numpy as np\n'), ((80395, 80474), 'cntk.output_variable', 'C.output_variable', (['self.inputs[0].shape[1:]', 'self.inputs[0].dtype', '[batch_axis]'], {}), '(self.inputs[0].shape[1:], self.inputs[0].dtype, [batch_axis])\n', (80412, 80474), True, 'import cntk as C\n'), ((81357, 81419), 'cntk.output_variable', 'C.output_variable', (['self.target_shape', 'self.inputs[0].dtype', '[]'], {}), '(self.target_shape, self.inputs[0].dtype, [])\n', (81374, 81419), True, 'import cntk as C\n'), ((82085, 82180), 'cntk.output_variable', 'C.output_variable', (['self.inputs[0].shape', 'self.inputs[0].dtype', 'self.inputs[0].dynamic_axes'], {}), '(self.inputs[0].shape, self.inputs[0].dtype, self.inputs[0\n ].dynamic_axes)\n', (82102, 82180), True, 'import cntk as C\n'), ((31418, 31563), 'warnings.warn', 'warnings.warn', (['"""Warning: CNTK backend does not support collapse of batch axis with inferred dimension. The reshape did not take place."""'], {}), "(\n 'Warning: CNTK backend does not support collapse of batch axis with inferred dimension. 
The reshape did not take place.'\n )\n", (31431, 31563), False, 'import warnings\n'), ((36834, 36872), 'cntk.ops.slice', 'C.ops.slice', (['mask', 'time_axis', 'i', '(i + 1)'], {}), '(mask, time_axis, i, i + 1)\n', (36845, 36872), True, 'import cntk as C\n'), ((37117, 37170), 'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'output', 'prev_output'], {}), '(mask_slice, output, prev_output)\n', (37137, 37170), True, 'import cntk as C\n'), ((37999, 38037), 'cntk.ops.slice', 'C.ops.slice', (['mask', 'time_axis', 'i', '(i + 1)'], {}), '(mask, time_axis, i, i + 1)\n', (38010, 38037), True, 'import cntk as C\n'), ((38274, 38327), 'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'output', 'prev_output'], {}), '(mask_slice, output, prev_output)\n', (38294, 38327), True, 'import cntk as C\n'), ((41945, 41987), 'cntk.placeholder', 'C.placeholder', ([], {'dynamic_axes': 'x.dynamic_axes'}), '(dynamic_axes=x.dynamic_axes)\n', (41958, 41987), True, 'import cntk as C\n'), ((56690, 56746), 'cntk.cntk_py.universal_learner', 'C.cntk_py.universal_learner', (['p_list', 'u_list', 'update_func'], {}), '(p_list, u_list, update_func)\n', (56717, 56746), True, 'import cntk as C\n'), ((56950, 57001), 'cntk.trainer.Trainer', 'C.trainer.Trainer', (['outputs[0]', 'criterion', '[learner]'], {}), '(outputs[0], criterion, [learner])\n', (56967, 57001), True, 'import cntk as C\n'), ((57264, 57312), 'cntk.combine', 'C.combine', (['[_.output for _ in unrelated_updates]'], {}), '([_.output for _ in unrelated_updates])\n', (57273, 57312), True, 'import cntk as C\n'), ((57719, 57750), 'cntk.combine', 'C.combine', (['self.metrics_outputs'], {}), '(self.metrics_outputs)\n', (57728, 57750), True, 'import cntk as C\n'), ((58902, 58919), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (58912, 58919), True, 'import numpy as np\n'), ((79141, 79168), 'numpy.asarray', 'np.asarray', (['self.from_shape'], {}), '(self.from_shape)\n', (79151, 79168), True, 'import numpy as np\n'), ((79572, 79601), 'numpy.asarray', 'np.asarray', (['self.target_shape'], {}), '(self.target_shape)\n', (79582, 79601), True, 'import numpy as np\n'), ((11650, 11680), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p', 'size'], {}), '(1, p, size)\n', (11668, 11680), True, 'import numpy as np\n'), ((28425, 28455), 'cntk.reduce_mean', 'C.reduce_mean', (['gamma', '(axis - 1)'], {}), '(gamma, axis - 1)\n', (28438, 28455), True, 'import cntk as C\n'), ((28483, 28512), 'cntk.reduce_mean', 'C.reduce_mean', (['beta', '(axis - 1)'], {}), '(beta, axis - 1)\n', (28496, 28512), True, 'import cntk as C\n'), ((30621, 30632), 'cntk.sqrt', 'C.sqrt', (['var'], {}), '(var)\n', (30627, 30632), True, 'import cntk as C\n'), ((40197, 40210), 'cntk.to_batch', 'C.to_batch', (['s'], {}), '(s)\n', (40207, 40210), True, 'import cntk as C\n'), ((42121, 42148), 'cntk.sequence.past_value', 'C.sequence.past_value', (['p', 's'], {}), '(p, s)\n', (42142, 42148), True, 'import cntk as C\n'), ((42479, 42504), 'cntk.element_select', 'C.element_select', (['m', 'n', 's'], {}), '(m, n, s)\n', (42495, 42504), True, 'import cntk as C\n'), ((43463, 43482), 'cntk.unpack_batch', 'C.unpack_batch', (['l_s'], {}), '(l_s)\n', (43477, 43482), True, 'import cntk as C\n'), ((54743, 54756), 'cntk.log', 'C.log', (['output'], {}), '(output)\n', (54748, 54756), True, 'import cntk as C\n'), ((74098, 74125), 'cntk.Axis.default_batch_axis', 'C.Axis.default_batch_axis', ([], {}), '()\n', (74123, 74125), True, 'import cntk as C\n'), ((37328, 37368), 
'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'n_s', 's'], {}), '(mask_slice, n_s, s)\n', (37348, 37368), True, 'import cntk as C\n'), ((38485, 38525), 'cntk.ops.element_select', 'C.ops.element_select', (['mask_slice', 'n_s', 's'], {}), '(mask_slice, n_s, s)\n', (38505, 38525), True, 'import cntk as C\n'), ((41365, 41410), 'cntk.sequence.broadcast_as', 'C.sequence.broadcast_as', (['constant', 'rnn_inputs'], {}), '(constant, rnn_inputs)\n', (41388, 41410), True, 'import cntk as C\n'), ((55663, 55693), 'cntk.assign', 'C.assign', (['update[0]', 'update[1]'], {}), '(update[0], update[1])\n', (55671, 55693), True, 'import cntk as C\n'), ((41099, 41137), 'cntk.sequence.broadcast_as', 'C.sequence.broadcast_as', (['c', 'rnn_inputs'], {}), '(c, rnn_inputs)\n', (41122, 41137), True, 'import cntk as C\n')] |
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.endpoints.resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is an implementation of the resource protection provider logic of
OAuth 1.0 RFC 5849.
"""
from __future__ import absolute_import, unicode_literals
from oauthlib.common import log
from .base import BaseEndpoint
from .. import errors
class ResourceEndpoint(BaseEndpoint):
"""An endpoint responsible for protecting resources.
Typical use is to instantiate with a request validator and invoke the
``validate_protected_resource_request`` in a decorator around a view
function. If the request is valid, invoke and return the response of the
view. If invalid create and return an error response directly from the
decorator.
See :doc:`/oauth1/validator` for details on which validator methods to implement
for this endpoint.
An example decorator::
from functools import wraps
from your_validator import your_validator
from oauthlib.oauth1 import ResourceEndpoint
endpoint = ResourceEndpoint(your_validator)
def require_oauth(realms=None):
def decorator(f):
@wraps(f)
def wrapper(request, *args, **kwargs):
                v, r = endpoint.validate_protected_resource_request(
                    request.url,
                    http_method=request.method,
                    body=request.data,
                    headers=request.headers,
                    realms=realms or [])
                if v:
                    return f(*args, **kwargs)
                else:
                    return abort(403)
            return wrapper
        return decorator
"""
def validate_protected_resource_request(self, uri, http_method='GET',
body=None, headers=None, realms=None):
"""Create a request token response, with a new request token if valid.
:param uri: The full URI of the token request.
:param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc.
:param body: The request body as a string.
:param headers: The request headers as a dict.
:param realms: A list of realms the resource is protected under.
This will be supplied to the ``validate_realms``
method of the request validator.
:returns: A tuple of 2 elements.
1. True if valid, False otherwise.
2. An oauthlib.common.Request object.
"""
try:
request = self._create_request(uri, http_method, body, headers)
except errors.OAuth1Error:
return False, None
try:
self._check_transport_security(request)
self._check_mandatory_parameters(request)
except errors.OAuth1Error:
return False, request
if not request.resource_owner_key:
return False, request
if not self.request_validator.check_access_token(
request.resource_owner_key):
return False, request
if not self.request_validator.validate_timestamp_and_nonce(
request.client_key, request.timestamp, request.nonce, request,
access_token=request.resource_owner_key):
return False, request
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.request_validator.validate_client_key(
request.client_key, request)
if not valid_client:
request.client_key = self.request_validator.dummy_client
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
valid_resource_owner = self.request_validator.validate_access_token(
request.client_key, request.resource_owner_key, request)
if not valid_resource_owner:
request.resource_owner_key = self.request_validator.dummy_access_token
# Note that `realm`_ is only used in authorization headers and how
        # it should be interpreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
# This first step (obtaining request token) need not require a realm
# and can then be identified by checking the require_resource_owner
        # flag and absence of realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
valid_realm = self.request_validator.validate_realms(request.client_key,
request.resource_owner_key, request, uri=request.uri,
realms=realms)
valid_signature = self._check_signature(request)
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_realm,
valid_signature))
if not v:
log.info("[Failure] request verification failed.")
log.info("Valid client: %s", valid_client)
log.info("Valid token: %s", valid_resource_owner)
log.info("Valid realm: %s", valid_realm)
log.info("Valid signature: %s", valid_signature)
return v, request
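# Usage sketch (hypothetical validator and request values): the check can also be
# called directly, outside any decorator.
#   endpoint = ResourceEndpoint(your_validator)
#   valid, oauth_request = endpoint.validate_protected_resource_request(
#       'https://api.example.com/photos', http_method='GET',
#       body=None, headers={'Authorization': 'OAuth ...'}, realms=['photos'])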
| [
"oauthlib.common.log.info"
] | [((6638, 6688), 'oauthlib.common.log.info', 'log.info', (['"""[Failure] request verification failed."""'], {}), "('[Failure] request verification failed.')\n", (6646, 6688), False, 'from oauthlib.common import log\n'), ((6701, 6743), 'oauthlib.common.log.info', 'log.info', (['"""Valid client: %s"""', 'valid_client'], {}), "('Valid client: %s', valid_client)\n", (6709, 6743), False, 'from oauthlib.common import log\n'), ((6756, 6805), 'oauthlib.common.log.info', 'log.info', (['"""Valid token: %s"""', 'valid_resource_owner'], {}), "('Valid token: %s', valid_resource_owner)\n", (6764, 6805), False, 'from oauthlib.common import log\n'), ((6818, 6858), 'oauthlib.common.log.info', 'log.info', (['"""Valid realm: %s"""', 'valid_realm'], {}), "('Valid realm: %s', valid_realm)\n", (6826, 6858), False, 'from oauthlib.common import log\n'), ((6871, 6919), 'oauthlib.common.log.info', 'log.info', (['"""Valid signature: %s"""', 'valid_signature'], {}), "('Valid signature: %s', valid_signature)\n", (6879, 6919), False, 'from oauthlib.common import log\n')] |
# Natural Language Toolkit: Aligner Utilities
#
# Copyright (C) 2001-2015 NLTK Project
# Author: <NAME>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from nltk.align.api import Alignment
def pharaohtext2tuples(pharaoh_text):
"""
Converts pharaoh text format into an Alignment object (a list of tuples).
>>> pharaoh_text = '0-0 2-1 9-2 21-3 10-4 7-5'
>>> pharaohtext2tuples(pharaoh_text)
Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10, 4), (21, 3)])
:type pharaoh_text: str
:param pharaoh_text: the word alignment outputs in the pharaoh output format
:rtype: Alignment
:return: An Alignment object that contains a list of integer tuples
"""
    # Convert each 'i-j' alignment point string into a tuple of integers.
list_of_tuples = [tuple(map(int,a.split('-'))) for a in pharaoh_text.split()]
return Alignment(list_of_tuples)
def alignment2pharaohtext(alignment):
"""
Converts an Alignment object (a list of tuples) into pharaoh text format.
>>> alignment = [(0, 0), (2, 1), (9, 2), (21, 3), (10, 4), (7, 5)]
>>> alignment2pharaohtext(alignment)
'0-0 2-1 9-2 21-3 10-4 7-5'
:type alignment: Alignment
:param alignment: An Alignment object that contains a list of integer tuples
:rtype: str
:return: the word alignment outputs in the pharaoh output format
"""
pharaoh_text = ' '.join(str(i) + "-" + str(j) for i,j in alignment)
return pharaoh_text
| [
"nltk.align.api.Alignment"
] | [((893, 918), 'nltk.align.api.Alignment', 'Alignment', (['list_of_tuples'], {}), '(list_of_tuples)\n', (902, 918), False, 'from nltk.align.api import Alignment\n')] |
#!/usr/bin/env python3
import copy
import math
import torch
from ..distributions import MultivariateNormal
from ..lazy import DiagLazyTensor, LowRankRootAddedDiagLazyTensor, LowRankRootLazyTensor, MatmulLazyTensor, delazify
from ..mlls import InducingPointKernelAddedLossTerm
from ..models import exact_prediction_strategies
from ..utils.cholesky import psd_safe_cholesky
from .kernel import Kernel
class InducingPointKernel(Kernel):
def __init__(self, base_kernel, inducing_points, likelihood, active_dims=None):
super(InducingPointKernel, self).__init__(active_dims=active_dims)
self.base_kernel = base_kernel
self.likelihood = likelihood
if inducing_points.ndimension() == 1:
inducing_points = inducing_points.unsqueeze(-1)
self.register_parameter(name="inducing_points", parameter=torch.nn.Parameter(inducing_points))
self.register_added_loss_term("inducing_point_loss_term")
def _clear_cache(self):
if hasattr(self, "_cached_kernel_mat"):
del self._cached_kernel_mat
@property
def _inducing_mat(self):
if not self.training and hasattr(self, "_cached_kernel_mat"):
return self._cached_kernel_mat
else:
res = delazify(self.base_kernel(self.inducing_points, self.inducing_points))
if not self.training:
self._cached_kernel_mat = res
return res
@property
def _inducing_inv_root(self):
if not self.training and hasattr(self, "_cached_kernel_inv_root"):
return self._cached_kernel_inv_root
else:
chol = psd_safe_cholesky(self._inducing_mat, upper=True)
eye = torch.eye(chol.size(-1), device=chol.device, dtype=chol.dtype)
inv_root = torch.triangular_solve(eye, chol)[0]
res = inv_root
if not self.training:
self._cached_kernel_inv_root = res
return res
def _get_covariance(self, x1, x2):
k_ux1 = delazify(self.base_kernel(x1, self.inducing_points))
if torch.equal(x1, x2):
covar = LowRankRootLazyTensor(k_ux1.matmul(self._inducing_inv_root))
# Diagonal correction for predictive posterior
if not self.training:
correction = (self.base_kernel(x1, x2, diag=True) - covar.diag()).clamp(0, math.inf)
covar = LowRankRootAddedDiagLazyTensor(covar, DiagLazyTensor(correction))
else:
k_ux2 = delazify(self.base_kernel(x2, self.inducing_points))
covar = MatmulLazyTensor(
k_ux1.matmul(self._inducing_inv_root), k_ux2.matmul(self._inducing_inv_root).transpose(-1, -2)
)
return covar
def _covar_diag(self, inputs):
if inputs.ndimension() == 1:
inputs = inputs.unsqueeze(1)
# Get diagonal of covar
covar_diag = delazify(self.base_kernel(inputs, diag=True))
return DiagLazyTensor(covar_diag)
def forward(self, x1, x2, diag=False, **kwargs):
covar = self._get_covariance(x1, x2)
if self.training:
if not torch.equal(x1, x2):
raise RuntimeError("x1 should equal x2 in training mode")
zero_mean = torch.zeros_like(x1.select(-1, 0))
new_added_loss_term = InducingPointKernelAddedLossTerm(
MultivariateNormal(zero_mean, self._covar_diag(x1)),
MultivariateNormal(zero_mean, covar),
self.likelihood,
)
self.update_added_loss_term("inducing_point_loss_term", new_added_loss_term)
if diag:
return covar.diag()
else:
return covar
def num_outputs_per_input(self, x1, x2):
return self.base_kernel.num_outputs_per_input(x1, x2)
def __deepcopy__(self, memo):
replace_inv_root = False
replace_kernel_mat = False
if hasattr(self, "_cached_kernel_inv_root"):
replace_inv_root = True
kernel_inv_root = self._cached_kernel_inv_root
if hasattr(self, "_cached_kernel_mat"):
replace_kernel_mat = True
kernel_mat = self._cached_kernel_mat
cp = self.__class__(
base_kernel=copy.deepcopy(self.base_kernel),
inducing_points=copy.deepcopy(self.inducing_points),
likelihood=self.likelihood,
active_dims=self.active_dims,
)
if replace_inv_root:
cp._cached_kernel_inv_root = kernel_inv_root
if replace_kernel_mat:
cp._cached_kernel_mat = kernel_mat
return cp
def prediction_strategy(self, train_inputs, train_prior_dist, train_labels, likelihood):
# Allow for fast variances
return exact_prediction_strategies.SGPRPredictionStrategy(
train_inputs, train_prior_dist, train_labels, likelihood
)
| [
"copy.deepcopy",
"torch.nn.Parameter",
"torch.equal",
"torch.triangular_solve"
] | [((2083, 2102), 'torch.equal', 'torch.equal', (['x1', 'x2'], {}), '(x1, x2)\n', (2094, 2102), False, 'import torch\n'), ((848, 883), 'torch.nn.Parameter', 'torch.nn.Parameter', (['inducing_points'], {}), '(inducing_points)\n', (866, 883), False, 'import torch\n'), ((1790, 1823), 'torch.triangular_solve', 'torch.triangular_solve', (['eye', 'chol'], {}), '(eye, chol)\n', (1812, 1823), False, 'import torch\n'), ((3143, 3162), 'torch.equal', 'torch.equal', (['x1', 'x2'], {}), '(x1, x2)\n', (3154, 3162), False, 'import torch\n'), ((4262, 4293), 'copy.deepcopy', 'copy.deepcopy', (['self.base_kernel'], {}), '(self.base_kernel)\n', (4275, 4293), False, 'import copy\n'), ((4323, 4358), 'copy.deepcopy', 'copy.deepcopy', (['self.inducing_points'], {}), '(self.inducing_points)\n', (4336, 4358), False, 'import copy\n')] |
import logging
import os
from flask import Flask
from flask_cors import CORS
from app.extensions import api
from app.extensions.database import db
from app.extensions.schema import ma
from app.views import albums, artists, hello, tracks
def create_app(config, **kwargs):
logging.basicConfig(level=logging.INFO)
app = Flask(__name__, **kwargs)
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config.from_object(config)
# app.url_map.strict_slashes = False
with app.app_context():
api.init_app(app)
db.init_app(app)
db.create_all()
ma.init_app(app)
api.register_blueprint(hello.blp)
api.register_blueprint(artists.blp)
api.register_blueprint(albums.blp)
api.register_blueprint(tracks.blp)
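    # Ensure the instance folder exists for instance-relative files; ignore if it is already there.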
try:
os.makedirs(app.instance_path)
except OSError:
pass
return app
| [
"logging.basicConfig",
"app.extensions.schema.ma.init_app",
"flask_cors.CORS",
"flask.Flask",
"os.makedirs",
"app.extensions.api.register_blueprint",
"app.extensions.api.init_app",
"app.extensions.database.db.init_app",
"app.extensions.database.db.create_all"
] | [((280, 319), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (299, 319), False, 'import logging\n'), ((331, 356), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__, **kwargs)\n', (336, 356), False, 'from flask import Flask\n'), ((361, 410), 'flask_cors.CORS', 'CORS', (['app'], {'resources': "{'/api/*': {'origins': '*'}}"}), "(app, resources={'/api/*': {'origins': '*'}})\n", (365, 410), False, 'from flask_cors import CORS\n'), ((526, 543), 'app.extensions.api.init_app', 'api.init_app', (['app'], {}), '(app)\n', (538, 543), False, 'from app.extensions import api\n'), ((553, 569), 'app.extensions.database.db.init_app', 'db.init_app', (['app'], {}), '(app)\n', (564, 569), False, 'from app.extensions.database import db\n'), ((578, 593), 'app.extensions.database.db.create_all', 'db.create_all', ([], {}), '()\n', (591, 593), False, 'from app.extensions.database import db\n'), ((603, 619), 'app.extensions.schema.ma.init_app', 'ma.init_app', (['app'], {}), '(app)\n', (614, 619), False, 'from app.extensions.schema import ma\n'), ((629, 662), 'app.extensions.api.register_blueprint', 'api.register_blueprint', (['hello.blp'], {}), '(hello.blp)\n', (651, 662), False, 'from app.extensions import api\n'), ((671, 706), 'app.extensions.api.register_blueprint', 'api.register_blueprint', (['artists.blp'], {}), '(artists.blp)\n', (693, 706), False, 'from app.extensions import api\n'), ((715, 749), 'app.extensions.api.register_blueprint', 'api.register_blueprint', (['albums.blp'], {}), '(albums.blp)\n', (737, 749), False, 'from app.extensions import api\n'), ((758, 792), 'app.extensions.api.register_blueprint', 'api.register_blueprint', (['tracks.blp'], {}), '(tracks.blp)\n', (780, 792), False, 'from app.extensions import api\n'), ((811, 841), 'os.makedirs', 'os.makedirs', (['app.instance_path'], {}), '(app.instance_path)\n', (822, 841), False, 'import os\n')] |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.bijectors import bijector_test_util
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util as tfp_test_util
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class InvertBijectorTest(tf.test.TestCase):
"""Tests the correctness of the Y = Invert(bij) transformation."""
def testBijector(self):
for fwd in [
tfb.Identity(),
tfb.Exp(),
tfb.Affine(shift=[0., 1.], scale_diag=[2., 3.]),
tfb.Softplus(),
tfb.SoftmaxCentered(),
]:
rev = tfb.Invert(fwd)
self.assertStartsWith(rev.name, "_".join(["invert", fwd.name]))
x = [[[1., 2.],
[2., 3.]]]
self.assertAllClose(
self.evaluate(fwd.inverse(x)), self.evaluate(rev.forward(x)))
self.assertAllClose(
self.evaluate(fwd.forward(x)), self.evaluate(rev.inverse(x)))
self.assertAllClose(
self.evaluate(fwd.forward_log_det_jacobian(x, event_ndims=1)),
self.evaluate(rev.inverse_log_det_jacobian(x, event_ndims=1)))
self.assertAllClose(
self.evaluate(fwd.inverse_log_det_jacobian(x, event_ndims=1)),
self.evaluate(rev.forward_log_det_jacobian(x, event_ndims=1)))
def testScalarCongruency(self):
bijector = tfb.Invert(tfb.Exp())
bijector_test_util.assert_scalar_congruency(
bijector, lower_x=1e-3, upper_x=1.5, eval_func=self.evaluate, rtol=0.05)
def testShapeGetters(self):
bijector = tfb.Invert(
tfb.SoftmaxCentered(validate_args=True))
x = tf.TensorShape([2])
y = tf.TensorShape([1])
self.assertAllEqual(y, bijector.forward_event_shape(x))
self.assertAllEqual(
tensorshape_util.as_list(y),
self.evaluate(
bijector.forward_event_shape_tensor(tensorshape_util.as_list(x))))
self.assertAllEqual(x, bijector.inverse_event_shape(y))
self.assertAllEqual(
tensorshape_util.as_list(x),
self.evaluate(
bijector.inverse_event_shape_tensor(tensorshape_util.as_list(y))))
def testDocstringExample(self):
exp_gamma_distribution = (
tfd.TransformedDistribution(
distribution=tfd.Gamma(concentration=1., rate=2.),
bijector=tfb.Invert(tfb.Exp())))
self.assertAllEqual(
[],
self.evaluate(
tf.shape(
exp_gamma_distribution.sample(seed=tfp_test_util.test_seed()))))
if __name__ == "__main__":
tf.test.main()
| [
"tensorflow_probability.python.bijectors.Identity",
"tensorflow_probability.python.distributions.Gamma",
"tensorflow_probability.python.bijectors.Invert",
"tensorflow.compat.v2.TensorShape",
"tensorflow_probability.python.bijectors.bijector_test_util.assert_scalar_congruency",
"tensorflow_probability.python.bijectors.Exp",
"tensorflow_probability.python.bijectors.SoftmaxCentered",
"tensorflow.compat.v2.test.main",
"tensorflow_probability.python.internal.test_util.test_seed",
"tensorflow_probability.python.internal.tensorshape_util.as_list",
"tensorflow_probability.python.bijectors.Affine",
"tensorflow_probability.python.bijectors.Softplus"
] | [((3555, 3569), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (3567, 3569), True, 'import tensorflow.compat.v2 as tf\n'), ((2413, 2534), 'tensorflow_probability.python.bijectors.bijector_test_util.assert_scalar_congruency', 'bijector_test_util.assert_scalar_congruency', (['bijector'], {'lower_x': '(0.001)', 'upper_x': '(1.5)', 'eval_func': 'self.evaluate', 'rtol': '(0.05)'}), '(bijector, lower_x=0.001,\n upper_x=1.5, eval_func=self.evaluate, rtol=0.05)\n', (2456, 2534), False, 'from tensorflow_probability.python.bijectors import bijector_test_util\n'), ((2654, 2673), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[2]'], {}), '([2])\n', (2668, 2673), True, 'import tensorflow.compat.v2 as tf\n'), ((2682, 2701), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['[1]'], {}), '([1])\n', (2696, 2701), True, 'import tensorflow.compat.v2 as tf\n'), ((1496, 1510), 'tensorflow_probability.python.bijectors.Identity', 'tfb.Identity', ([], {}), '()\n', (1508, 1510), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((1520, 1529), 'tensorflow_probability.python.bijectors.Exp', 'tfb.Exp', ([], {}), '()\n', (1527, 1529), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((1539, 1590), 'tensorflow_probability.python.bijectors.Affine', 'tfb.Affine', ([], {'shift': '[0.0, 1.0]', 'scale_diag': '[2.0, 3.0]'}), '(shift=[0.0, 1.0], scale_diag=[2.0, 3.0])\n', (1549, 1590), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((1596, 1610), 'tensorflow_probability.python.bijectors.Softplus', 'tfb.Softplus', ([], {}), '()\n', (1608, 1610), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((1620, 1641), 'tensorflow_probability.python.bijectors.SoftmaxCentered', 'tfb.SoftmaxCentered', ([], {}), '()\n', (1639, 1641), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((1662, 1677), 'tensorflow_probability.python.bijectors.Invert', 'tfb.Invert', (['fwd'], {}), '(fwd)\n', (1672, 1677), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((2398, 2407), 'tensorflow_probability.python.bijectors.Exp', 'tfb.Exp', ([], {}), '()\n', (2405, 2407), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((2605, 2644), 'tensorflow_probability.python.bijectors.SoftmaxCentered', 'tfb.SoftmaxCentered', ([], {'validate_args': '(True)'}), '(validate_args=True)\n', (2624, 2644), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((2795, 2822), 'tensorflow_probability.python.internal.tensorshape_util.as_list', 'tensorshape_util.as_list', (['y'], {}), '(y)\n', (2819, 2822), False, 'from tensorflow_probability.python.internal import tensorshape_util\n'), ((3019, 3046), 'tensorflow_probability.python.internal.tensorshape_util.as_list', 'tensorshape_util.as_list', (['x'], {}), '(x)\n', (3043, 3046), False, 'from tensorflow_probability.python.internal import tensorshape_util\n'), ((3278, 3316), 'tensorflow_probability.python.distributions.Gamma', 'tfd.Gamma', ([], {'concentration': '(1.0)', 'rate': '(2.0)'}), '(concentration=1.0, rate=2.0)\n', (3287, 3316), True, 'from tensorflow_probability.python import distributions as tfd\n'), ((2895, 2922), 'tensorflow_probability.python.internal.tensorshape_util.as_list', 'tensorshape_util.as_list', (['x'], {}), '(x)\n', (2919, 2922), False, 'from tensorflow_probability.python.internal import tensorshape_util\n'), ((3119, 3146), 'tensorflow_probability.python.internal.tensorshape_util.as_list', 
'tensorshape_util.as_list', (['y'], {}), '(y)\n', (3143, 3146), False, 'from tensorflow_probability.python.internal import tensorshape_util\n'), ((3348, 3357), 'tensorflow_probability.python.bijectors.Exp', 'tfb.Exp', ([], {}), '()\n', (3355, 3357), True, 'from tensorflow_probability.python import bijectors as tfb\n'), ((3494, 3519), 'tensorflow_probability.python.internal.test_util.test_seed', 'tfp_test_util.test_seed', ([], {}), '()\n', (3517, 3519), True, 'from tensorflow_probability.python.internal import test_util as tfp_test_util\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: <NAME>
# Date : 2019.2
# Email : <EMAIL>
###################################################################
"""
MAlert class.
"""
import six
import functools
from dayu_widgets.avatar import MAvatar
from dayu_widgets.label import MLabel
from dayu_widgets import dayu_theme
from dayu_widgets.tool_button import MToolButton
from dayu_widgets.mixin import property_mixin
from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property
@property_mixin
class MAlert(QWidget):
"""
Alert component for feedback.
Property:
dayu_type: The feedback type with different color container.
dayu_text: The feedback string showed in container.
"""
InfoType = 'info'
SuccessType = 'success'
WarningType = 'warning'
ErrorType = 'error'
def __init__(self, text='', parent=None, flags=Qt.Widget):
super(MAlert, self).__init__(parent, flags)
self.setAttribute(Qt.WA_StyledBackground)
self._icon_label = MAvatar()
self._icon_label.set_dayu_size(dayu_theme.tiny)
self._content_label = MLabel().secondary()
self._close_button = MToolButton().svg('close_line.svg').tiny().icon_only()
self._close_button.clicked.connect(functools.partial(self.setVisible, False))
self._main_lay = QHBoxLayout()
self._main_lay.setContentsMargins(8, 8, 8, 8)
self._main_lay.addWidget(self._icon_label)
self._main_lay.addWidget(self._content_label)
self._main_lay.addStretch()
self._main_lay.addWidget(self._close_button)
self.setLayout(self._main_lay)
self.set_show_icon(True)
self.set_closeable(False)
self._dayu_type = None
self._dayu_text = None
self.set_dayu_type(MAlert.InfoType)
self.set_dayu_text(text)
def set_closeable(self, closeable):
"""Display the close icon button or not."""
self._close_button.setVisible(closeable)
def set_show_icon(self, show_icon):
"""Display the information type icon or not."""
self._icon_label.setVisible(show_icon)
def _set_dayu_text(self):
self._content_label.setText(self._dayu_text)
self.setVisible(bool(self._dayu_text))
def set_dayu_text(self, value):
"""Set the feedback content."""
if isinstance(value, six.string_types):
self._dayu_text = value
else:
raise TypeError("Input argument 'value' should be string type, "
"but get {}".format(type(value)))
self._set_dayu_text()
def _set_dayu_type(self):
self._icon_label.set_dayu_image(MPixmap('{}_fill.svg'.format(self._dayu_type),
vars(dayu_theme).get(self._dayu_type + '_color')))
self.style().polish(self)
def set_dayu_type(self, value):
"""Set feedback type."""
if value in [MAlert.InfoType, MAlert.SuccessType, MAlert.WarningType, MAlert.ErrorType]:
self._dayu_type = value
else:
raise ValueError("Input argument 'value' should be one of "
"info/success/warning/error string.")
self._set_dayu_type()
def get_dayu_type(self):
"""
Get MAlert feedback type.
:return: str
"""
return self._dayu_type
def get_dayu_text(self):
"""
Get MAlert feedback message.
:return: six.string_types
"""
return self._dayu_text
dayu_text = Property(six.text_type, get_dayu_text, set_dayu_text)
dayu_type = Property(str, get_dayu_type, set_dayu_type)
def info(self):
"""Set MAlert to InfoType"""
self.set_dayu_type(MAlert.InfoType)
return self
def success(self):
"""Set MAlert to SuccessType"""
self.set_dayu_type(MAlert.SuccessType)
return self
def warning(self):
"""Set MAlert to WarningType"""
self.set_dayu_type(MAlert.WarningType)
return self
def error(self):
"""Set MAlert to ErrorType"""
self.set_dayu_type(MAlert.ErrorType)
return self
def closable(self):
"""Set MAlert closebale is True"""
self.set_closeable(True)
return self
| [
"dayu_widgets.qt.QHBoxLayout",
"dayu_widgets.qt.Property",
"dayu_widgets.label.MLabel",
"functools.partial",
"dayu_widgets.tool_button.MToolButton",
"dayu_widgets.avatar.MAvatar"
] | [((3636, 3689), 'dayu_widgets.qt.Property', 'Property', (['six.text_type', 'get_dayu_text', 'set_dayu_text'], {}), '(six.text_type, get_dayu_text, set_dayu_text)\n', (3644, 3689), False, 'from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property\n'), ((3706, 3749), 'dayu_widgets.qt.Property', 'Property', (['str', 'get_dayu_type', 'set_dayu_type'], {}), '(str, get_dayu_type, set_dayu_type)\n', (3714, 3749), False, 'from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property\n'), ((1103, 1112), 'dayu_widgets.avatar.MAvatar', 'MAvatar', ([], {}), '()\n', (1110, 1112), False, 'from dayu_widgets.avatar import MAvatar\n'), ((1416, 1429), 'dayu_widgets.qt.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1427, 1429), False, 'from dayu_widgets.qt import QWidget, QHBoxLayout, MPixmap, Qt, MIcon, Property\n'), ((1347, 1388), 'functools.partial', 'functools.partial', (['self.setVisible', '(False)'], {}), '(self.setVisible, False)\n', (1364, 1388), False, 'import functools\n'), ((1199, 1207), 'dayu_widgets.label.MLabel', 'MLabel', ([], {}), '()\n', (1205, 1207), False, 'from dayu_widgets.label import MLabel\n'), ((1249, 1262), 'dayu_widgets.tool_button.MToolButton', 'MToolButton', ([], {}), '()\n', (1260, 1262), False, 'from dayu_widgets.tool_button import MToolButton\n')] |
import tensorflow as tf
import numpy as np
import hyperchamber as hc
from hypergan.losses.base_loss import BaseLoss
from hypergan.multi_component import MultiComponent
TINY=1e-8
class MultiLoss(BaseLoss):
"""Takes multiple distributions and does an additional approximator"""
def _create(self, d_real, d_fake):
gan = self.gan
config = self.config
losses = []
split = self.split
for d in gan.discriminator.children:
if config.swapped:
d_swap = d_real
d_real = d_fake
d_fake = d_swap
ds = self.split_batch(d.sample, split)
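            # First chunk holds the real-sample scores; the remaining chunks are averaged as the fake scores.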
d_real = ds[0]
d_fake = tf.add_n(ds[1:])/(len(ds)-1)
loss_object = self.config['loss_class'](gan, self.config, d_real=d_real, d_fake=d_fake)
losses.append(loss_object)
#relational layer?
combine = MultiComponent(combine='concat', components=losses)
g_loss = combine.g_loss_features
d_loss = combine.d_loss_features
self.d_loss = d_loss
self.g_loss = g_loss
self.losses = losses
return [d_loss, g_loss]
| [
"tensorflow.add_n",
"hypergan.multi_component.MultiComponent"
] | [((908, 959), 'hypergan.multi_component.MultiComponent', 'MultiComponent', ([], {'combine': '"""concat"""', 'components': 'losses'}), "(combine='concat', components=losses)\n", (922, 959), False, 'from hypergan.multi_component import MultiComponent\n'), ((692, 708), 'tensorflow.add_n', 'tf.add_n', (['ds[1:]'], {}), '(ds[1:])\n', (700, 708), True, 'import tensorflow as tf\n')] |
import logging
from typing import Any, Dict, List
from fastapi import APIRouter, Body, Depends, Security
from fastapi_pagination import (
Page,
Params,
)
from fastapi_pagination.bases import AbstractPage
from fastapi_pagination.ext.sqlalchemy import paginate
from fidesops.schemas.shared_schemas import FidesOpsKey
from pydantic import conlist
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from starlette.exceptions import HTTPException
from starlette.status import HTTP_404_NOT_FOUND
from fidesops.api import deps
from fidesops.api.v1 import scope_registry as scopes
from fidesops.api.v1 import urn_registry as urls
from fidesops.common_exceptions import (
DataCategoryNotSupported,
PolicyValidationError,
RuleValidationError,
RuleTargetValidationError,
KeyOrNameAlreadyExists,
)
from fidesops.models.client import ClientDetail
from fidesops.models.policy import (
ActionType,
Policy,
Rule,
RuleTarget,
)
from fidesops.models.storage import StorageConfig
from fidesops.schemas import policy as schemas
from fidesops.schemas.api import BulkUpdateFailed
from fidesops.util.oauth_util import verify_oauth_client
router = APIRouter(tags=["Policy"], prefix=urls.V1_URL_PREFIX)
logger = logging.getLogger(__name__)
@router.get(
urls.POLICY_LIST,
status_code=200,
response_model=Page[schemas.PolicyResponse],
dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])],
)
def get_policy_list(
*,
db: Session = Depends(deps.get_db),
params: Params = Depends(),
) -> AbstractPage[Policy]:
"""
Return a paginated list of all Policy records in this system
"""
logger.info(f"Finding all policies with pagination params '{params}'")
policies = Policy.query(db=db)
return paginate(policies, params=params)
def get_policy_or_error(db: Session, policy_key: FidesOpsKey) -> Policy:
"""Helper method to load Policy or throw a 404"""
logger.info(f"Finding policy with key '{policy_key}'")
policy = Policy.get_by(db=db, field="key", value=policy_key)
if not policy:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Policy found for key {policy_key}.",
)
return policy
@router.get(
urls.POLICY_DETAIL,
status_code=200,
response_model=schemas.PolicyResponse,
dependencies=[Security(verify_oauth_client, scopes=[scopes.POLICY_READ])],
)
def get_policy(
*,
policy_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
) -> schemas.PolicyResponse:
"""
Return a single Policy
"""
return get_policy_or_error(db, policy_key)
@router.patch(
urls.POLICY_LIST,
status_code=200,
response_model=schemas.BulkPutPolicyResponse,
)
def create_or_update_policies(
*,
client: ClientDetail = Security(
verify_oauth_client,
scopes=[scopes.POLICY_CREATE_OR_UPDATE],
),
db: Session = Depends(deps.get_db),
data: conlist(schemas.Policy, max_items=50) = Body(...), # type: ignore
) -> schemas.BulkPutPolicyResponse:
"""
Given a list of policy data elements, create or update corresponding Policy objects
or report failure
"""
created_or_updated: List[Policy] = []
failed: List[BulkUpdateFailed] = []
logger.info(f"Starting bulk upsert for {len(data)} policies")
for policy_schema in data:
policy_data: Dict[str, Any] = dict(policy_schema)
try:
policy = Policy.create_or_update(
db=db,
data={
"name": policy_data["name"],
"key": policy_data.get("key"),
"client_id": client.id,
},
)
except KeyOrNameAlreadyExists as exc:
logger.warning("Create/update failed for policy: %s", exc)
failure = {
"message": exc.args[0],
"data": policy_data,
}
failed.append(BulkUpdateFailed(**failure))
continue
except PolicyValidationError as exc:
logger.warning("Create/update failed for policy: %s", exc)
failure = {
"message": "This record could not be added because the data provided was invalid.",
"data": policy_data,
}
failed.append(BulkUpdateFailed(**failure))
continue
else:
created_or_updated.append(policy)
return schemas.BulkPutPolicyResponse(
succeeded=created_or_updated,
failed=failed,
)
@router.patch(
urls.RULE_LIST,
status_code=200,
response_model=schemas.BulkPutRuleResponse,
)
def create_or_update_rules(
*,
client: ClientDetail = Security(
verify_oauth_client,
scopes=[scopes.RULE_CREATE_OR_UPDATE],
),
policy_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
input_data: conlist(schemas.RuleCreate, max_items=50) = Body(...), # type: ignore
) -> schemas.BulkPutRuleResponse:
"""
Given a list of Rule data elements, create or update corresponding Rule objects
or report failure
"""
logger.info(f"Finding policy with key '{policy_key}'")
policy = get_policy_or_error(db, policy_key)
created_or_updated: List[Rule] = []
failed: List[BulkUpdateFailed] = []
logger.info(
f"Starting bulk upsert for {len(input_data)} rules on policy {policy_key}"
)
for schema in input_data:
# Validate all FKs in the input data exist
associated_storage_config_id = None
if schema.action_type == ActionType.access.value:
# Only validate the associated StorageConfig on access rules
storage_destination_key = schema.storage_destination_key
associated_storage_config: StorageConfig = StorageConfig.get_by(
db=db,
field="key",
value=storage_destination_key,
)
if not associated_storage_config:
logger.warning(
f"No storage config found with key {storage_destination_key}"
)
failure = {
"message": f"A StorageConfig with key {storage_destination_key} does not exist",
"data": dict(
schema
), # Be sure to pass the schema out the same way it came in
}
failed.append(BulkUpdateFailed(**failure))
continue
else:
associated_storage_config_id = associated_storage_config.id
masking_strategy_data = None
if schema.masking_strategy:
masking_strategy_data = schema.masking_strategy.dict()
try:
rule = Rule.create_or_update(
db=db,
data={
"action_type": schema.action_type,
"client_id": client.id,
"key": schema.key,
"name": schema.name,
"policy_id": policy.id,
"storage_destination_id": associated_storage_config_id,
"masking_strategy": masking_strategy_data,
},
)
except KeyOrNameAlreadyExists as exc:
logger.warning(
f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except RuleValidationError as exc:
logger.warning(
f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except ValueError as exc:
logger.warning(
f"Create/update failed for rule '{schema.key}' on policy {policy_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
else:
created_or_updated.append(rule)
return schemas.BulkPutRuleResponse(succeeded=created_or_updated, failed=failed)
@router.delete(
urls.RULE_DETAIL,
status_code=204,
dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])],
)
def delete_rule(
*,
policy_key: FidesOpsKey,
rule_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
) -> None:
"""
Delete a policy rule.
"""
policy = get_policy_or_error(db, policy_key)
logger.info(f"Finding rule with key '{rule_key}'")
rule = Rule.filter(
db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id)
).first()
if not rule:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
)
logger.info(f"Deleting rule with key '{rule_key}'")
rule.delete(db=db)
@router.patch(
urls.RULE_TARGET_LIST,
status_code=200,
response_model=schemas.BulkPutRuleTargetResponse,
)
def create_or_update_rule_targets(
*,
client: ClientDetail = Security(
verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE]
),
policy_key: FidesOpsKey,
rule_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
input_data: conlist(schemas.RuleTarget, max_items=50) = Body(...), # type: ignore
) -> schemas.BulkPutRuleTargetResponse:
"""
    Given a list of RuleTarget data elements, create or update corresponding RuleTarget objects
    or report failure
"""
policy = get_policy_or_error(db, policy_key)
logger.info(f"Finding rule with key '{rule_key}'")
rule = Rule.filter(
db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id)
).first()
if not rule:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
)
created_or_updated = []
failed = []
logger.info(
f"Starting bulk upsert for {len(input_data)} rule targets on rule {rule_key}"
)
for schema in input_data:
try:
target = RuleTarget.create_or_update(
db=db,
data={
"name": schema.name,
"key": schema.key,
"data_category": schema.data_category,
"rule_id": rule.id,
"client_id": client.id,
},
)
except KeyOrNameAlreadyExists as exc:
logger.warning(
f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except (
DataCategoryNotSupported,
PolicyValidationError,
RuleTargetValidationError,
) as exc:
logger.warning(
f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
)
failure = {
"message": exc.args[0],
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
continue
except IntegrityError as exc:
logger.warning(
f"Create/update failed for rule target {schema.key} on rule {rule_key}: {exc}"
)
failure = {
"message": f"DataCategory {schema.data_category} is already specified on Rule with ID {rule.id}",
"data": dict(schema),
}
failed.append(BulkUpdateFailed(**failure))
else:
created_or_updated.append(target)
return schemas.BulkPutRuleTargetResponse(
succeeded=created_or_updated,
failed=failed,
)
@router.delete(
urls.RULE_TARGET_DETAIL,
status_code=204,
dependencies=[Security(verify_oauth_client, scopes=[scopes.RULE_DELETE])],
)
def delete_rule_target(
*,
policy_key: FidesOpsKey,
rule_key: FidesOpsKey,
rule_target_key: FidesOpsKey,
db: Session = Depends(deps.get_db),
) -> None:
"""
Delete the rule target.
"""
policy = get_policy_or_error(db, policy_key)
logger.info(f"Finding rule with key '{rule_key}'")
rule = Rule.filter(
db=db, conditions=(Rule.key == rule_key and Rule.policy_id == policy.id)
).first()
if not rule:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No Rule found for key {rule_key} on Policy {policy_key}.",
)
logger.info(f"Finding rule target with key '{rule_target_key}'")
target = RuleTarget.filter(
db=db,
conditions=(
RuleTarget.key == rule_target_key and RuleTarget.rule_id == rule.id
),
).first()
if not target:
raise HTTPException(
status_code=HTTP_404_NOT_FOUND,
detail=f"No RuleTarget found for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}.",
)
logger.info(f"Deleting rule target with key '{rule_target_key}'")
target.delete(db=db)
| [
"logging.getLogger",
"fastapi.Security",
"pydantic.conlist",
"fidesops.schemas.api.BulkUpdateFailed",
"fastapi_pagination.ext.sqlalchemy.paginate",
"fidesops.models.policy.RuleTarget.create_or_update",
"fidesops.models.policy.Rule.filter",
"fastapi.Body",
"fidesops.models.policy.Policy.get_by",
"fidesops.schemas.policy.BulkPutRuleTargetResponse",
"fidesops.models.policy.RuleTarget.filter",
"starlette.exceptions.HTTPException",
"fidesops.models.policy.Policy.query",
"fidesops.models.policy.Rule.create_or_update",
"fastapi.Depends",
"fidesops.models.storage.StorageConfig.get_by",
"fidesops.schemas.policy.BulkPutPolicyResponse",
"fidesops.schemas.policy.BulkPutRuleResponse",
"fastapi.APIRouter"
] | [((1199, 1252), 'fastapi.APIRouter', 'APIRouter', ([], {'tags': "['Policy']", 'prefix': 'urls.V1_URL_PREFIX'}), "(tags=['Policy'], prefix=urls.V1_URL_PREFIX)\n", (1208, 1252), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((1263, 1290), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1280, 1290), False, 'import logging\n'), ((1525, 1545), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (1532, 1545), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((1568, 1577), 'fastapi.Depends', 'Depends', ([], {}), '()\n', (1575, 1577), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((1777, 1796), 'fidesops.models.policy.Policy.query', 'Policy.query', ([], {'db': 'db'}), '(db=db)\n', (1789, 1796), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((1808, 1841), 'fastapi_pagination.ext.sqlalchemy.paginate', 'paginate', (['policies'], {'params': 'params'}), '(policies, params=params)\n', (1816, 1841), False, 'from fastapi_pagination.ext.sqlalchemy import paginate\n'), ((2043, 2094), 'fidesops.models.policy.Policy.get_by', 'Policy.get_by', ([], {'db': 'db', 'field': '"""key"""', 'value': 'policy_key'}), "(db=db, field='key', value=policy_key)\n", (2056, 2094), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((2531, 2551), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (2538, 2551), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((2849, 2919), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': '[scopes.POLICY_CREATE_OR_UPDATE]'}), '(verify_oauth_client, scopes=[scopes.POLICY_CREATE_OR_UPDATE])\n', (2857, 2919), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((2962, 2982), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (2969, 2982), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((3034, 3043), 'fastapi.Body', 'Body', (['...'], {}), '(...)\n', (3038, 3043), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((4490, 4564), 'fidesops.schemas.policy.BulkPutPolicyResponse', 'schemas.BulkPutPolicyResponse', ([], {'succeeded': 'created_or_updated', 'failed': 'failed'}), '(succeeded=created_or_updated, failed=failed)\n', (4519, 4564), True, 'from fidesops.schemas import policy as schemas\n'), ((4758, 4826), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': '[scopes.RULE_CREATE_OR_UPDATE]'}), '(verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE])\n', (4766, 4826), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((4898, 4918), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (4905, 4918), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((4980, 4989), 'fastapi.Body', 'Body', (['...'], {}), '(...)\n', (4984, 4989), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((8440, 8512), 'fidesops.schemas.policy.BulkPutRuleResponse', 'schemas.BulkPutRuleResponse', ([], {'succeeded': 'created_or_updated', 'failed': 'failed'}), '(succeeded=created_or_updated, failed=failed)\n', (8467, 8512), True, 'from fidesops.schemas import policy as schemas\n'), ((8753, 8773), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (8760, 8773), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((9503, 9571), 'fastapi.Security', 'Security', (['verify_oauth_client'], 
{'scopes': '[scopes.RULE_CREATE_OR_UPDATE]'}), '(verify_oauth_client, scopes=[scopes.RULE_CREATE_OR_UPDATE])\n', (9511, 9571), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((9661, 9681), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (9668, 9681), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((9743, 9752), 'fastapi.Body', 'Body', (['...'], {}), '(...)\n', (9747, 9752), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((12218, 12296), 'fidesops.schemas.policy.BulkPutRuleTargetResponse', 'schemas.BulkPutRuleTargetResponse', ([], {'succeeded': 'created_or_updated', 'failed': 'failed'}), '(succeeded=created_or_updated, failed=failed)\n', (12251, 12296), True, 'from fidesops.schemas import policy as schemas\n'), ((12608, 12628), 'fastapi.Depends', 'Depends', (['deps.get_db'], {}), '(deps.get_db)\n', (12615, 12628), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((2128, 2227), 'starlette.exceptions.HTTPException', 'HTTPException', ([], {'status_code': 'HTTP_404_NOT_FOUND', 'detail': 'f"""No Policy found for key {policy_key}."""'}), "(status_code=HTTP_404_NOT_FOUND, detail=\n f'No Policy found for key {policy_key}.')\n", (2141, 2227), False, 'from starlette.exceptions import HTTPException\n'), ((2994, 3031), 'pydantic.conlist', 'conlist', (['schemas.Policy'], {'max_items': '(50)'}), '(schemas.Policy, max_items=50)\n', (3001, 3031), False, 'from pydantic import conlist\n'), ((4936, 4977), 'pydantic.conlist', 'conlist', (['schemas.RuleCreate'], {'max_items': '(50)'}), '(schemas.RuleCreate, max_items=50)\n', (4943, 4977), False, 'from pydantic import conlist\n'), ((9084, 9202), 'starlette.exceptions.HTTPException', 'HTTPException', ([], {'status_code': 'HTTP_404_NOT_FOUND', 'detail': 'f"""No Rule found for key {rule_key} on Policy {policy_key}."""'}), "(status_code=HTTP_404_NOT_FOUND, detail=\n f'No Rule found for key {rule_key} on Policy {policy_key}.')\n", (9097, 9202), False, 'from starlette.exceptions import HTTPException\n'), ((9699, 9740), 'pydantic.conlist', 'conlist', (['schemas.RuleTarget'], {'max_items': '(50)'}), '(schemas.RuleTarget, max_items=50)\n', (9706, 9740), False, 'from pydantic import conlist\n'), ((10177, 10295), 'starlette.exceptions.HTTPException', 'HTTPException', ([], {'status_code': 'HTTP_404_NOT_FOUND', 'detail': 'f"""No Rule found for key {rule_key} on Policy {policy_key}."""'}), "(status_code=HTTP_404_NOT_FOUND, detail=\n f'No Rule found for key {rule_key} on Policy {policy_key}.')\n", (10190, 10295), False, 'from starlette.exceptions import HTTPException\n'), ((12940, 13058), 'starlette.exceptions.HTTPException', 'HTTPException', ([], {'status_code': 'HTTP_404_NOT_FOUND', 'detail': 'f"""No Rule found for key {rule_key} on Policy {policy_key}."""'}), "(status_code=HTTP_404_NOT_FOUND, detail=\n f'No Rule found for key {rule_key} on Policy {policy_key}.')\n", (12953, 13058), False, 'from starlette.exceptions import HTTPException\n'), ((13365, 13520), 'starlette.exceptions.HTTPException', 'HTTPException', ([], {'status_code': 'HTTP_404_NOT_FOUND', 'detail': 'f"""No RuleTarget found for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}."""'}), "(status_code=HTTP_404_NOT_FOUND, detail=\n f'No RuleTarget found for key {rule_target_key} at Rule {rule_key} on Policy {policy_key}.'\n )\n", (13378, 13520), False, 'from starlette.exceptions import HTTPException\n'), ((1416, 1474), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': 
'[scopes.POLICY_READ]'}), '(verify_oauth_client, scopes=[scopes.POLICY_READ])\n', (1424, 1474), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((2398, 2456), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': '[scopes.POLICY_READ]'}), '(verify_oauth_client, scopes=[scopes.POLICY_READ])\n', (2406, 2456), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((5841, 5912), 'fidesops.models.storage.StorageConfig.get_by', 'StorageConfig.get_by', ([], {'db': 'db', 'field': '"""key"""', 'value': 'storage_destination_key'}), "(db=db, field='key', value=storage_destination_key)\n", (5861, 5912), False, 'from fidesops.models.storage import StorageConfig\n'), ((6799, 7068), 'fidesops.models.policy.Rule.create_or_update', 'Rule.create_or_update', ([], {'db': 'db', 'data': "{'action_type': schema.action_type, 'client_id': client.id, 'key': schema.\n key, 'name': schema.name, 'policy_id': policy.id,\n 'storage_destination_id': associated_storage_config_id,\n 'masking_strategy': masking_strategy_data}"}), "(db=db, data={'action_type': schema.action_type,\n 'client_id': client.id, 'key': schema.key, 'name': schema.name,\n 'policy_id': policy.id, 'storage_destination_id':\n associated_storage_config_id, 'masking_strategy': masking_strategy_data})\n", (6820, 7068), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((8945, 9032), 'fidesops.models.policy.Rule.filter', 'Rule.filter', ([], {'db': 'db', 'conditions': '(Rule.key == rule_key and Rule.policy_id == policy.id)'}), '(db=db, conditions=Rule.key == rule_key and Rule.policy_id ==\n policy.id)\n', (8956, 9032), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((8592, 8650), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': '[scopes.RULE_DELETE]'}), '(verify_oauth_client, scopes=[scopes.RULE_DELETE])\n', (8600, 8650), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((10038, 10125), 'fidesops.models.policy.Rule.filter', 'Rule.filter', ([], {'db': 'db', 'conditions': '(Rule.key == rule_key and Rule.policy_id == policy.id)'}), '(db=db, conditions=Rule.key == rule_key and Rule.policy_id ==\n policy.id)\n', (10049, 10125), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((10544, 10717), 'fidesops.models.policy.RuleTarget.create_or_update', 'RuleTarget.create_or_update', ([], {'db': 'db', 'data': "{'name': schema.name, 'key': schema.key, 'data_category': schema.\n data_category, 'rule_id': rule.id, 'client_id': client.id}"}), "(db=db, data={'name': schema.name, 'key': schema\n .key, 'data_category': schema.data_category, 'rule_id': rule.id,\n 'client_id': client.id})\n", (10571, 10717), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((12801, 12888), 'fidesops.models.policy.Rule.filter', 'Rule.filter', ([], {'db': 'db', 'conditions': '(Rule.key == rule_key and Rule.policy_id == policy.id)'}), '(db=db, conditions=Rule.key == rule_key and Rule.policy_id ==\n policy.id)\n', (12812, 12888), False, 'from fidesops.models.policy import ActionType, Policy, Rule, RuleTarget\n'), ((13172, 13281), 'fidesops.models.policy.RuleTarget.filter', 'RuleTarget.filter', ([], {'db': 'db', 'conditions': '(RuleTarget.key == rule_target_key and RuleTarget.rule_id == rule.id)'}), '(db=db, conditions=RuleTarget.key == rule_target_key and \n RuleTarget.rule_id == rule.id)\n', (13189, 13281), False, 'from fidesops.models.policy import ActionType, Policy, 
Rule, RuleTarget\n'), ((12406, 12464), 'fastapi.Security', 'Security', (['verify_oauth_client'], {'scopes': '[scopes.RULE_DELETE]'}), '(verify_oauth_client, scopes=[scopes.RULE_DELETE])\n', (12414, 12464), False, 'from fastapi import APIRouter, Body, Depends, Security\n'), ((4001, 4028), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (4017, 4028), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((4368, 4395), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (4384, 4395), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((6477, 6504), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (6493, 6504), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((7587, 7614), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (7603, 7614), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((7958, 7985), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (7974, 7985), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((8320, 8347), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (8336, 8347), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((11200, 11227), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (11216, 11227), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((11676, 11703), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (11692, 11703), False, 'from fidesops.schemas.api import BulkUpdateFailed\n'), ((12117, 12144), 'fidesops.schemas.api.BulkUpdateFailed', 'BulkUpdateFailed', ([], {}), '(**failure)\n', (12133, 12144), False, 'from fidesops.schemas.api import BulkUpdateFailed\n')] |
# -*- coding: UTF-8 -*-
# Copyright 2010-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
from builtins import object
from django.contrib.contenttypes.models import *
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.text import format_lazy
from lino.api import dd
from lino.core.gfks import gfk2lookup
from .fields import GenericForeignKey, GenericForeignKeyIdField
class Controllable(dd.Model):
# Translators: will also be concatenated with '(type)' '(object)'
owner_label = _('Controlled by')
controller_is_optional = True
class Meta(object):
abstract = True
owner_type = dd.ForeignKey(
ContentType,
editable=True,
blank=True, null=True,
verbose_name=format_lazy(u"{} {}", owner_label, _('(type)')))
owner_id = GenericForeignKeyIdField(
owner_type,
editable=True,
blank=True, null=True,
verbose_name=format_lazy(u"{} {}", owner_label, _('(object)')))
owner = GenericForeignKey(
'owner_type', 'owner_id',
verbose_name=owner_label)
@classmethod
def update_controller_field(cls, verbose_name=None, **kwargs):
if verbose_name is not None:
dd.update_field(cls, 'owner', verbose_name=verbose_name)
kwargs.update(
verbose_name=format_lazy(u"{} {}",
verbose_name, _('(object)')))
dd.update_field(cls, 'owner_id', **kwargs)
if verbose_name is not None:
kwargs.update(
verbose_name=format_lazy(u"{} {}",
verbose_name, _('(type)')))
dd.update_field(cls, 'owner_type', **kwargs)
def update_owned_instance(self, controllable):
if self.owner:
self.owner.update_owned_instance(controllable)
super(Controllable, self).update_owned_instance(controllable)
def save(self, *args, **kw):
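        # While restoring from a dump, skip the owner-update hooks and just save.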
if settings.SITE.loading_from_dump:
super(Controllable, self).save(*args, **kw)
else:
if self.owner:
self.owner.update_owned_instance(self)
super(Controllable, self).save(*args, **kw)
if self.owner:
self.owner.after_update_owned_instance(self)
def controlled_rows(self, model, **kwargs):
gfk = self._meta.get_field('owner')
kwargs = gfk2lookup(gfk, self, **kwargs)
return model.objects.filter(**kwargs)
| [
"django.utils.translation.ugettext_lazy",
"lino.core.gfks.gfk2lookup",
"lino.api.dd.update_field"
] | [((568, 586), 'django.utils.translation.ugettext_lazy', '_', (['"""Controlled by"""'], {}), "('Controlled by')\n", (569, 586), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1465, 1507), 'lino.api.dd.update_field', 'dd.update_field', (['cls', '"""owner_id"""'], {}), "(cls, 'owner_id', **kwargs)\n", (1480, 1507), False, 'from lino.api import dd\n'), ((1679, 1723), 'lino.api.dd.update_field', 'dd.update_field', (['cls', '"""owner_type"""'], {}), "(cls, 'owner_type', **kwargs)\n", (1694, 1723), False, 'from lino.api import dd\n'), ((2412, 2443), 'lino.core.gfks.gfk2lookup', 'gfk2lookup', (['gfk', 'self'], {}), '(gfk, self, **kwargs)\n', (2422, 2443), False, 'from lino.core.gfks import gfk2lookup\n'), ((1272, 1328), 'lino.api.dd.update_field', 'dd.update_field', (['cls', '"""owner"""'], {'verbose_name': 'verbose_name'}), "(cls, 'owner', verbose_name=verbose_name)\n", (1287, 1328), False, 'from lino.api import dd\n'), ((835, 846), 'django.utils.translation.ugettext_lazy', '_', (['"""(type)"""'], {}), "('(type)')\n", (836, 846), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1021, 1034), 'django.utils.translation.ugettext_lazy', '_', (['"""(object)"""'], {}), "('(object)')\n", (1022, 1034), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1441, 1454), 'django.utils.translation.ugettext_lazy', '_', (['"""(object)"""'], {}), "('(object)')\n", (1442, 1454), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1657, 1668), 'django.utils.translation.ugettext_lazy', '_', (['"""(type)"""'], {}), "('(type)')\n", (1658, 1668), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import time
import psutil
import pymysql
from fastapi import APIRouter
from api.utils import response_code
router = APIRouter()
@router.get('/dashboard/getinfo')
def getinfo():
from init_global import g
res = {}
db = g.db_pool.connection()
cur = db.cursor()
cur.execute(f'select count(app_name) from app_list')
res['app_count'] = cur.fetchall()[0][0]
cur.execute(f'select count(app_name) from app_list where status="running"')
res['app_run_count'] = cur.fetchall()[0][0]
res['image_count'] = len(g.dc.images.list())
res['networks_count'] = len(g.dc.networks.list())
cur = db.cursor(cursor=pymysql.cursors.DictCursor)
cur.execute(f'select * from app_list order by start_time desc limit 10')
res['recent_event'] = cur.fetchall()
db.close()
return response_code.resp_200(data={"res": res})
def get_performance():
res = {}
# cpu
    cpuCount = psutil.cpu_count(logical=False)  # number of physical CPU cores
    cpuPercent = psutil.cpu_percent(0.5)  # CPU usage (%) sampled over 0.5 s
    cpufree = round(100 - cpuPercent, 2)  # idle CPU (%)
    # Memory
    m = psutil.virtual_memory()  # memory info
    memoryTotal = round(m.total / (1024.0 * 1024.0 * 1024.0), 2)  # total memory (GiB)
    memoryUsed = round(m.used / (1024.0 * 1024.0 * 1024.0), 2)  # used memory (GiB)
    memoryFree = round(memoryTotal - memoryUsed, 2)  # free memory (GiB)
    # Disk
    io = psutil.disk_partitions()
    diskCount = len(io)
    diskTotal = 0  # total disk space (GiB)
    diskUsed = 0  # used (GiB)
    diskFree = 0  # free (GiB)
for i in io:
try:
o = psutil.disk_usage(i.mountpoint)
diskTotal += int(o.total / (1024.0 * 1024.0 * 1024.0))
diskUsed += int(o.used / (1024.0 * 1024.0 * 1024.0))
diskFree += int(o.free / (1024.0 * 1024.0 * 1024.0))
except:
pass
res['cpu'] = cpuPercent
res['mem'] = m.percent
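    # 'disk' reflects the usage percent of the last partition probed in the loop above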
res['disk'] = o.percent
res['memoryTotal'] = memoryTotal
res['memoryUsed'] = memoryUsed
res['diskTotal'] = diskTotal
res['diskUsed'] = diskUsed
return res
def push_realinfo():
from init_global import g
from main import socket_manager as sm
print(g.person_online)
while g.person_online:
res = get_performance()
# print(res)
g.push_loop.run_until_complete(sm.emit('dashboard', {'data': res}))
time.sleep(3)
| [
"init_global.g.db_pool.connection",
"psutil.cpu_percent",
"init_global.g.dc.networks.list",
"psutil.disk_usage",
"main.socket_manager.emit",
"api.utils.response_code.resp_200",
"psutil.virtual_memory",
"time.sleep",
"psutil.disk_partitions",
"fastapi.APIRouter",
"psutil.cpu_count",
"init_global.g.dc.images.list"
] | [((119, 130), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (128, 130), False, 'from fastapi import APIRouter\n'), ((234, 256), 'init_global.g.db_pool.connection', 'g.db_pool.connection', ([], {}), '()\n', (254, 256), False, 'from init_global import g\n'), ((810, 851), 'api.utils.response_code.resp_200', 'response_code.resp_200', ([], {'data': "{'res': res}"}), "(data={'res': res})\n", (832, 851), False, 'from api.utils import response_code\n'), ((915, 946), 'psutil.cpu_count', 'psutil.cpu_count', ([], {'logical': '(False)'}), '(logical=False)\n', (931, 946), False, 'import psutil\n'), ((973, 996), 'psutil.cpu_percent', 'psutil.cpu_percent', (['(0.5)'], {}), '(0.5)\n', (991, 996), False, 'import psutil\n'), ((1071, 1094), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (1092, 1094), False, 'import psutil\n'), ((1324, 1348), 'psutil.disk_partitions', 'psutil.disk_partitions', ([], {}), '()\n', (1346, 1348), False, 'import psutil\n'), ((537, 555), 'init_global.g.dc.images.list', 'g.dc.images.list', ([], {}), '()\n', (553, 555), False, 'from init_global import g\n'), ((589, 609), 'init_global.g.dc.networks.list', 'g.dc.networks.list', ([], {}), '()\n', (607, 609), False, 'from init_global import g\n'), ((2276, 2289), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2286, 2289), False, 'import time\n'), ((1494, 1525), 'psutil.disk_usage', 'psutil.disk_usage', (['i.mountpoint'], {}), '(i.mountpoint)\n', (1511, 1525), False, 'import psutil\n'), ((2231, 2266), 'main.socket_manager.emit', 'sm.emit', (['"""dashboard"""', "{'data': res}"], {}), "('dashboard', {'data': res})\n", (2238, 2266), True, 'from main import socket_manager as sm\n')] |
import torch
import torch.nn as nn
import numpy as np
import math
class ForwardKinematics:
def __init__(self, args, edges):
self.topology = [-1] * (len(edges) + 1)
self.rotation_map = []
for i, edge in enumerate(edges):
self.topology[edge[1]] = edge[0]
self.rotation_map.append(edge[1])
self.world = args.fk_world
self.pos_repr = args.pos_repr
self.quater = args.rotation == 'quaternion'
def forward_from_raw(self, raw, offset, world=None, quater=None):
if world is None: world = self.world
if quater is None: quater = self.quater
if self.pos_repr == '3d':
position = raw[:, -3:, :]
rotation = raw[:, :-3, :]
elif self.pos_repr == '4d':
            raise Exception('Not supported')
if quater:
rotation = rotation.reshape((rotation.shape[0], -1, 4, rotation.shape[-1]))
identity = torch.tensor((1, 0, 0, 0), dtype=torch.float, device=raw.device)
else:
rotation = rotation.reshape((rotation.shape[0], -1, 3, rotation.shape[-1]))
identity = torch.zeros((3, ), dtype=torch.float, device=raw.device)
identity = identity.reshape((1, 1, -1, 1))
new_shape = list(rotation.shape)
new_shape[1] += 1
new_shape[2] = 1
rotation_final = identity.repeat(new_shape)
for i, j in enumerate(self.rotation_map):
rotation_final[:, j, :, :] = rotation[:, i, :, :]
return self.forward(rotation_final, position, offset, world=world, quater=quater)
'''
rotation should have shape batch_size * Joint_num * (3/4) * Time
position should have shape batch_size * 3 * Time
offset should have shape batch_size * Joint_num * 3
output have shape batch_size * Time * Joint_num * 3
'''
def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False, world=True):
if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation')
if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation')
rotation = rotation.permute(0, 3, 1, 2)
position = position.permute(0, 2, 1)
result = torch.empty(rotation.shape[:-1] + (3, ), device=position.device)
norm = torch.norm(rotation, dim=-1, keepdim=True)
#norm[norm < 1e-10] = 1
rotation = rotation / norm
if quater:
transform = self.transform_from_quaternion(rotation)
else:
transform = self.transform_from_euler(rotation, order)
offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1))
result[..., 0, :] = position
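        # Walk the kinematic tree: compose each joint's local rotation onto its parent's transform, then apply the bone offset to get the joint position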
for i, pi in enumerate(self.topology):
if pi == -1:
assert i == 0
continue
transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :], transform[..., i, :, :])
result[..., i, :] = torch.matmul(transform[..., i, :, :], offset[..., i, :, :]).squeeze()
if world: result[..., i, :] += result[..., pi, :]
return result
def from_local_to_world(self, res: torch.Tensor):
res = res.clone()
for i, pi in enumerate(self.topology):
if pi == 0 or pi == -1:
continue
res[..., i, :] += res[..., pi, :]
return res
@staticmethod
def transform_from_euler(rotation, order):
rotation = rotation / 180 * math.pi
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]),
ForwardKinematics.transform_from_axis(rotation[..., 2], order[2]))
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform)
return transform
@staticmethod
def transform_from_axis(euler, axis):
transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device)
cos = torch.cos(euler)
sin = torch.sin(euler)
cord = ord(axis) - ord('x')
transform[..., cord, :] = transform[..., :, cord] = 0
transform[..., cord, cord] = 1
if axis == 'x':
transform[..., 1, 1] = transform[..., 2, 2] = cos
transform[..., 1, 2] = -sin
transform[..., 2, 1] = sin
if axis == 'y':
transform[..., 0, 0] = transform[..., 2, 2] = cos
transform[..., 0, 2] = sin
transform[..., 2, 0] = -sin
if axis == 'z':
transform[..., 0, 0] = transform[..., 1, 1] = cos
transform[..., 0, 1] = -sin
transform[..., 1, 0] = sin
return transform
@staticmethod
def transform_from_quaternion(quater: torch.Tensor):
qw = quater[..., 0]
qx = quater[..., 1]
qy = quater[..., 2]
qz = quater[..., 3]
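        # Intermediate products for the standard quaternion-to-rotation-matrix conversion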
x2 = qx + qx
y2 = qy + qy
z2 = qz + qz
xx = qx * x2
yy = qy * y2
wx = qw * x2
xy = qx * y2
yz = qy * z2
wy = qw * y2
xz = qx * z2
zz = qz * z2
wz = qw * z2
m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device)
m[..., 0, 0] = 1.0 - (yy + zz)
m[..., 0, 1] = xy - wz
m[..., 0, 2] = xz + wy
m[..., 1, 0] = xy + wz
m[..., 1, 1] = 1.0 - (xx + zz)
m[..., 1, 2] = yz - wx
m[..., 2, 0] = xz - wy
m[..., 2, 1] = yz + wx
m[..., 2, 2] = 1.0 - (xx + yy)
return m
class InverseKinematics:
def __init__(self, rotations: torch.Tensor, positions: torch.Tensor, offset, parents, constrains):
self.rotations = rotations
self.rotations.requires_grad_(True)
self.position = positions
self.position.requires_grad_(True)
self.parents = parents
self.offset = offset
self.constrains = constrains
self.optimizer = torch.optim.Adam([self.position, self.rotations], lr=1e-3, betas=(0.9, 0.999))
self.crit = nn.MSELoss()
def step(self):
self.optimizer.zero_grad()
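        # Run forward kinematics with the current rotations/root position and penalize deviation from the target joint positions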
glb = self.forward(self.rotations, self.position, self.offset, order='', quater=True, world=True)
loss = self.crit(glb, self.constrains)
loss.backward()
self.optimizer.step()
self.glb = glb
return loss.item()
def tloss(self, time):
return self.crit(self.glb[time, :], self.constrains[time, :])
def all_loss(self):
res = [self.tloss(t).detach().numpy() for t in range(self.constrains.shape[0])]
return np.array(res)
'''
rotation should have shape batch_size * Joint_num * (3/4) * Time
position should have shape batch_size * 3 * Time
offset should have shape batch_size * Joint_num * 3
output have shape batch_size * Time * Joint_num * 3
'''
def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False,
world=True):
'''
if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation')
if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation')
rotation = rotation.permute(0, 3, 1, 2)
position = position.permute(0, 2, 1)
'''
result = torch.empty(rotation.shape[:-1] + (3,), device=position.device)
norm = torch.norm(rotation, dim=-1, keepdim=True)
rotation = rotation / norm
if quater:
transform = self.transform_from_quaternion(rotation)
else:
transform = self.transform_from_euler(rotation, order)
offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1))
result[..., 0, :] = position
for i, pi in enumerate(self.parents):
if pi == -1:
assert i == 0
continue
result[..., i, :] = torch.matmul(transform[..., pi, :, :], offset[..., i, :, :]).squeeze()
transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :], transform[..., i, :, :])
if world: result[..., i, :] += result[..., pi, :]
return result
@staticmethod
def transform_from_euler(rotation, order):
rotation = rotation / 180 * math.pi
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 1], order[1]),
ForwardKinematics.transform_from_axis(rotation[..., 2], order[2]))
transform = torch.matmul(ForwardKinematics.transform_from_axis(rotation[..., 0], order[0]), transform)
return transform
@staticmethod
def transform_from_axis(euler, axis):
transform = torch.empty(euler.shape[0:3] + (3, 3), device=euler.device)
cos = torch.cos(euler)
sin = torch.sin(euler)
cord = ord(axis) - ord('x')
transform[..., cord, :] = transform[..., :, cord] = 0
transform[..., cord, cord] = 1
if axis == 'x':
transform[..., 1, 1] = transform[..., 2, 2] = cos
transform[..., 1, 2] = -sin
transform[..., 2, 1] = sin
if axis == 'y':
transform[..., 0, 0] = transform[..., 2, 2] = cos
transform[..., 0, 2] = sin
transform[..., 2, 0] = -sin
if axis == 'z':
transform[..., 0, 0] = transform[..., 1, 1] = cos
transform[..., 0, 1] = -sin
transform[..., 1, 0] = sin
return transform
@staticmethod
def transform_from_quaternion(quater: torch.Tensor):
qw = quater[..., 0]
qx = quater[..., 1]
qy = quater[..., 2]
qz = quater[..., 3]
x2 = qx + qx
y2 = qy + qy
z2 = qz + qz
xx = qx * x2
yy = qy * y2
wx = qw * x2
xy = qx * y2
yz = qy * z2
wy = qw * y2
xz = qx * z2
zz = qz * z2
wz = qw * z2
m = torch.empty(quater.shape[:-1] + (3, 3), device=quater.device)
m[..., 0, 0] = 1.0 - (yy + zz)
m[..., 0, 1] = xy - wz
m[..., 0, 2] = xz + wy
m[..., 1, 0] = xy + wz
m[..., 1, 1] = 1.0 - (xx + zz)
m[..., 1, 2] = yz - wx
m[..., 2, 0] = xz - wy
m[..., 2, 1] = yz + wx
m[..., 2, 2] = 1.0 - (xx + yy)
return m
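# Illustrative usage sketch (not part of the original source; tensor shapes
# follow the comment block above and are otherwise assumed):
#     ik = InverseKinematics(rotations, positions, offset, parents, constrains)
#     for _ in range(200):       # simple optimisation loop
#         loss = ik.step()       # one Adam step pulling the FK result towards the constraints
#     print(ik.all_loss())       # per-frame losses as a numpy array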
| [
"torch.optim.Adam",
"torch.sin",
"torch.nn.MSELoss",
"torch.norm",
"torch.cos",
"numpy.array",
"torch.tensor",
"torch.matmul",
"torch.empty",
"torch.zeros"
] | [((2281, 2344), 'torch.empty', 'torch.empty', (['(rotation.shape[:-1] + (3,))'], {'device': 'position.device'}), '(rotation.shape[:-1] + (3,), device=position.device)\n', (2292, 2344), False, 'import torch\n'), ((2363, 2405), 'torch.norm', 'torch.norm', (['rotation'], {'dim': '(-1)', 'keepdim': '(True)'}), '(rotation, dim=-1, keepdim=True)\n', (2373, 2405), False, 'import torch\n'), ((3956, 4015), 'torch.empty', 'torch.empty', (['(euler.shape[0:3] + (3, 3))'], {'device': 'euler.device'}), '(euler.shape[0:3] + (3, 3), device=euler.device)\n', (3967, 4015), False, 'import torch\n'), ((4030, 4046), 'torch.cos', 'torch.cos', (['euler'], {}), '(euler)\n', (4039, 4046), False, 'import torch\n'), ((4061, 4077), 'torch.sin', 'torch.sin', (['euler'], {}), '(euler)\n', (4070, 4077), False, 'import torch\n'), ((5192, 5253), 'torch.empty', 'torch.empty', (['(quater.shape[:-1] + (3, 3))'], {'device': 'quater.device'}), '(quater.shape[:-1] + (3, 3), device=quater.device)\n', (5203, 5253), False, 'import torch\n'), ((5985, 6064), 'torch.optim.Adam', 'torch.optim.Adam', (['[self.position, self.rotations]'], {'lr': '(0.001)', 'betas': '(0.9, 0.999)'}), '([self.position, self.rotations], lr=0.001, betas=(0.9, 0.999))\n', (6001, 6064), False, 'import torch\n'), ((6084, 6096), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6094, 6096), True, 'import torch.nn as nn\n'), ((6636, 6649), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (6644, 6649), True, 'import numpy as np\n'), ((7394, 7457), 'torch.empty', 'torch.empty', (['(rotation.shape[:-1] + (3,))'], {'device': 'position.device'}), '(rotation.shape[:-1] + (3,), device=position.device)\n', (7405, 7457), False, 'import torch\n'), ((7474, 7516), 'torch.norm', 'torch.norm', (['rotation'], {'dim': '(-1)', 'keepdim': '(True)'}), '(rotation, dim=-1, keepdim=True)\n', (7484, 7516), False, 'import torch\n'), ((8780, 8839), 'torch.empty', 'torch.empty', (['(euler.shape[0:3] + (3, 3))'], {'device': 'euler.device'}), '(euler.shape[0:3] + (3, 3), device=euler.device)\n', (8791, 8839), False, 'import torch\n'), ((8854, 8870), 'torch.cos', 'torch.cos', (['euler'], {}), '(euler)\n', (8863, 8870), False, 'import torch\n'), ((8885, 8901), 'torch.sin', 'torch.sin', (['euler'], {}), '(euler)\n', (8894, 8901), False, 'import torch\n'), ((10016, 10077), 'torch.empty', 'torch.empty', (['(quater.shape[:-1] + (3, 3))'], {'device': 'quater.device'}), '(quater.shape[:-1] + (3, 3), device=quater.device)\n', (10027, 10077), False, 'import torch\n'), ((950, 1014), 'torch.tensor', 'torch.tensor', (['(1, 0, 0, 0)'], {'dtype': 'torch.float', 'device': 'raw.device'}), '((1, 0, 0, 0), dtype=torch.float, device=raw.device)\n', (962, 1014), False, 'import torch\n'), ((1140, 1195), 'torch.zeros', 'torch.zeros', (['(3,)'], {'dtype': 'torch.float', 'device': 'raw.device'}), '((3,), dtype=torch.float, device=raw.device)\n', (1151, 1195), False, 'import torch\n'), ((2925, 2988), 'torch.matmul', 'torch.matmul', (['transform[..., pi, :, :]', 'transform[..., i, :, :]'], {}), '(transform[..., pi, :, :], transform[..., i, :, :])\n', (2937, 2988), False, 'import torch\n'), ((8105, 8168), 'torch.matmul', 'torch.matmul', (['transform[..., pi, :, :]', 'transform[..., i, :, :]'], {}), '(transform[..., pi, :, :], transform[..., i, :, :])\n', (8117, 8168), False, 'import torch\n'), ((3021, 3080), 'torch.matmul', 'torch.matmul', (['transform[..., i, :, :]', 'offset[..., i, :, :]'], {}), '(transform[..., i, :, :], offset[..., i, :, :])\n', (3033, 3080), False, 'import torch\n'), ((7996, 8056), 
'torch.matmul', 'torch.matmul', (['transform[..., pi, :, :]', 'offset[..., i, :, :]'], {}), '(transform[..., pi, :, :], offset[..., i, :, :])\n', (8008, 8056), False, 'import torch\n')] |
import pygame, math
from game import map, ui
window = pygame.display.set_mode([800, 600])
ui.window = window
screen = "game"
s = {"fullscreen": False}
running = True
gamedata = {"level": 0, "coal": 0, "iron": 1, "copper":0}
tiles = pygame.sprite.Group()
rails = pygame.sprite.Group()
carts = pygame.sprite.Group()
interactables = pygame.sprite.Group()
listmap = []
clock = pygame.time.Clock()
selected = pygame.image.load("./resources/images/selected.png")
selected2 = pygame.image.load("./resources/images/selected2.png")
box = pygame.image.load("./resources/images/box.png")
uibox = pygame.image.load("./resources/images/ui box.png")
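# Mouse is a 1x1 sprite that follows the cursor, so pygame's sprite-collision
# helpers can be reused for hit-testing carts and interactables under the pointer.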
class Mouse(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.surface.Surface([1, 1])
self.rect = self.image.get_rect()
self.rect.topleft = [0, 0]
self.clickedcart = None
self.hoveritem = None
self.tl = self.rect.topleft
self.mode = "select"
def pos(self, position):
self.rect.topleft = position
self.tl = self.rect.topleft
m = Mouse()
def snaptogrid(pos):
return [int(math.floor(pos[0] / 40)), int(math.floor(pos[1] / 40))]
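# e.g. snaptogrid((85, 130)) == [2, 3] on the 40-pixel grid used by the map.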
def loadlevel(number):
global tiles, rails, carts, gamedata, listmap, interactables
tiles, rails, interactables, listmap = map.loadmap(int(number))
carts.empty()
gamedata["level"] = number
gamedata["coal"] = 0
gamedata["iron"] = 1
gamedata["copper"] = 0
loadlevel(0)
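# Main loop: handle input events, update and draw the sprites, then cap the
# frame rate at 60 FPS.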
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
m.pos(pygame.mouse.get_pos())
if screen == "game":
if pygame.sprite.spritecollide(m, carts, False) and m.mode == "select":
carts.update("select", m, listmap)
if m.clickedcart != None:
m.mode = "action"
elif m.mode == "action" and m.clickedcart != None and listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0:
m.clickedcart.pathfind(listmap, snaptogrid(m.tl))
m.clickedcart = None
m.mode = "select"
elif event.type == pygame.MOUSEMOTION:
m.pos(pygame.mouse.get_pos())
if screen == "game":
m.hoveritem = None
if len(pygame.sprite.spritecollide(m, carts, False)) > 0:
m.hoveritem = pygame.sprite.spritecollide(m, carts, False)[0]
elif len(pygame.sprite.spritecollide(m, interactables, False)) > 0:
m.hoveritem = pygame.sprite.spritecollide(m, interactables, False)[0]
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
carts.add(map.Cart(snaptogrid(m.tl), "miner"))
if screen == "game":
window.fill([100, 100, 100])
tiles.draw(window)
carts.draw(window)
carts.update("update", m, listmap)
if not m.hoveritem == None and not m.mode == "action":
window.blit(box, [m.rect.left+10, m.rect.top+10])
ui.Resize(30)
ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25])
if m.hoveritem.type.startswith("mine") and m.hoveritem not in carts:
ui.Resize(18)
ui.Text("Carts Inside: " + str(m.hoveritem.data["carts"]), [m.rect.left+27, m.rect.top+47])
ui.Text("Max Carts: " + str(m.hoveritem.data["max"]), [m.rect.left+27, m.rect.top+60])
if not m.clickedcart == None:
window.blit(selected2, [m.clickedcart.rect.left-2, m.clickedcart.rect.top-2])
if m.mode == "action":
window.blit(box, [m.rect.left+10, m.rect.top+10])
ui.Resize(30)
try:
ui.Text(m.hoveritem.type.upper(), [m.rect.left+27, m.rect.top+25])
except:
ui.Text(m.clickedcart.type.upper(), [m.rect.left+27, m.rect.top+25])
if listmap[snaptogrid(m.tl)[0]][snaptogrid(m.tl)[1]] > 0:
ui.Resize(22)
ui.Text("Click to move", [m.rect.left+27, m.rect.top+45])
ui.Text("Cart Here", [m.rect.left+27, m.rect.top+60])
window.blit(selected, [snaptogrid(m.tl)[0]*40-2, snaptogrid(m.tl)[1]*40-2])
window.blit(uibox, [555, 475])
pygame.display.flip()
clock.tick(60)
fps = clock.get_fps()
pygame.quit()
| [
"pygame.quit",
"pygame.sprite.spritecollide",
"pygame.event.get",
"pygame.surface.Surface",
"pygame.sprite.Group",
"pygame.display.set_mode",
"pygame.display.flip",
"pygame.sprite.Sprite.__init__",
"math.floor",
"pygame.mouse.get_pos",
"game.ui.Text",
"pygame.time.Clock",
"pygame.image.load",
"game.ui.Resize"
] | [((55, 90), 'pygame.display.set_mode', 'pygame.display.set_mode', (['[800, 600]'], {}), '([800, 600])\n', (78, 90), False, 'import pygame, math\n'), ((233, 254), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (252, 254), False, 'import pygame, math\n'), ((263, 284), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (282, 284), False, 'import pygame, math\n'), ((293, 314), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (312, 314), False, 'import pygame, math\n'), ((331, 352), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (350, 352), False, 'import pygame, math\n'), ((374, 393), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (391, 393), False, 'import pygame, math\n'), ((405, 457), 'pygame.image.load', 'pygame.image.load', (['"""./resources/images/selected.png"""'], {}), "('./resources/images/selected.png')\n", (422, 457), False, 'import pygame, math\n'), ((470, 523), 'pygame.image.load', 'pygame.image.load', (['"""./resources/images/selected2.png"""'], {}), "('./resources/images/selected2.png')\n", (487, 523), False, 'import pygame, math\n'), ((530, 577), 'pygame.image.load', 'pygame.image.load', (['"""./resources/images/box.png"""'], {}), "('./resources/images/box.png')\n", (547, 577), False, 'import pygame, math\n'), ((586, 636), 'pygame.image.load', 'pygame.image.load', (['"""./resources/images/ui box.png"""'], {}), "('./resources/images/ui box.png')\n", (603, 636), False, 'import pygame, math\n'), ((4539, 4552), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4550, 4552), False, 'import pygame, math\n'), ((1540, 1558), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (1556, 1558), False, 'import pygame, math\n'), ((4472, 4493), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4491, 4493), False, 'import pygame, math\n'), ((705, 740), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (734, 740), False, 'import pygame, math\n'), ((762, 792), 'pygame.surface.Surface', 'pygame.surface.Surface', (['[1, 1]'], {}), '([1, 1])\n', (784, 792), False, 'import pygame, math\n'), ((1153, 1176), 'math.floor', 'math.floor', (['(pos[0] / 40)'], {}), '(pos[0] / 40)\n', (1163, 1176), False, 'import pygame, math\n'), ((1183, 1206), 'math.floor', 'math.floor', (['(pos[1] / 40)'], {}), '(pos[1] / 40)\n', (1193, 1206), False, 'import pygame, math\n'), ((3190, 3203), 'game.ui.Resize', 'ui.Resize', (['(30)'], {}), '(30)\n', (3199, 3203), False, 'from game import map, ui\n'), ((3380, 3393), 'game.ui.Resize', 'ui.Resize', (['(18)'], {}), '(18)\n', (3389, 3393), False, 'from game import map, ui\n'), ((3850, 3863), 'game.ui.Resize', 'ui.Resize', (['(30)'], {}), '(30)\n', (3859, 3863), False, 'from game import map, ui\n'), ((1695, 1717), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (1715, 1717), False, 'import pygame, math\n'), ((4179, 4192), 'game.ui.Resize', 'ui.Resize', (['(22)'], {}), '(22)\n', (4188, 4192), False, 'from game import map, ui\n'), ((4213, 4274), 'game.ui.Text', 'ui.Text', (['"""Click to move"""', '[m.rect.left + 27, m.rect.top + 45]'], {}), "('Click to move', [m.rect.left + 27, m.rect.top + 45])\n", (4220, 4274), False, 'from game import map, ui\n'), ((4291, 4348), 'game.ui.Text', 'ui.Text', (['"""Cart Here"""', '[m.rect.left + 27, m.rect.top + 60]'], {}), "('Cart Here', [m.rect.left + 27, m.rect.top + 60])\n", (4298, 4348), False, 'from game import map, ui\n'), ((1771, 1815), 'pygame.sprite.spritecollide', 
'pygame.sprite.spritecollide', (['m', 'carts', '(False)'], {}), '(m, carts, False)\n', (1798, 1815), False, 'import pygame, math\n'), ((2322, 2344), 'pygame.mouse.get_pos', 'pygame.mouse.get_pos', ([], {}), '()\n', (2342, 2344), False, 'import pygame, math\n'), ((2437, 2481), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['m', 'carts', '(False)'], {}), '(m, carts, False)\n', (2464, 2481), False, 'import pygame, math\n'), ((2522, 2566), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['m', 'carts', '(False)'], {}), '(m, carts, False)\n', (2549, 2566), False, 'import pygame, math\n'), ((2595, 2647), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['m', 'interactables', '(False)'], {}), '(m, interactables, False)\n', (2622, 2647), False, 'import pygame, math\n'), ((2688, 2740), 'pygame.sprite.spritecollide', 'pygame.sprite.spritecollide', (['m', 'interactables', '(False)'], {}), '(m, interactables, False)\n', (2715, 2740), False, 'import pygame, math\n')] |
"""
Algorithms for solving Parametric Risch Differential Equations.
The methods used for solving Parametric Risch Differential Equations parallel
those for solving Risch Differential Equations. See the outline in the
docstring of rde.py for more information.
The Parametric Risch Differential Equation problem is, given f, g1, ..., gm in
K(t), to determine if there exist y in K(t) and c1, ..., cm in Const(K) such
that Dy + f*y == Sum(ci*gi, (i, 1, m)), and to find such y and ci if they exist.
For the algorithms here G is a list of tuples of fractions of the terms on the
right hand side of the equation (i.e., gi in k(t)), and Q is a list of terms on
the right hand side of the equation (i.e., qi in k[t]). See the docstring of
each function for more information.
"""
from __future__ import print_function, division
from sympy.core import Dummy, ilcm, Add, Mul, Pow, S
from sympy.core.compatibility import reduce, range
from sympy.integrals.rde import (order_at, order_at_oo, weak_normalizer,
bound_degree)
from sympy.integrals.risch import (gcdex_diophantine, frac_in, derivation,
residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel,
recognize_log_derivative)
from sympy.matrices import zeros, eye
from sympy.polys import Poly, lcm, cancel, sqf_list
from sympy.polys.polymatrix import PolyMatrix as Matrix
from sympy.solvers import solve
def prde_normal_denom(fa, fd, G, DE):
"""
Parametric Risch Differential Equation - Normal part of the denominator.
Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly
normalized with respect to t, return the tuple (a, b, G, h) such that
a, h in k[t], b in k<t>, G = [g1, ..., gm] in k(t)^m, and for any solution
c1, ..., cm in Const(k) and y in k(t) of Dy + f*y == Sum(ci*gi, (i, 1, m)),
q == y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)).
"""
dn, ds = splitfactor(fd, DE)
Gas, Gds = list(zip(*G))
gd = reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t))
en, es = splitfactor(gd, DE)
p = dn.gcd(en)
h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t)))
a = dn*h
c = a*h
ba = a*fa - dn*derivation(h, DE)*fd
ba, bd = ba.cancel(fd, include=True)
G = [(c*A).cancel(D, include=True) for A, D in G]
return (a, (ba, bd), G, h)
def real_imag(ba, bd, gen):
"""
Helper function, to get the real and imaginary part of a rational function
evaluated at sqrt(-1) without actually evaluating it at sqrt(-1)
Separates the even and odd power terms by checking the degree of terms wrt
    mod 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is the real part
    of the numerator, ba[1] is the imaginary part, and bd is the denominator
of the rational function.
"""
bd = bd.as_poly(gen).as_dict()
ba = ba.as_poly(gen).as_dict()
denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()]
denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()]
bd_real = sum(r for r in denom_real)
bd_imag = sum(r for r in denom_imag)
num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()]
num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()]
ba_real = sum(r for r in num_real)
ba_imag = sum(r for r in num_imag)
ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen))
bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen)
return (ba[0], ba[1], bd)
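# Quick sanity check (not part of the original module): with ba = t**2 and
# bd = 1 this corresponds to evaluating t**2 at t = sqrt(-1), i.e. -1, and
# real_imag returns real part -1, imaginary part 0 and denominator 1
# (all as Polys in t).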
def prde_special_denom(a, ba, bd, G, DE, case='auto'):
"""
Parametric Risch Differential Equation - Special part of the denominator.
case is one of {'exp', 'tan', 'primitive'} for the hyperexponential,
hypertangent, and primitive cases, respectively. For the hyperexponential
(resp. hypertangent) case, given a derivation D on k[t] and a in k[t],
b in k<t>, and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in
k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp.
gcd(a, t**2 + 1) == 1), return the tuple (A, B, GG, h) such that A, B, h in
k[t], GG = [gg1, ..., ggm] in k(t)^m, and for any solution c1, ..., cm in
Const(k) and q in k<t> of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in
k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)).
For case == 'primitive', k<t> == k[t], so it returns (a, b, G, 1) in this
case.
"""
# TODO: Merge this with the very similar special_denom() in rde.py
if case == 'auto':
case = DE.case
if case == 'exp':
p = Poly(DE.t, DE.t)
elif case == 'tan':
p = Poly(DE.t**2 + 1, DE.t)
elif case in ['primitive', 'base']:
B = ba.quo(bd)
return (a, B, G, Poly(1, DE.t))
else:
raise ValueError("case must be one of {'exp', 'tan', 'primitive', "
"'base'}, not %s." % case)
nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t)
nc = min([order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G])
n = min(0, nc - min(0, nb))
if not nb:
# Possible cancellation.
if case == 'exp':
dcoeff = DE.d.quo(Poly(DE.t, DE.t))
with DecrementLevel(DE): # We are guaranteed to not have problems,
# because case != 'base'.
alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t)
etaa, etad = frac_in(dcoeff, DE.t)
A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
if A is not None:
Q, m, z = A
if Q == 1:
n = min(n, m)
elif case == 'tan':
dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t))
with DecrementLevel(DE): # We are guaranteed to not have problems,
# because case != 'base'.
betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t)
betad = alphad
etaa, etad = frac_in(dcoeff, DE.t)
if recognize_log_derivative(2*betaa, betad, DE):
A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
B = parametric_log_deriv(betaa, betad, etaa, etad, DE)
if A is not None and B is not None:
Q, s, z = A
# TODO: Add test
if Q == 1:
n = min(n, s/2)
N = max(0, -nb)
pN = p**N
pn = p**-n # This is 1/h
A = a*pN
B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN
G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd in G]
h = pn
# (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n)
return (A, B, G, h)
def prde_linear_constraints(a, b, G, DE):
"""
Parametric Risch Differential Equation - Generate linear constraints on the constants.
    Given a derivation D on k[t], a, b in k[t] with gcd(a, b) == 1, and
G = [g1, ..., gm] in k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a
matrix M with entries in k(t) such that for any solution c1, ..., cm in
Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)),
(c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy
a*Dp + b*p == Sum(ci*qi, (i, 1, m)).
Because M has entries in k(t), and because Matrix doesn't play well with
Poly, M will be a Matrix of Basic expressions.
"""
m = len(G)
Gns, Gds = list(zip(*G))
d = reduce(lambda i, j: i.lcm(j), Gds)
d = Poly(d, field=True)
Q = [(ga*(d).quo(gd)).div(d) for ga, gd in G]
if not all([ri.is_zero for _, ri in Q]):
N = max([ri.degree(DE.t) for _, ri in Q])
M = Matrix(N + 1, m, lambda i, j: Q[j][1].nth(i))
else:
M = Matrix(0, m, []) # No constraints, return the empty matrix.
qs, _ = list(zip(*Q))
return (qs, M)
def poly_linear_constraints(p, d):
"""
Given p = [p1, ..., pm] in k[t]^m and d in k[t], return
q = [q1, ..., qm] in k[t]^m and a matrix M with entries in k such
that Sum(ci*pi, (i, 1, m)), for c1, ..., cm in k, is divisible
by d if and only if (c1, ..., cm) is a solution of Mx = 0, in
which case the quotient is Sum(ci*qi, (i, 1, m)).
"""
m = len(p)
q, r = zip(*[pi.div(d) for pi in p])
if not all([ri.is_zero for ri in r]):
n = max([ri.degree() for ri in r])
M = Matrix(n + 1, m, lambda i, j: r[j].nth(i))
else:
M = Matrix(0, m, []) # No constraints.
return q, M
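# Worked example (not part of the original module): with d = Poly(t, t) and
# p = [Poly(t**2, t), Poly(1, t)], the quotients are q = [Poly(t, t), Poly(0, t)]
# and the remainders are [0, 1], so M == Matrix([[0, 1]]): c1*t**2 + c2 is
# divisible by t exactly when c2 == 0, in which case the quotient is c1*t.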
def constant_system(A, u, DE):
"""
Generate a system for the constant solutions.
Given a differential field (K, D) with constant field C = Const(K), a Matrix
A, and a vector (Matrix) u with coefficients in K, returns the tuple
(B, v, s), where B is a Matrix with coefficients in C and v is a vector
(Matrix) such that either v has coefficients in C, in which case s is True
and the solutions in C of Ax == u are exactly all the solutions of Bx == v,
    or v has a non-constant coefficient, in which case s is False and Ax == u has no
constant solution.
This algorithm is used both in solving parametric problems and in
determining if an element a of K is a derivative of an element of K or the
logarithmic derivative of a K-radical using the structure theorem approach.
Because Poly does not play well with Matrix yet, this algorithm assumes that
all matrix entries are Basic expressions.
"""
if not A:
return A, u
Au = A.row_join(u)
Au = Au.rref(simplify=cancel, normalize_last=False)[0]
# Warning: This will NOT return correct results if cancel() cannot reduce
# an identically zero expression to 0. The danger is that we might
# incorrectly prove that an integral is nonelementary (such as
# risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x).
# But this is a limitation in computer algebra in general, and implicit
# in the correctness of the Risch Algorithm is the computability of the
# constant field (actually, this same correctness problem exists in any
# algorithm that uses rref()).
#
# We therefore limit ourselves to constant fields that are computable
# via the cancel() function, in order to prevent a speed bottleneck from
# calling some more complex simplification function (rational function
# coefficients will fall into this class). Furthermore, (I believe) this
# problem will only crop up if the integral explicitly contains an
# expression in the constant field that is identically zero, but cannot
# be reduced to such by cancel(). Therefore, a careful user can avoid this
# problem entirely by being careful with the sorts of expressions that
# appear in his integrand in the variables other than the integration
# variable (the structure theorems should be able to completely decide these
# problems in the integration variable).
Au = Au.applyfunc(cancel)
A, u = Au[:, :-1], Au[:, -1]
for j in range(A.cols):
for i in range(A.rows):
if A[i, j].has(*DE.T):
                # This assumes that const(F(t0, ..., tn)) == const(K) == F
Ri = A[i, :]
# Rm+1; m = A.rows
Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True)/
derivation(A[i, j], DE, basic=True))
Rm1 = Rm1.applyfunc(cancel)
um1 = cancel(derivation(u[i], DE, basic=True)/
derivation(A[i, j], DE, basic=True))
for s in range(A.rows):
# A[s, :] = A[s, :] - A[s, i]*A[:, m+1]
Asj = A[s, j]
A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj]))
                    # u[s] = u[s] - A[s, j]*u[m+1]
u.row_op(s, lambda r, jj: cancel(r - Asj*um1))
A = A.col_join(Rm1)
u = u.col_join(Matrix([um1]))
return (A, u)
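# Illustrative note (not part of the original module): over k = C(x) the
# equation x*c1 + c2 == 0 has only the trivial constant solution c1 == c2 == 0.
# The elimination above detects such situations by differentiating any row
# whose entries still involve the tower variables DE.T and appending the
# result, until the system that remains has constant coefficients.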
def prde_spde(a, b, Q, n, DE):
"""
Special Polynomial Differential Equation algorithm: Parametric Version.
Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t]
with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Q, R, n1), with
Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution
c1, ..., cm in Const(k) and q in k[t] of degree at most n of
a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has
degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m))
"""
R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q]))
A = a
B = b + derivation(a, DE)
Qq = [zi - derivation(ri, DE) for ri, zi in zip(R, Z)]
R = list(R)
n1 = n - a.degree(DE.t)
return (A, B, Qq, R, n1)
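# Note (not part of the original module): each application of prde_spde lowers
# the degree bound from n to n - deg(a), which is why param_poly_rischDE below
# can simply iterate it until either n drops below zero or a and b acquire a
# non-trivial common factor.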
def prde_no_cancel_b_large(b, Q, n, DE):
"""
Parametric Poly Risch Differential Equation - No cancellation: deg(b) large enough.
Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns
h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where
d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
"""
db = b.degree(DE.t)
m = len(Q)
H = [Poly(0, DE.t)]*m
for N in range(n, -1, -1): # [n, ..., 0]
for i in range(m):
si = Q[i].nth(N + db)/b.LC()
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if all(qi.is_zero for qi in Q):
dc = -1
M = zeros(0, 2)
else:
dc = max([qi.degree(DE.t) for qi in Q])
M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
A, u = constant_system(M, zeros(dc + 1, 1), DE)
c = eye(m)
A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
return (H, A)
def prde_no_cancel_b_small(b, Q, n, DE):
"""
Parametric Poly Risch Differential Equation - No cancellation: deg(b) small enough.
Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns
h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
Dq + b*q == Sum(ci*qi, (i, 1, m)) then q = Sum(dj*hj, (j, 1, r)) where
d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
"""
m = len(Q)
H = [Poly(0, DE.t)]*m
for N in range(n, 0, -1): # [n, ..., 1]
for i in range(m):
si = Q[i].nth(N + DE.d.degree(DE.t) - 1)/(N*DE.d.LC())
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if b.degree(DE.t) > 0:
for i in range(m):
si = Poly(Q[i].nth(b.degree(DE.t))/b.LC(), DE.t)
H[i] = H[i] + si
Q[i] = Q[i] - derivation(si, DE) - b*si
if all(qi.is_zero for qi in Q):
dc = -1
M = Matrix()
else:
dc = max([qi.degree(DE.t) for qi in Q])
M = Matrix(dc + 1, m, lambda i, j: Q[j].nth(i))
A, u = constant_system(M, zeros(dc + 1, 1), DE)
c = eye(m)
A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
return (H, A)
# else: b is in k, deg(qi) < deg(Dt)
t = DE.t
if DE.case != 'base':
with DecrementLevel(DE):
t0 = DE.t # k = k0(t0)
ba, bd = frac_in(b, t0, field=True)
Q0 = [frac_in(qi.TC(), t0, field=True) for qi in Q]
f, B = param_rischDE(ba, bd, Q0, DE)
# f = [f1, ..., fr] in k^r and B is a matrix with
# m + r columns and entries in Const(k) = Const(k0)
# such that Dy0 + b*y0 = Sum(ci*qi, (i, 1, m)) has
# a solution y0 in k with c1, ..., cm in Const(k)
            # if and only if y0 = Sum(dj*fj, (j, 1, r)) where
            # d1, ..., dr are in Const(k) and
# B*Matrix([c1, ..., cm, d1, ..., dr]) == 0.
# Transform fractions (fa, fd) in f into constant
# polynomials fa/fd in k[t].
# (Is there a better way?)
f = [Poly(fa.as_expr()/fd.as_expr(), t, field=True)
for fa, fd in f]
else:
# Base case. Dy == 0 for all y in k and b == 0.
# Dy + b*y = Sum(ci*qi) is solvable if and only if
# Sum(ci*qi) == 0 in which case the solutions are
# y = d1*f1 for f1 = 1 and any d1 in Const(k) = k.
f = [Poly(1, t, field=True)] # r = 1
B = Matrix([[qi.TC() for qi in Q] + [S(0)]])
# The condition for solvability is
# B*Matrix([c1, ..., cm, d1]) == 0
# There are no constraints on d1.
# Coefficients of t^j (j > 0) in Sum(ci*qi) must be zero.
d = max([qi.degree(DE.t) for qi in Q])
if d > 0:
M = Matrix(d, m, lambda i, j: Q[j].nth(i + 1))
A, _ = constant_system(M, zeros(d, 1), DE)
else:
# No constraints on the hj.
A = Matrix(0, m, [])
# Solutions of the original equation are
# y = Sum(dj*fj, (j, 1, r) + Sum(ei*hi, (i, 1, m)),
# where ei == ci (i = 1, ..., m), when
# A*Matrix([c1, ..., cm]) == 0 and
# B*Matrix([c1, ..., cm, d1, ..., dr]) == 0
# Build combined constraint matrix with m + r + m columns.
r = len(f)
I = eye(m)
A = A.row_join(zeros(A.rows, r + m))
B = B.row_join(zeros(B.rows, m))
C = I.row_join(zeros(m, r)).row_join(-I)
return f + H, A.col_join(B).col_join(C)
def prde_cancel_liouvillian(b, Q, n, DE):
"""
Pg, 237.
"""
H = []
# Why use DecrementLevel? Below line answers that:
# Assuming that we can solve such problems over 'k' (not k[t])
if DE.case == 'primitive':
with DecrementLevel(DE):
ba, bd = frac_in(b, DE.t, field=True)
for i in range(n, -1, -1):
if DE.case == 'exp': # this re-checking can be avoided
with DecrementLevel(DE):
ba, bd = frac_in(b + i*derivation(DE.t, DE)/DE.t,
DE.t, field=True)
with DecrementLevel(DE):
Qy = [frac_in(q.nth(i), DE.t, field=True) for q in Q]
fi, Ai = param_rischDE(ba, bd, Qy, DE)
fi = [Poly(fa.as_expr()/fd.as_expr(), DE.t, field=True)
for fa, fd in fi]
ri = len(fi)
if i == n:
M = Ai
else:
M = Ai.col_join(M.row_join(zeros(M.rows, ri)))
Fi, hi = [None]*ri, [None]*ri
# from eq. on top of p.238 (unnumbered)
for j in range(ri):
hji = fi[j]*DE.t**i
hi[j] = hji
# building up Sum(djn*(D(fjn*t^n) - b*fjnt^n))
Fi[j] = -(derivation(hji, DE) - b*hji)
H += hi
# in the next loop instead of Q it has
# to be Q + Fi taking its place
Q = Q + Fi
return (H, M)
def param_poly_rischDE(a, b, q, n, DE):
"""Polynomial solutions of a parametric Risch differential equation.
Given a derivation D in k[t], a, b in k[t] relatively prime, and q
= [q1, ..., qm] in k[t]^m, return h = [h1, ..., hr] in k[t]^r and
a matrix A with m + r columns and entries in Const(k) such that
a*Dp + b*p = Sum(ci*qi, (i, 1, m)) has a solution p of degree <= n
in k[t] with c1, ..., cm in Const(k) if and only if p = Sum(dj*hj,
(j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
d1, ..., dr) is a solution of Ax == 0.
"""
m = len(q)
if n < 0:
# Only the trivial zero solution is possible.
# Find relations between the qi.
if all([qi.is_zero for qi in q]):
return [], zeros(1, m) # No constraints.
N = max([qi.degree(DE.t) for qi in q])
M = Matrix(N + 1, m, lambda i, j: q[j].nth(i))
A, _ = constant_system(M, zeros(M.rows, 1), DE)
return [], A
if a.is_ground:
# Normalization: a = 1.
a = a.LC()
b, q = b.quo_ground(a), [qi.quo_ground(a) for qi in q]
if not b.is_zero and (DE.case == 'base' or
b.degree() > max(0, DE.d.degree() - 1)):
return prde_no_cancel_b_large(b, q, n, DE)
elif ((b.is_zero or b.degree() < DE.d.degree() - 1)
and (DE.case == 'base' or DE.d.degree() >= 2)):
return prde_no_cancel_b_small(b, q, n, DE)
elif (DE.d.degree() >= 2 and
b.degree() == DE.d.degree() - 1 and
n > -b.as_poly().LC()/DE.d.as_poly().LC()):
raise NotImplementedError("prde_no_cancel_b_equal() is "
"not yet implemented.")
else:
# Liouvillian cases
if DE.case == 'primitive' or DE.case == 'exp':
return prde_cancel_liouvillian(b, q, n, DE)
else:
raise NotImplementedError("non-linear and hypertangent "
"cases have not yet been implemented")
# else: deg(a) > 0
# Iterate SPDE as long as possible cumulating coefficient
# and terms for the recovery of original solutions.
alpha, beta = 1, [0]*m
while n >= 0: # and a, b relatively prime
a, b, q, r, n = prde_spde(a, b, q, n, DE)
beta = [betai + alpha*ri for betai, ri in zip(beta, r)]
alpha *= a
# Solutions p of a*Dp + b*p = Sum(ci*qi) correspond to
# solutions alpha*p + Sum(ci*betai) of the initial equation.
d = a.gcd(b)
if not d.is_ground:
break
# a*Dp + b*p = Sum(ci*qi) may have a polynomial solution
# only if the sum is divisible by d.
qq, M = poly_linear_constraints(q, d)
# qq = [qq1, ..., qqm] where qqi = qi.quo(d).
# M is a matrix with m columns an entries in k.
# Sum(fi*qi, (i, 1, m)), where f1, ..., fm are elements of k, is
# divisible by d if and only if M*Matrix([f1, ..., fm]) == 0,
# in which case the quotient is Sum(fi*qqi).
A, _ = constant_system(M, zeros(M.rows, 1), DE)
# A is a matrix with m columns and entries in Const(k).
# Sum(ci*qqi) is Sum(ci*qi).quo(d), and the remainder is zero
# for c1, ..., cm in Const(k) if and only if
# A*Matrix([c1, ...,cm]) == 0.
V = A.nullspace()
# V = [v1, ..., vu] where each vj is a column matrix with
# entries aj1, ..., ajm in Const(k).
# Sum(aji*qi) is divisible by d with exact quotient Sum(aji*qqi).
# Sum(ci*qi) is divisible by d if and only if ci = Sum(dj*aji)
# (i = 1, ..., m) for some d1, ..., du in Const(k).
# In that case, solutions of
# a*Dp + b*p = Sum(ci*qi) = Sum(dj*Sum(aji*qi))
# are the same as those of
# (a/d)*Dp + (b/d)*p = Sum(dj*rj)
# where rj = Sum(aji*qqi).
if not V: # No non-trivial solution.
return [], eye(m) # Could return A, but this has
# the minimum number of rows.
Mqq = Matrix([qq]) # A single row.
r = [(Mqq*vj)[0] for vj in V] # [r1, ..., ru]
# Solutions of (a/d)*Dp + (b/d)*p = Sum(dj*rj) correspond to
# solutions alpha*p + Sum(Sum(dj*aji)*betai) of the initial
# equation. These are equal to alpha*p + Sum(dj*fj) where
# fj = Sum(aji*betai).
Mbeta = Matrix([beta])
f = [(Mbeta*vj)[0] for vj in V] # [f1, ..., fu]
#
# Solve the reduced equation recursively.
#
g, B = param_poly_rischDE(a.quo(d), b.quo(d), r, n, DE)
    # g = [g1, ..., gv] in k[t]^v and B is a matrix with u + v
# columns and entries in Const(k) such that
# (a/d)*Dp + (b/d)*p = Sum(dj*rj) has a solution p of degree <= n
# in k[t] if and only if p = Sum(ek*gk) where e1, ..., ev are in
# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
# The solutions of the original equation are then
# Sum(dj*fj, (j, 1, u)) + alpha*Sum(ek*gk, (k, 1, v)).
# Collect solution components.
h = f + [alpha*gk for gk in g]
# Build combined relation matrix.
A = -eye(m)
for vj in V:
A = A.row_join(vj)
A = A.row_join(zeros(m, len(g)))
A = A.col_join(zeros(B.rows, m).row_join(B))
return h, A
def param_rischDE(fa, fd, G, DE):
"""
Solve a Parametric Risch Differential Equation: Dy + f*y == Sum(ci*Gi, (i, 1, m)).
Given a derivation D in k(t), f in k(t), and G
= [G1, ..., Gm] in k(t)^m, return h = [h1, ..., hr] in k(t)^r and
a matrix A with m + r columns and entries in Const(k) such that
Dy + f*y = Sum(ci*Gi, (i, 1, m)) has a solution y
in k(t) with c1, ..., cm in Const(k) if and only if y = Sum(dj*hj,
(j, 1, r)) where d1, ..., dr are in Const(k) and (c1, ..., cm,
d1, ..., dr) is a solution of Ax == 0.
Elements of k(t) are tuples (a, d) with a and d in k[t].
"""
m = len(G)
q, (fa, fd) = weak_normalizer(fa, fd, DE)
# Solutions of the weakly normalized equation Dz + f*z = q*Sum(ci*Gi)
# correspond to solutions y = z/q of the original equation.
gamma = q
G = [(q*ga).cancel(gd, include=True) for ga, gd in G]
a, (ba, bd), G, hn = prde_normal_denom(fa, fd, G, DE)
# Solutions q in k<t> of a*Dq + b*q = Sum(ci*Gi) correspond
# to solutions z = q/hn of the weakly normalized equation.
gamma *= hn
A, B, G, hs = prde_special_denom(a, ba, bd, G, DE)
# Solutions p in k[t] of A*Dp + B*p = Sum(ci*Gi) correspond
# to solutions q = p/hs of the previous equation.
gamma *= hs
g = A.gcd(B)
a, b, g = A.quo(g), B.quo(g), [gia.cancel(gid*g, include=True) for
gia, gid in G]
# a*Dp + b*p = Sum(ci*gi) may have a polynomial solution
# only if the sum is in k[t].
q, M = prde_linear_constraints(a, b, g, DE)
# q = [q1, ..., qm] where qi in k[t] is the polynomial component
# of the partial fraction expansion of gi.
# M is a matrix with m columns and entries in k.
# Sum(fi*gi, (i, 1, m)), where f1, ..., fm are elements of k,
# is a polynomial if and only if M*Matrix([f1, ..., fm]) == 0,
# in which case the sum is equal to Sum(fi*qi).
M, _ = constant_system(M, zeros(M.rows, 1), DE)
# M is a matrix with m columns and entries in Const(k).
# Sum(ci*gi) is in k[t] for c1, ..., cm in Const(k)
# if and only if M*Matrix([c1, ..., cm]) == 0,
# in which case the sum is Sum(ci*qi).
## Reduce number of constants at this point
V = M.nullspace()
# V = [v1, ..., vu] where each vj is a column matrix with
# entries aj1, ..., ajm in Const(k).
# Sum(aji*gi) is in k[t] and equal to Sum(aji*qi) (j = 1, ..., u).
    # Sum(ci*gi) is in k[t] if and only if ci = Sum(dj*aji)
# (i = 1, ..., m) for some d1, ..., du in Const(k).
# In that case,
# Sum(ci*gi) = Sum(ci*qi) = Sum(dj*Sum(aji*qi)) = Sum(dj*rj)
# where rj = Sum(aji*qi) (j = 1, ..., u) in k[t].
if not V: # No non-trivial solution
return [], eye(m)
Mq = Matrix([q]) # A single row.
r = [(Mq*vj)[0] for vj in V] # [r1, ..., ru]
# Solutions of a*Dp + b*p = Sum(dj*rj) correspond to solutions
# y = p/gamma of the initial equation with ci = Sum(dj*aji).
try:
# We try n=5. At least for prde_spde, it will always
# terminate no matter what n is.
n = bound_degree(a, b, r, DE, parametric=True)
except NotImplementedError:
# A temporary bound is set. Eventually, it will be removed.
# the currently added test case takes large time
# even with n=5, and much longer with large n's.
n = 5
h, B = param_poly_rischDE(a, b, r, n, DE)
    # h = [h1, ..., hv] in k[t]^v and B is a matrix with u + v
# columns and entries in Const(k) such that
# a*Dp + b*p = Sum(dj*rj) has a solution p of degree <= n
# in k[t] if and only if p = Sum(ek*hk) where e1, ..., ev are in
# Const(k) and B*Matrix([d1, ..., du, e1, ..., ev]) == 0.
# The solutions of the original equation for ci = Sum(dj*aji)
# (i = 1, ..., m) are then y = Sum(ek*hk, (k, 1, v))/gamma.
## Build combined relation matrix with m + u + v columns.
A = -eye(m)
for vj in V:
A = A.row_join(vj)
A = A.row_join(zeros(m, len(h)))
A = A.col_join(zeros(B.rows, m).row_join(B))
## Eliminate d1, ..., du.
W = A.nullspace()
# W = [w1, ..., wt] where each wl is a column matrix with
# entries blk (k = 1, ..., m + u + v) in Const(k).
# The vectors (bl1, ..., blm) generate the space of those
# constant families (c1, ..., cm) for which a solution of
# the equation Dy + f*y == Sum(ci*Gi) exists. They generate
# the space and form a basis except possibly when Dy + f*y == 0
    # is solvable in k(t). The corresponding solutions are
# y = Sum(blk'*hk, (k, 1, v))/gamma, where k' = k + m + u.
v = len(h)
M = Matrix([wl[:m] + wl[-v:] for wl in W]) # excise dj's.
N = M.nullspace()
# N = [n1, ..., ns] where the ni in Const(k)^(m + v) are column
# vectors generating the space of linear relations between
# c1, ..., cm, e1, ..., ev.
C = Matrix([ni[:] for ni in N]) # rows n1, ..., ns.
return [hk.cancel(gamma, include=True) for hk in h], C
def limited_integrate_reduce(fa, fd, G, DE):
"""
Simpler version of step 1 & 2 for the limited integration problem.
Given a derivation D on k(t) and f, g1, ..., gn in k(t), return
(a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer,
g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v in k(t),
c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k<t>, and
p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore,
if S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian
over k, then deg(p) <= N.
So that the special part is always computed, this function calls the more
general prde_special_denom() automatically if it cannot determine that
S1irr == Sirr. Furthermore, it will automatically call bound_degree() when
t is linear and non-Liouvillian, which for the transcendental case, implies
that Dt == a*t + b with for some a, b in k*.
"""
dn, ds = splitfactor(fd, DE)
E = [splitfactor(gd, DE) for _, gd in G]
En, Es = list(zip(*E))
c = reduce(lambda i, j: i.lcm(j), (dn,) + En) # lcm(dn, en1, ..., enm)
hn = c.gcd(c.diff(DE.t))
a = hn
b = -derivation(hn, DE)
N = 0
# These are the cases where we know that S1irr = Sirr, but there could be
# others, and this algorithm will need to be extended to handle them.
if DE.case in ['base', 'primitive', 'exp', 'tan']:
hs = reduce(lambda i, j: i.lcm(j), (ds,) + Es) # lcm(ds, es1, ..., esm)
a = hn*hs
b -= (hn*derivation(hs, DE)).quo(hs)
mu = min(order_at_oo(fa, fd, DE.t), min([order_at_oo(ga, gd, DE.t) for
ga, gd in G]))
# So far, all the above are also nonlinear or Liouvillian, but if this
# changes, then this will need to be updated to call bound_degree()
# as per the docstring of this function (DE.case == 'other_linear').
N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) - mu)
else:
# TODO: implement this
raise NotImplementedError
V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G]
return (a, b, a, N, (a*hn*fa).cancel(fd, include=True), V)
def limited_integrate(fa, fd, G, DE):
"""
Solves the limited integration problem: f = Dv + Sum(ci*wi, (i, 1, n))
"""
fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic()
# interpretting limited integration problem as a
# parametric Risch DE problem
Fa = Poly(0, DE.t)
Fd = Poly(1, DE.t)
G = [(fa, fd)] + G
h, A = param_rischDE(Fa, Fd, G, DE)
V = A.nullspace()
V = [v for v in V if v[0] != 0]
if not V:
return None
else:
# we can take any vector from V, we take V[0]
c0 = V[0][0]
# v = [-1, c1, ..., cm, d1, ..., dr]
v = V[0]/(-c0)
r = len(h)
m = len(v) - r - 1
C = list(v[1: m + 1])
y = -sum([v[m + 1 + i]*h[i][0].as_expr()/h[i][1].as_expr() \
for i in range(r)])
y_num, y_den = y.as_numer_denom()
Ya, Yd = Poly(y_num, DE.t), Poly(y_den, DE.t)
Y = Ya*Poly(1/Yd.LC(), DE.t), Yd.monic()
return Y, C
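# Note (not part of the original module): when limited_integrate succeeds,
# f == Dv + Sum(ci*wi), so Integral(f) can be written as v plus the linear
# combination Sum(ci*Integral(wi)) of the prescribed integrands wi.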
def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None):
"""
Parametric logarithmic derivative heuristic.
Given a derivation D on k[t], f in k(t), and a hyperexponential monomial
theta over k(t), raises either NotImplementedError, in which case the
heuristic failed, or returns None, in which case it has proven that no
solution exists, or returns a solution (n, m, v) of the equation
n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ with n != 0.
If this heuristic fails, the structure theorem approach will need to be
used.
The argument w == Dtheta/theta
"""
# TODO: finish writing this and write tests
c1 = c1 or Dummy('c1')
p, a = fa.div(fd)
q, b = wa.div(wd)
B = max(0, derivation(DE.t, DE).degree(DE.t) - 1)
C = max(p.degree(DE.t), q.degree(DE.t))
if q.degree(DE.t) > B:
eqs = [p.nth(i) - c1*q.nth(i) for i in range(B + 1, C + 1)]
s = solve(eqs, c1)
if not s or not s[c1].is_Rational:
# deg(q) > B, no solution for c.
return None
M, N = s[c1].as_numer_denom()
nfmwa = N*fa*wd - M*wa*fd
nfmwd = fd*wd
        Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE, 'auto')
if Qv is None:
# (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
return None
Q, v = Qv
if Q.is_zero or v.is_zero:
return None
return (Q*N, Q*M, v)
if p.degree(DE.t) > B:
return None
c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC())
l = fd.monic().lcm(wd.monic())*Poly(c, DE.t)
ln, ls = splitfactor(l, DE)
z = ls*ln.gcd(ln.diff(DE.t))
if not z.has(DE.t):
# TODO: We treat this as 'no solution', until the structure
# theorem version of parametric_log_deriv is implemented.
return None
u1, r1 = (fa*l.quo(fd)).div(z) # (l*f).div(z)
u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z)
eqs = [r1.nth(i) - c1*r2.nth(i) for i in range(z.degree(DE.t))]
s = solve(eqs, c1)
if not s or not s[c1].is_Rational:
# deg(q) <= B, no solution for c.
return None
M, N = s[c1].as_numer_denom()
nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd
nfmwd = fd*wd
Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE)
if Qv is None:
# (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
return None
Q, v = Qv
if Q.is_zero or v.is_zero:
return None
return (Q*N, Q*M, v)
def parametric_log_deriv(fa, fd, wa, wd, DE):
# TODO: Write the full algorithm using the structure theorems.
# try:
A = parametric_log_deriv_heu(fa, fd, wa, wd, DE)
# except NotImplementedError:
# Heuristic failed, we have to use the full method.
# TODO: This could be implemented more efficiently.
# It isn't too worrisome, because the heuristic handles most difficult
# cases.
return A
def is_deriv_k(fa, fd, DE):
r"""
Checks if Df/f is the derivative of an element of k(t).
a in k(t) is the derivative of an element of k(t) if there exists b in k(t)
such that a = Db. Either returns (ans, u), such that Df/f == Du, or None,
which means that Df/f is not the derivative of an element of k(t). ans is
a list of tuples such that Add(*[i*j for i, j in ans]) == u. This is useful
for seeing exactly which elements of k(t) produce u.
This function uses the structure theorem approach, which says that for any
    f in K, Df/f is the derivative of an element of K if and only if there are ri
in QQ such that::
        Sum(ri*Dt_i, i in L_K/C(x)) + Sum(ri*Dt_i/t_i, i in E_K/C(x)) == Df/f
Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
hyperexponential monomials of K over C(x)). If K is an elementary extension
over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
and L_K/C(x) are disjoint.
The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
recursively using this same function. Therefore, it is required to pass
them as indices to D (or T). E_args are the arguments of the
hyperexponentials indexed by E_K (i.e., if i is in E_K, then T[i] ==
exp(E_args[i])). This is needed to compute the final answer u such that
Df/f == Du.
    log(f) will be the same as u up to an additive constant. This is because
they will both behave the same as monomials. For example, both log(x) and
log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant.
Therefore, the term const is returned. const is such that
log(const) + f == u. This is calculated by dividing the arguments of one
logarithm from the other. Therefore, it is necessary to pass the arguments
of the logarithmic terms in L_args.
To handle the case where we are given Df/f, not f, use is_deriv_k_in_field().
See also
========
is_log_deriv_k_t_radical_in_field, is_log_deriv_k_t_radical
"""
# Compute Df/f
dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd*fa
dfa, dfd = dfa.cancel(dfd, include=True)
# Our assumption here is that each monomial is recursively transcendental
if len(DE.exts) != len(DE.D):
if [i for i in DE.cases if i == 'tan'] or \
(set([i for i in DE.cases if i == 'primitive']) -
set(DE.indices('log'))):
raise NotImplementedError("Real version of the structure "
"theorems with hypertangent support is not yet implemented.")
# TODO: What should really be done in this case?
raise NotImplementedError("Nonelementary extensions not supported "
"in the structure theorems.")
E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
lhs = Matrix([E_part + L_part])
rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
A, u = constant_system(lhs, rhs, DE)
if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
# If the elements of u are not all constant
# Note: See comment in constant_system
# Also note: derivation(basic=True) calls cancel()
return None
else:
if not all(i.is_Rational for i in u):
raise NotImplementedError("Cannot work with non-rational "
"coefficients in this case.")
else:
terms = ([DE.extargs[i] for i in DE.indices('exp')] +
[DE.T[i] for i in DE.indices('log')])
ans = list(zip(terms, u))
result = Add(*[Mul(i, j) for i, j in ans])
argterms = ([DE.T[i] for i in DE.indices('exp')] +
[DE.extargs[i] for i in DE.indices('log')])
l = []
ld = []
for i, j in zip(argterms, u):
# We need to get around things like sqrt(x**2) != x
# and also sqrt(x**2 + 2*x + 1) != x + 1
# Issue 10798: i need not be a polynomial
i, d = i.as_numer_denom()
icoeff, iterms = sqf_list(i)
l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms])))
dcoeff, dterms = sqf_list(d)
ld.append(Mul(*([Pow(dcoeff, j)] + [Pow(b, e*j) for b, e in dterms])))
const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld))
return (ans, result, const)
def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):
r"""
Checks if Df is the logarithmic derivative of a k(t)-radical.
b in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u.
Either returns (ans, u, n, const) or None, which means that Df cannot be
written as the logarithmic derivative of a k(t)-radical. ans is a list of
tuples such that Mul(*[i**j for i, j in ans]) == u. This is useful for
seeing exactly what elements of k(t) produce u.
This function uses the structure theorem approach, which says that for any
f in K, Df is the logarithmic derivative of a K-radical if and only if there
are ri in QQ such that::
        Sum(ri*Dt_i, i in L_K/C(x)) + Sum(ri*Dt_i/t_i, i in E_K/C(x)) == Df
Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
hyperexponential monomials of K over C(x)). If K is an elementary extension
over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
and L_K/C(x) are disjoint.
The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
recursively using this same function. Therefore, it is required to pass
them as indices to D (or T). L_args are the arguments of the logarithms
indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])). This is
needed to compute the final answer u such that n*f == Du/u.
exp(f) will be the same as u up to a multiplicative constant. This is
because they will both behave the same as monomials. For example, both
exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const
is returned. const is such that exp(const)*f == u. This is calculated by
subtracting the arguments of one exponential from the other. Therefore, it
is necessary to pass the arguments of the exponential terms in E_args.
To handle the case where we are given Df, not f, use
is_log_deriv_k_t_radical_in_field().
See also
========
is_log_deriv_k_t_radical_in_field, is_deriv_k
"""
H = []
if Df:
dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2,
include=True)
else:
dfa, dfd = fa, fd
# Our assumption here is that each monomial is recursively transcendental
if len(DE.exts) != len(DE.D):
if [i for i in DE.cases if i == 'tan'] or \
(set([i for i in DE.cases if i == 'primitive']) -
set(DE.indices('log'))):
raise NotImplementedError("Real version of the structure "
"theorems with hypertangent support is not yet implemented.")
# TODO: What should really be done in this case?
raise NotImplementedError("Nonelementary extensions not supported "
"in the structure theorems.")
E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.indices('exp')]
L_part = [DE.D[i].as_expr() for i in DE.indices('log')]
lhs = Matrix([E_part + L_part])
rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
A, u = constant_system(lhs, rhs, DE)
if not all(derivation(i, DE, basic=True).is_zero for i in u) or not A:
# If the elements of u are not all constant
# Note: See comment in constant_system
# Also note: derivation(basic=True) calls cancel()
return None
else:
if not all(i.is_Rational for i in u):
# TODO: But maybe we can tell if they're not rational, like
# log(2)/log(3). Also, there should be an option to continue
# anyway, even if the result might potentially be wrong.
raise NotImplementedError("Cannot work with non-rational "
"coefficients in this case.")
else:
n = reduce(ilcm, [i.as_numer_denom()[1] for i in u])
u *= n
terms = ([DE.T[i] for i in DE.indices('exp')] +
[DE.extargs[i] for i in DE.indices('log')])
ans = list(zip(terms, u))
result = Mul(*[Pow(i, j) for i, j in ans])
# exp(f) will be the same as result up to a multiplicative
# constant. We now find the log of that constant.
argterms = ([DE.extargs[i] for i in DE.indices('exp')] +
[DE.T[i] for i in DE.indices('log')])
const = cancel(fa.as_expr()/fd.as_expr() -
Add(*[Mul(i, j/n) for i, j in zip(argterms, u)]))
return (ans, result, n, const)
def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None):
"""
Checks if f can be written as the logarithmic derivative of a k(t)-radical.
It differs from is_log_deriv_k_t_radical(fa, fd, DE, Df=False)
for any given fa, fd, DE in that it finds the solution in the
given field not in some (possibly unspecified extension) and
"in_field" with the function name is used to indicate that.
f in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*f == Du/u.
Either returns (n, u) or None, which means that f cannot be written as the
logarithmic derivative of a k(t)-radical.
case is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive,
hyperexponential, and hypertangent cases, respectively. If case is 'auto',
it will attempt to determine the type of the derivation automatically.
See also
========
is_log_deriv_k_t_radical, is_deriv_k
"""
fa, fd = fa.cancel(fd, include=True)
# f must be simple
n, s = splitfactor(fd, DE)
if not s.is_one:
        pass  # the simplicity requirement is not enforced here (no-op branch)
z = z or Dummy('z')
H, b = residue_reduce(fa, fd, DE, z=z)
if not b:
# I will have to verify, but I believe that the answer should be
# None in this case. This should never happen for the
# functions given when solving the parametric logarithmic
# derivative problem when integration elementary functions (see
# Bronstein's book, page 255), so most likely this indicates a bug.
return None
roots = [(i, i.real_roots()) for i, _ in H]
if not all(len(j) == i.degree() and all(k.is_Rational for k in j) for
i, j in roots):
# If f is the logarithmic derivative of a k(t)-radical, then all the
# roots of the resultant must be rational numbers.
return None
# [(a, i), ...], where i*log(a) is a term in the log-part of the integral
# of f
respolys, residues = list(zip(*roots)) or [[], []]
    # Note: this might be empty, but everything below should work fine in that
# case (it should be the same as if it were [[1, 1]])
residueterms = [(H[j][1].subs(z, i), i) for j in range(len(H)) for
i in residues[j]]
# TODO: finish writing this and write tests
p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z))
p = p.as_poly(DE.t)
if p is None:
# f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical
return None
if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)):
return None
if case == 'auto':
case = DE.case
if case == 'exp':
wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True)
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t, cancel=True)
wa, wd = frac_in((wa, wd), DE.t)
A = parametric_log_deriv(pa, pd, wa, wd, DE)
if A is None:
return None
n, e, u = A
u *= DE.t**e
elif case == 'primitive':
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t)
A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto')
if A is None:
return None
n, u = A
elif case == 'base':
# TODO: we can use more efficient residue reduction from ratint()
if not fd.is_sqf or fa.degree() >= fd.degree():
# f is the logarithmic derivative in the base case if and only if
# f = fa/fd, fd is square-free, deg(fa) < deg(fd), and
# gcd(fa, fd) == 1. The last condition is handled by cancel() above.
return None
# Note: if residueterms = [], returns (1, 1)
# f had better be 0 in that case.
n = reduce(ilcm, [i.as_numer_denom()[1] for _, i in residueterms], S(1))
u = Mul(*[Pow(i, j*n) for i, j in residueterms])
return (n, u)
elif case == 'tan':
raise NotImplementedError("The hypertangent case is "
"not yet implemented for is_log_deriv_k_t_radical_in_field()")
elif case in ['other_linear', 'other_nonlinear']:
# XXX: If these are supported by the structure theorems, change to NotImplementedError.
raise ValueError("The %s case is not supported in this function." % case)
else:
raise ValueError("case must be one of {'primitive', 'exp', 'tan', "
"'base', 'auto'}, not %s" % case)
common_denom = reduce(ilcm, [i.as_numer_denom()[1] for i in [j for _, j in
residueterms]] + [n], S(1))
residueterms = [(i, j*common_denom) for i, j in residueterms]
m = common_denom//n
if common_denom != n*m: # Verify exact division
raise ValueError("Inexact division")
u = cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms]))
return (common_denom, u)
| [
"sympy.integrals.risch.derivation",
"sympy.polys.Poly",
"sympy.matrices.zeros",
"sympy.integrals.risch.frac_in",
"sympy.integrals.risch.DecrementLevel",
"sympy.integrals.rde.order_at",
"sympy.matrices.eye",
"sympy.core.Mul",
"sympy.integrals.rde.bound_degree",
"sympy.polys.polymatrix.PolyMatrix",
"sympy.core.S",
"sympy.integrals.risch.splitfactor",
"sympy.integrals.risch.residue_reduce",
"sympy.integrals.risch.recognize_log_derivative",
"sympy.integrals.rde.order_at_oo",
"sympy.core.compatibility.range",
"sympy.polys.cancel",
"sympy.solvers.solve",
"sympy.core.Pow",
"sympy.core.Dummy",
"sympy.integrals.rde.weak_normalizer",
"sympy.polys.sqf_list",
"sympy.integrals.risch.residue_reduce_derivation",
"sympy.integrals.risch.gcdex_diophantine"
] | [((1901, 1920), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['fd', 'DE'], {}), '(fd, DE)\n', (1912, 1920), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((2022, 2041), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['gd', 'DE'], {}), '(gd, DE)\n', (2033, 2041), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((7665, 7684), 'sympy.polys.Poly', 'Poly', (['d'], {'field': '(True)'}), '(d, field=True)\n', (7669, 7684), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((11160, 11173), 'sympy.core.compatibility.range', 'range', (['A.cols'], {}), '(A.cols)\n', (11165, 11173), False, 'from sympy.core.compatibility import reduce, range\n'), ((13582, 13598), 'sympy.core.compatibility.range', 'range', (['n', '(-1)', '(-1)'], {}), '(n, -1, -1)\n', (13587, 13598), False, 'from sympy.core.compatibility import reduce, range\n'), ((14063, 14069), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (14066, 14069), False, 'from sympy.matrices import zeros, eye\n'), ((14800, 14815), 'sympy.core.compatibility.range', 'range', (['n', '(0)', '(-1)'], {}), '(n, 0, -1)\n', (14805, 14815), False, 'from sympy.core.compatibility import reduce, range\n'), ((17669, 17675), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (17672, 17675), False, 'from sympy.matrices import zeros, eye\n'), ((18179, 18195), 'sympy.core.compatibility.range', 'range', (['n', '(-1)', '(-1)'], {}), '(n, -1, -1)\n', (18184, 18195), False, 'from sympy.core.compatibility import reduce, range\n'), ((23184, 23196), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[qq]'], {}), '([qq])\n', (23190, 23196), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((23496, 23510), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[beta]'], {}), '([beta])\n', (23502, 23510), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((25045, 25072), 'sympy.integrals.rde.weak_normalizer', 'weak_normalizer', (['fa', 'fd', 'DE'], {}), '(fa, fd, DE)\n', (25060, 25072), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((27136, 27147), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[q]'], {}), '([q])\n', (27142, 27147), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((29013, 29053), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[(wl[:m] + wl[-v:]) for wl in W]'], {}), '([(wl[:m] + wl[-v:]) for wl in W])\n', (29019, 29053), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((29262, 29289), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[ni[:] for ni in N]'], {}), '([ni[:] for ni in N])\n', (29268, 29289), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((30375, 30394), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['fd', 'DE'], {}), '(fd, DE)\n', (30386, 30394), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((31873, 31886), 'sympy.polys.Poly', 'Poly', (['(0)', 'DE.t'], {}), '(0, DE.t)\n', (31877, 31886), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((31896, 31909), 'sympy.polys.Poly', 'Poly', (['(1)', 'DE.t'], {}), '(1, DE.t)\n', (31900, 31909), False, 'from 
sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((34238, 34256), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['l', 'DE'], {}), '(l, DE)\n', (34249, 34256), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((34649, 34663), 'sympy.solvers.solve', 'solve', (['eqs', 'c1'], {}), '(eqs, c1)\n', (34654, 34663), False, 'from sympy.solvers import solve\n'), ((39354, 39379), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[E_part + L_part]'], {}), '([E_part + L_part])\n', (39360, 39379), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((44783, 44808), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[E_part + L_part]'], {}), '([E_part + L_part])\n', (44789, 44808), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((47372, 47391), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['fd', 'DE'], {}), '(fd, DE)\n', (47383, 47391), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((47462, 47493), 'sympy.integrals.risch.residue_reduce', 'residue_reduce', (['fa', 'fd', 'DE'], {'z': 'z'}), '(fa, fd, DE, z=z)\n', (47476, 47493), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((1994, 2007), 'sympy.polys.Poly', 'Poly', (['(1)', 'DE.t'], {}), '(1, DE.t)\n', (1998, 2007), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((4682, 4698), 'sympy.polys.Poly', 'Poly', (['DE.t', 'DE.t'], {}), '(DE.t, DE.t)\n', (4686, 4698), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((4997, 5018), 'sympy.integrals.rde.order_at', 'order_at', (['ba', 'p', 'DE.t'], {}), '(ba, p, DE.t)\n', (5005, 5018), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((5021, 5042), 'sympy.integrals.rde.order_at', 'order_at', (['bd', 'p', 'DE.t'], {}), '(bd, p, DE.t)\n', (5029, 5042), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((7911, 7927), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['(0)', 'm', '[]'], {}), '(0, m, [])\n', (7917, 7927), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((8606, 8622), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['(0)', 'm', '[]'], {}), '(0, m, [])\n', (8612, 8622), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((11192, 11205), 'sympy.core.compatibility.range', 'range', (['A.rows'], {}), '(A.rows)\n', (11197, 11205), False, 'from sympy.core.compatibility import reduce, range\n'), ((12753, 12770), 'sympy.integrals.risch.derivation', 'derivation', (['a', 'DE'], {}), '(a, DE)\n', (12763, 12770), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((13632, 13640), 'sympy.core.compatibility.range', 'range', (['m'], {}), '(m)\n', (13637, 13640), False, 'from sympy.core.compatibility import reduce, range\n'), ((13877, 13888), 'sympy.matrices.zeros', 'zeros', (['(0)', '(2)'], {}), '(0, 2)\n', (13882, 13888), False, 'from sympy.matrices import zeros, eye\n'), ((14033, 14049), 'sympy.matrices.zeros', 'zeros', (['(dc + 1)', '(1)'], {}), '(dc + 1, 1)\n', (14038, 
14049), False, 'from sympy.matrices import zeros, eye\n'), ((14849, 14857), 'sympy.core.compatibility.range', 'range', (['m'], {}), '(m)\n', (14854, 14857), False, 'from sympy.core.compatibility import reduce, range\n'), ((15100, 15108), 'sympy.core.compatibility.range', 'range', (['m'], {}), '(m)\n', (15105, 15108), False, 'from sympy.core.compatibility import reduce, range\n'), ((15531, 15537), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (15534, 15537), False, 'from sympy.matrices import zeros, eye\n'), ((17326, 17342), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['(0)', 'm', '[]'], {}), '(0, m, [])\n', (17332, 17342), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((17695, 17715), 'sympy.matrices.zeros', 'zeros', (['A.rows', '(r + m)'], {}), '(A.rows, r + m)\n', (17700, 17715), False, 'from sympy.matrices import zeros, eye\n'), ((17736, 17752), 'sympy.matrices.zeros', 'zeros', (['B.rows', 'm'], {}), '(B.rows, m)\n', (17741, 17752), False, 'from sympy.matrices import zeros, eye\n'), ((18900, 18909), 'sympy.core.compatibility.range', 'range', (['ri'], {}), '(ri)\n', (18905, 18909), False, 'from sympy.core.compatibility import reduce, range\n'), ((22271, 22287), 'sympy.matrices.zeros', 'zeros', (['M.rows', '(1)'], {}), '(M.rows, 1)\n', (22276, 22287), False, 'from sympy.matrices import zeros, eye\n'), ((24232, 24238), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (24235, 24238), False, 'from sympy.matrices import zeros, eye\n'), ((26321, 26337), 'sympy.matrices.zeros', 'zeros', (['M.rows', '(1)'], {}), '(M.rows, 1)\n', (26326, 26337), False, 'from sympy.matrices import zeros, eye\n'), ((27472, 27514), 'sympy.integrals.rde.bound_degree', 'bound_degree', (['a', 'b', 'r', 'DE'], {'parametric': '(True)'}), '(a, b, r, DE, parametric=True)\n', (27484, 27514), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((28302, 28308), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (28305, 28308), False, 'from sympy.matrices import zeros, eye\n'), ((30404, 30423), 'sympy.integrals.risch.splitfactor', 'splitfactor', (['gd', 'DE'], {}), '(gd, DE)\n', (30415, 30423), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((30592, 30610), 'sympy.integrals.risch.derivation', 'derivation', (['hn', 'DE'], {}), '(hn, DE)\n', (30602, 30610), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((33250, 33261), 'sympy.core.Dummy', 'Dummy', (['"""c1"""'], {}), "('c1')\n", (33255, 33261), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((33514, 33528), 'sympy.solvers.solve', 'solve', (['eqs', 'c1'], {}), '(eqs, c1)\n', (33519, 33528), False, 'from sympy.solvers import solve\n'), ((34211, 34224), 'sympy.polys.Poly', 'Poly', (['c', 'DE.t'], {}), '(c, DE.t)\n', (34215, 34224), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((47440, 47450), 'sympy.core.Dummy', 'Dummy', (['"""z"""'], {}), "('z')\n", (47445, 47450), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((50870, 50874), 'sympy.core.S', 'S', (['(1)'], {}), '(1)\n', (50871, 50874), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((4735, 4760), 'sympy.polys.Poly', 'Poly', (['(DE.t ** 2 + 1)', 'DE.t'], {}), '(DE.t ** 2 + 1, DE.t)\n', (4739, 4760), False, 
'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((12786, 12804), 'sympy.integrals.risch.derivation', 'derivation', (['ri', 'DE'], {}), '(ri, DE)\n', (12796, 12804), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((13551, 13564), 'sympy.polys.Poly', 'Poly', (['(0)', 'DE.t'], {}), '(0, DE.t)\n', (13555, 13564), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((13702, 13728), 'sympy.polys.Poly', 'Poly', (['(si * DE.t ** N)', 'DE.t'], {}), '(si * DE.t ** N, DE.t)\n', (13706, 13728), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((14769, 14782), 'sympy.polys.Poly', 'Poly', (['(0)', 'DE.t'], {}), '(0, DE.t)\n', (14773, 14782), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((14945, 14971), 'sympy.polys.Poly', 'Poly', (['(si * DE.t ** N)', 'DE.t'], {}), '(si * DE.t ** N, DE.t)\n', (14949, 14971), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((15328, 15336), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', ([], {}), '()\n', (15334, 15336), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((15497, 15513), 'sympy.matrices.zeros', 'zeros', (['(dc + 1)', '(1)'], {}), '(dc + 1, 1)\n', (15502, 15513), False, 'from sympy.matrices import zeros, eye\n'), ((15721, 15739), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (15735, 15739), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((15798, 15824), 'sympy.integrals.risch.frac_in', 'frac_in', (['b', 't0'], {'field': '(True)'}), '(b, t0, field=True)\n', (15805, 15824), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((16828, 16850), 'sympy.polys.Poly', 'Poly', (['(1)', 't'], {'field': '(True)'}), '(1, t, field=True)\n', (16832, 16850), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((17251, 17262), 'sympy.matrices.zeros', 'zeros', (['d', '(1)'], {}), '(d, 1)\n', (17256, 17262), False, 'from sympy.matrices import zeros, eye\n'), ((18095, 18113), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (18109, 18113), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((18136, 18164), 'sympy.integrals.risch.frac_in', 'frac_in', (['b', 'DE.t'], {'field': '(True)'}), '(b, DE.t, field=True)\n', (18143, 18164), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((18426, 18444), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (18440, 18444), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((20161, 20177), 'sympy.matrices.zeros', 'zeros', (['M.rows', '(1)'], {}), '(M.rows, 1)\n', (20166, 20177), False, 'from sympy.matrices import zeros, eye\n'), ((23077, 23083), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (23080, 23083), False, 'from sympy.matrices import 
zeros, eye\n'), ((27119, 27125), 'sympy.matrices.eye', 'eye', (['m'], {}), '(m)\n', (27122, 27125), False, 'from sympy.matrices import zeros, eye\n'), ((30990, 31015), 'sympy.integrals.rde.order_at_oo', 'order_at_oo', (['fa', 'fd', 'DE.t'], {}), '(fa, fd, DE.t)\n', (31001, 31015), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((32458, 32475), 'sympy.polys.Poly', 'Poly', (['y_num', 'DE.t'], {}), '(y_num, DE.t)\n', (32462, 32475), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((32477, 32494), 'sympy.polys.Poly', 'Poly', (['y_den', 'DE.t'], {}), '(y_den, DE.t)\n', (32481, 32494), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((48659, 48694), 'sympy.integrals.risch.residue_reduce_derivation', 'residue_reduce_derivation', (['H', 'DE', 'z'], {}), '(H, DE, z)\n', (48684, 48694), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((49033, 49049), 'sympy.polys.Poly', 'Poly', (['DE.t', 'DE.t'], {}), '(DE.t, DE.t)\n', (49037, 49049), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((49078, 49096), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (49092, 49096), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((49119, 49148), 'sympy.integrals.risch.frac_in', 'frac_in', (['p', 'DE.t'], {'cancel': '(True)'}), '(p, DE.t, cancel=True)\n', (49126, 49148), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((49170, 49193), 'sympy.integrals.risch.frac_in', 'frac_in', (['(wa, wd)', 'DE.t'], {}), '((wa, wd), DE.t)\n', (49177, 49193), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((2163, 2180), 'sympy.integrals.risch.derivation', 'derivation', (['h', 'DE'], {}), '(h, DE)\n', (2173, 2180), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((5057, 5078), 'sympy.integrals.rde.order_at', 'order_at', (['Ga', 'p', 'DE.t'], {}), '(Ga, p, DE.t)\n', (5065, 5078), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((5081, 5102), 'sympy.integrals.rde.order_at', 'order_at', (['Gd', 'p', 'DE.t'], {}), '(Gd, p, DE.t)\n', (5089, 5102), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((5257, 5273), 'sympy.polys.Poly', 'Poly', (['DE.t', 'DE.t'], {}), '(DE.t, DE.t)\n', (5261, 5273), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((5292, 5310), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (5306, 5310), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((5529, 5550), 'sympy.integrals.risch.frac_in', 'frac_in', (['dcoeff', 'DE.t'], {}), '(dcoeff, DE.t)\n', (5536, 5550), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, 
residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((11703, 11716), 'sympy.core.compatibility.range', 'range', (['A.rows'], {}), '(A.rows)\n', (11708, 11716), False, 'from sympy.core.compatibility import reduce, range\n'), ((14089, 14105), 'sympy.matrices.zeros', 'zeros', (['A.rows', 'm'], {}), '(A.rows, m)\n', (14094, 14105), False, 'from sympy.matrices import zeros, eye\n'), ((17773, 17784), 'sympy.matrices.zeros', 'zeros', (['m', 'r'], {}), '(m, r)\n', (17778, 17784), False, 'from sympy.matrices import zeros, eye\n'), ((18277, 18295), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (18291, 18295), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((19993, 20004), 'sympy.matrices.zeros', 'zeros', (['(1)', 'm'], {}), '(1, m)\n', (19998, 20004), False, 'from sympy.matrices import zeros, eye\n'), ((24339, 24355), 'sympy.matrices.zeros', 'zeros', (['B.rows', 'm'], {}), '(B.rows, m)\n', (24344, 24355), False, 'from sympy.matrices import zeros, eye\n'), ((28409, 28425), 'sympy.matrices.zeros', 'zeros', (['B.rows', 'm'], {}), '(B.rows, m)\n', (28414, 28425), False, 'from sympy.matrices import zeros, eye\n'), ((33481, 33500), 'sympy.core.compatibility.range', 'range', (['(B + 1)', '(C + 1)'], {}), '(B + 1, C + 1)\n', (33486, 33500), False, 'from sympy.core.compatibility import reduce, range\n'), ((38493, 38511), 'sympy.integrals.risch.derivation', 'derivation', (['fa', 'DE'], {}), '(fa, DE)\n', (38503, 38511), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((38517, 38535), 'sympy.integrals.risch.derivation', 'derivation', (['fd', 'DE'], {}), '(fd, DE)\n', (38527, 38535), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((40595, 40606), 'sympy.polys.sqf_list', 'sqf_list', (['i'], {}), '(i)\n', (40603, 40606), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((40726, 40737), 'sympy.polys.sqf_list', 'sqf_list', (['d'], {}), '(d)\n', (40734, 40737), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((49005, 49025), 'sympy.integrals.risch.derivation', 'derivation', (['DE.t', 'DE'], {}), '(DE.t, DE)\n', (49015, 49025), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((49382, 49400), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (49396, 49400), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((49423, 49439), 'sympy.integrals.risch.frac_in', 'frac_in', (['p', 'DE.t'], {}), '(p, DE.t)\n', (49430, 49439), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((4847, 4860), 'sympy.polys.Poly', 'Poly', (['(1)', 'DE.t'], {}), '(1, DE.t)\n', (4851, 4860), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((5818, 5843), 'sympy.polys.Poly', 'Poly', 
(['(DE.t ** 2 + 1)', 'DE.t'], {}), '(DE.t ** 2 + 1, DE.t)\n', (5822, 5843), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((5860, 5878), 'sympy.integrals.risch.DecrementLevel', 'DecrementLevel', (['DE'], {}), '(DE)\n', (5874, 5878), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((6114, 6135), 'sympy.integrals.risch.frac_in', 'frac_in', (['dcoeff', 'DE.t'], {}), '(dcoeff, DE.t)\n', (6121, 6135), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((6155, 6201), 'sympy.integrals.risch.recognize_log_derivative', 'recognize_log_derivative', (['(2 * betaa)', 'betad', 'DE'], {}), '(2 * betaa, betad, DE)\n', (6179, 6201), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((6668, 6681), 'sympy.polys.Poly', 'Poly', (['n', 'DE.t'], {}), '(n, DE.t)\n', (6672, 6681), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((12068, 12081), 'sympy.polys.polymatrix.PolyMatrix', 'Matrix', (['[um1]'], {}), '([um1])\n', (12074, 12081), True, 'from sympy.polys.polymatrix import PolyMatrix as Matrix\n'), ((12687, 12714), 'sympy.integrals.risch.gcdex_diophantine', 'gcdex_diophantine', (['b', 'a', 'qi'], {}), '(b, a, qi)\n', (12704, 12714), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((13782, 13802), 'sympy.integrals.risch.derivation', 'derivation', (['sitn', 'DE'], {}), '(sitn, DE)\n', (13792, 13802), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((15025, 15045), 'sympy.integrals.risch.derivation', 'derivation', (['sitn', 'DE'], {}), '(sitn, DE)\n', (15035, 15045), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((15226, 15244), 'sympy.integrals.risch.derivation', 'derivation', (['si', 'DE'], {}), '(si, DE)\n', (15236, 15244), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((15561, 15577), 'sympy.matrices.zeros', 'zeros', (['A.rows', 'm'], {}), '(A.rows, m)\n', (15566, 15577), False, 'from sympy.matrices import zeros, eye\n'), ((18775, 18792), 'sympy.matrices.zeros', 'zeros', (['M.rows', 'ri'], {}), '(M.rows, ri)\n', (18780, 18792), False, 'from sympy.matrices import zeros, eye\n'), ((19048, 19067), 'sympy.integrals.risch.derivation', 'derivation', (['hji', 'DE'], {}), '(hji, DE)\n', (19058, 19067), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((30945, 30963), 'sympy.integrals.risch.derivation', 'derivation', (['hs', 'DE'], {}), '(hs, DE)\n', (30955, 30963), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, 
recognize_log_derivative\n'), ((31022, 31047), 'sympy.integrals.rde.order_at_oo', 'order_at_oo', (['ga', 'gd', 'DE.t'], {}), '(ga, gd, DE.t)\n', (31033, 31047), False, 'from sympy.integrals.rde import order_at, order_at_oo, weak_normalizer, bound_degree\n'), ((33323, 33343), 'sympy.integrals.risch.derivation', 'derivation', (['DE.t', 'DE'], {}), '(DE.t, DE)\n', (33333, 33343), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((39221, 39243), 'sympy.polys.Poly', 'Poly', (['DE.T[i]', 'DE.T[i]'], {}), '(DE.T[i], DE.T[i])\n', (39225, 39243), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((40886, 40894), 'sympy.core.Mul', 'Mul', (['*ld'], {}), '(*ld)\n', (40889, 40894), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((44650, 44672), 'sympy.polys.Poly', 'Poly', (['DE.T[i]', 'DE.T[i]'], {}), '(DE.T[i], DE.T[i])\n', (44654, 44672), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((50155, 50159), 'sympy.core.S', 'S', (['(1)'], {}), '(1)\n', (50156, 50159), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((6684, 6701), 'sympy.integrals.risch.derivation', 'derivation', (['p', 'DE'], {}), '(p, DE)\n', (6694, 6701), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((11586, 11618), 'sympy.integrals.risch.derivation', 'derivation', (['u[i]', 'DE'], {'basic': '(True)'}), '(u[i], DE, basic=True)\n', (11596, 11618), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((11640, 11675), 'sympy.integrals.risch.derivation', 'derivation', (['A[i, j]', 'DE'], {'basic': '(True)'}), '(A[i, j], DE, basic=True)\n', (11650, 11675), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((16906, 16910), 'sympy.core.S', 'S', (['(0)'], {}), '(0)\n', (16907, 16910), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((32388, 32396), 'sympy.core.compatibility.range', 'range', (['r'], {}), '(r)\n', (32393, 32396), False, 'from sympy.core.compatibility import reduce, range\n'), ((39486, 39515), 'sympy.integrals.risch.derivation', 'derivation', (['i', 'DE'], {'basic': '(True)'}), '(i, DE, basic=True)\n', (39496, 39515), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((40101, 40110), 'sympy.core.Mul', 'Mul', (['i', 'j'], {}), '(i, j)\n', (40104, 40110), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((40878, 40885), 'sympy.core.Mul', 'Mul', (['*l'], {}), '(*l)\n', (40881, 40885), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((43898, 43916), 'sympy.integrals.risch.derivation', 'derivation', (['fa', 'DE'], {}), '(fa, DE)\n', (43908, 43916), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((43922, 43940), 'sympy.integrals.risch.derivation', 'derivation', (['fd', 'DE'], {}), '(fd, DE)\n', (43932, 43940), False, 'from 
sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((44914, 44943), 'sympy.integrals.risch.derivation', 'derivation', (['i', 'DE'], {'basic': '(True)'}), '(i, DE, basic=True)\n', (44924, 44943), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((45827, 45836), 'sympy.core.Pow', 'Pow', (['i', 'j'], {}), '(i, j)\n', (45830, 45836), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((51090, 51099), 'sympy.core.Pow', 'Pow', (['i', 'j'], {}), '(i, j)\n', (51093, 51099), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((11425, 11454), 'sympy.integrals.risch.derivation', 'derivation', (['x', 'DE'], {'basic': '(True)'}), '(x, DE, basic=True)\n', (11435, 11454), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((11476, 11511), 'sympy.integrals.risch.derivation', 'derivation', (['A[i, j]', 'DE'], {'basic': '(True)'}), '(A[i, j], DE, basic=True)\n', (11486, 11511), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((11858, 11883), 'sympy.polys.cancel', 'cancel', (['(r - Asj * Rm1[jj])'], {}), '(r - Asj * Rm1[jj])\n', (11864, 11883), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((11979, 12000), 'sympy.polys.cancel', 'cancel', (['(r - Asj * um1)'], {}), '(r - Asj * um1)\n', (11985, 12000), False, 'from sympy.polys import Poly, lcm, cancel, sqf_list\n'), ((50179, 50192), 'sympy.core.Pow', 'Pow', (['i', '(j * n)'], {}), '(i, j * n)\n', (50182, 50192), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((18336, 18356), 'sympy.integrals.risch.derivation', 'derivation', (['DE.t', 'DE'], {}), '(DE.t, DE)\n', (18346, 18356), False, 'from sympy.integrals.risch import gcdex_diophantine, frac_in, derivation, residue_reduce, splitfactor, residue_reduce_derivation, DecrementLevel, recognize_log_derivative\n'), ((46194, 46207), 'sympy.core.Mul', 'Mul', (['i', '(j / n)'], {}), '(i, j / n)\n', (46197, 46207), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((40639, 40653), 'sympy.core.Pow', 'Pow', (['icoeff', 'j'], {}), '(icoeff, j)\n', (40642, 40653), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((40658, 40671), 'sympy.core.Pow', 'Pow', (['b', '(e * j)'], {}), '(b, e * j)\n', (40661, 40671), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((40771, 40785), 'sympy.core.Pow', 'Pow', (['dcoeff', 'j'], {}), '(dcoeff, j)\n', (40774, 40785), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n'), ((40790, 40803), 'sympy.core.Pow', 'Pow', (['b', '(e * j)'], {}), '(b, e * j)\n', (40793, 40803), False, 'from sympy.core import Dummy, ilcm, Add, Mul, Pow, S\n')] |
from pprint import pprint
import yaml
import netmiko
import paramiko
def send_cmd_with_prompt(device, command, *, wait_for, confirmation):
if type(wait_for) == str:
wait_for = [wait_for]
if type(confirmation) == str:
confirmation = [confirmation]
with netmiko.Netmiko(**device) as ssh:
ssh.enable()
result = ssh.send_command_timing(
command, strip_prompt=False, strip_command=False
)
for wait, confirm in zip(wait_for, confirmation):
if wait in result:
result += ssh.send_command_timing(
confirm, strip_prompt=False, strip_command=False
)
return result
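def backup_config_to_flash(device, filename="backup.cfg"):
    # Illustrative wrapper, not part of the original script: because wait_for and
    # confirmation are normalised to lists above, a single call can answer several
    # prompts in sequence. The command and prompt substrings here are assumptions.
    return send_cmd_with_prompt(
        device,
        f"copy running-config flash:{filename}",
        wait_for=["Destination filename", "Over write"],
        confirmation=["\n", "\n"],
    )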
if __name__ == "__main__":
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
r1 = devices[0]
out = send_cmd_with_prompt(
r1, "copy run start", wait_for="Destination filename", confirmation="\n"
)
print(out)
"""
R1#copy run start
Destination filename [startup-config]?
Building configuration...
[OK]
R1#
"""
| [
"yaml.safe_load",
"netmiko.Netmiko"
] | [((282, 307), 'netmiko.Netmiko', 'netmiko.Netmiko', ([], {}), '(**device)\n', (297, 307), False, 'import netmiko\n'), ((782, 799), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (796, 799), False, 'import yaml\n')] |
# GROWING BEYOND EARTH CONTROL BOX Training
# RASPBERRY PI PICO / MICROPYTHON
# FAIRCHILD TROPICAL BOTANIC GARDEN, Oct 18, 2021
# The Growing Beyond Earth (GBE) control box is a device that controls
# the LED lights and fan in a GBE growth chamber. It can also control
# accessories including a 12v water pump and environmental sensors.
# The device is based on a Raspberry Pi Pico microcontroller running
# Micropython.
# Lesson written by @MarioTheMaker
from sys import stdin, stdout, exit
import machine
import time
#Set the brightness for each color
red_brightness = 100
green_brightness = 100
blue_brightness = 100
white_brightness = 100
# Pulse width modulation (PWM) is a way to get an artificial analog output on a digital pin.
# It achieves this by rapidly toggling the pin from low to high. There are two parameters
# associated with this: the frequency of the toggling, and the duty cycle.
# The duty cycle is defined to be how long the pin is high compared with the length of a
# single period (low plus high time). Maximum duty cycle is when the pin is high all of the
# time, and minimum is when it is low all of the time.
# https://projects.raspberrypi.org/en/projects/getting-started-with-the-pico/7#:
# control I/O pins
# machine.Pin(id, mode=-1, pull=-1, *, value, drive, alt)
# Access the pin peripheral (GPIO pin) associated with the given id.
# If additional arguments are given in the constructor then they are used to initialise
# the pin. Any settings that are not specified will remain in their previous state.
# More info https://docs.micropython.org/en/latest/library/machine.Pin.html
r=machine.PWM(machine.Pin(0)); r.freq(20000) # Red channel
g=machine.PWM(machine.Pin(2)); g.freq(20000) # Green channel
b=machine.PWM(machine.Pin(1)); b.freq(20000) # Blue channel
w=machine.PWM(machine.Pin(3)); w.freq(20000) # White channel
# More info https://docs.micropython.org/en/latest/library/machine.PWM.html
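# Illustrative helper (not part of the original lesson): duty_u16() expects a
# value from 0 to 65535, while the loop below multiplies two 0-100 numbers and
# therefore tops out at 10000; a converter like this shows how a percentage
# would map onto the full 16-bit range.
def percent_to_duty(percent):
    # Clamp to 0-100, then scale onto the 0-65535 range expected by duty_u16()
    percent = max(0, min(100, percent))
    return int(percent * 65535 // 100)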
# Start a loop and change the brightness multiplier "n"
# PWM.duty_u16([value]) Get the current duty cycle of the PWM output,
# as an unsigned 16-bit value in the range 0 to 65535 inclusive.
n = 100
while n > 0:
print("Power Level ",n)
r.duty_u16(int(red_brightness)*n)
g.duty_u16(int(green_brightness)*n)
b.duty_u16(int(blue_brightness)*n)
w.duty_u16(int(white_brightness)*n)
time.sleep(.3)
n = n - 5
#Turn all the lights off
time.sleep(3)
r.duty_u16(0)
g.duty_u16(0)
b.duty_u16(0)
w.duty_u16(0)
| [
"machine.Pin",
"time.sleep"
] | [((2409, 2422), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2419, 2422), False, 'import time\n'), ((1639, 1653), 'machine.Pin', 'machine.Pin', (['(0)'], {}), '(0)\n', (1650, 1653), False, 'import machine\n'), ((1700, 1714), 'machine.Pin', 'machine.Pin', (['(2)'], {}), '(2)\n', (1711, 1714), False, 'import machine\n'), ((1763, 1777), 'machine.Pin', 'machine.Pin', (['(1)'], {}), '(1)\n', (1774, 1777), False, 'import machine\n'), ((1825, 1839), 'machine.Pin', 'machine.Pin', (['(3)'], {}), '(3)\n', (1836, 1839), False, 'import machine\n'), ((2353, 2368), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2363, 2368), False, 'import time\n')] |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for mobilenet_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from nets.mobilenet import conv_blocks as ops
from nets.mobilenet import mobilenet
from nets.mobilenet import mobilenet_v2
slim = tf.contrib.slim
def find_ops(optype):
"""Find ops of a given type in graphdef or a graph.
Args:
optype: operation type (e.g. Conv2D)
Returns:
List of operations.
"""
gd = tf.get_default_graph()
return [var for var in gd.get_operations() if var.type == optype]
class MobilenetV2Test(tf.test.TestCase):
def setUp(self):
tf.reset_default_graph()
def testCreation(self):
spec = dict(mobilenet_v2.V2_DEF)
_, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
# This is mostly a sanity test. No deep reason for these particular
# constants.
#
# All but first 2 and last one have two convolutions, and there is one
# extra conv that is not in the spec. (logits)
self.assertEqual(num_convs, len(spec['spec']) * 2 - 2)
# Check that depthwise are exposed.
for i in range(2, 17):
self.assertIn('layer_%d/depthwise_output' % i, ep)
def testCreationNoClasses(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
net, ep = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec,
num_classes=None)
self.assertIs(net, ep['global_pool'])
def testImageSizes(self):
for input_size, output_size in [(224, 7), (192, 6), (160, 5),
(128, 4), (96, 3)]:
tf.reset_default_graph()
_, ep = mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, input_size, input_size, 3)))
self.assertEqual(ep['layer_18/output'].get_shape().as_list()[1:3],
[output_size] * 2)
def testWithSplits(self):
spec = copy.deepcopy(mobilenet_v2.V2_DEF)
spec['overrides'] = {
(ops.expanded_conv,): dict(split_expansion=2),
}
_, _ = mobilenet.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)), conv_defs=spec)
num_convs = len(find_ops('Conv2D'))
    # All but 3 ops have 3 conv operators, the remaining 3 have one
    # and there is one unaccounted for.
self.assertEqual(num_convs, len(spec['spec']) * 3 - 5)
def testWithOutputStride8(self):
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testDivisibleBy(self):
tf.reset_default_graph()
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
divisible_by=16,
min_depth=32)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements([32, 64, 96, 160, 192, 320, 384, 576, 960, 1280,
1001], s)
def testDivisibleByWithArgScope(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
self.assertSameElements(s, [32, 192, 128, 1001])
def testFineGrained(self):
tf.reset_default_graph()
# Verifies that depth_multiplier arg scope actually works
# if no default min_depth is provided.
mobilenet_v2.mobilenet(
tf.placeholder(tf.float32, (10, 224, 224, 2)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.01,
finegrain_classification_mode=True)
s = [op.outputs[0].get_shape().as_list()[-1] for op in find_ops('Conv2D')]
s = set(s)
# All convolutions will be 8->48, except for the last one.
self.assertSameElements(s, [8, 48, 1001, 1280])
def testMobilenetBase(self):
tf.reset_default_graph()
# Verifies that mobilenet_base returns pre-pooling layer.
with slim.arg_scope((mobilenet.depth_multiplier,), min_depth=32):
net, _ = mobilenet_v2.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF, depth_multiplier=0.1)
self.assertEqual(net.get_shape().as_list(), [10, 7, 7, 128])
def testWithOutputStride16(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testWithOutputStride8AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=8,
use_explicit_padding=True,
scope='MobilenetV2')
self.assertEqual(out.get_shape().as_list()[1:3], [28, 28])
def testWithOutputStride16AndExplicitPadding(self):
tf.reset_default_graph()
out, _ = mobilenet.mobilenet_base(
tf.placeholder(tf.float32, (10, 224, 224, 16)),
conv_defs=mobilenet_v2.V2_DEF,
output_stride=16,
use_explicit_padding=True)
self.assertEqual(out.get_shape().as_list()[1:3], [14, 14])
def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self):
sc = mobilenet.training_scope(is_training=None)
self.assertNotIn('is_training', sc[slim.arg_scope_func_key(
slim.batch_norm)])
def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self):
sc = mobilenet.training_scope(is_training=False)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope(is_training=True)
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
sc = mobilenet.training_scope()
self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)])
if __name__ == '__main__':
tf.test.main()
| [
"tensorflow.reset_default_graph",
"tensorflow.placeholder",
"nets.mobilenet.mobilenet.training_scope",
"tensorflow.test.main",
"copy.deepcopy",
"tensorflow.get_default_graph"
] | [((1190, 1212), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1210, 1212), True, 'import tensorflow as tf\n'), ((7068, 7082), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (7080, 7082), True, 'import tensorflow as tf\n'), ((1348, 1372), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1370, 1372), True, 'import tensorflow as tf\n'), ((2035, 2069), 'copy.deepcopy', 'copy.deepcopy', (['mobilenet_v2.V2_DEF'], {}), '(mobilenet_v2.V2_DEF)\n', (2048, 2069), False, 'import copy\n'), ((2692, 2726), 'copy.deepcopy', 'copy.deepcopy', (['mobilenet_v2.V2_DEF'], {}), '(mobilenet_v2.V2_DEF)\n', (2705, 2726), False, 'import copy\n'), ((3440, 3464), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3462, 3464), True, 'import tensorflow as tf\n'), ((3891, 3915), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3913, 3915), True, 'import tensorflow as tf\n'), ((4428, 4452), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (4450, 4452), True, 'import tensorflow as tf\n'), ((4993, 5017), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5015, 5017), True, 'import tensorflow as tf\n'), ((5423, 5447), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5445, 5447), True, 'import tensorflow as tf\n'), ((5729, 5753), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (5751, 5753), True, 'import tensorflow as tf\n'), ((6099, 6123), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (6121, 6123), True, 'import tensorflow as tf\n'), ((6461, 6503), 'nets.mobilenet.mobilenet.training_scope', 'mobilenet.training_scope', ([], {'is_training': 'None'}), '(is_training=None)\n', (6485, 6503), False, 'from nets.mobilenet import mobilenet\n'), ((6668, 6711), 'nets.mobilenet.mobilenet.training_scope', 'mobilenet.training_scope', ([], {'is_training': '(False)'}), '(is_training=False)\n', (6692, 6711), False, 'from nets.mobilenet import mobilenet\n'), ((6800, 6842), 'nets.mobilenet.mobilenet.training_scope', 'mobilenet.training_scope', ([], {'is_training': '(True)'}), '(is_training=True)\n', (6824, 6842), False, 'from nets.mobilenet import mobilenet\n'), ((6931, 6957), 'nets.mobilenet.mobilenet.training_scope', 'mobilenet.training_scope', ([], {}), '()\n', (6955, 6957), False, 'from nets.mobilenet import mobilenet\n'), ((1478, 1524), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (1492, 1524), True, 'import tensorflow as tf\n'), ((2113, 2159), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (2127, 2159), True, 'import tensorflow as tf\n'), ((2402, 2426), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2424, 2426), True, 'import tensorflow as tf\n'), ((2854, 2900), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (2868, 2900), True, 'import tensorflow as tf\n'), ((3202, 3248), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (3216, 3248), True, 'import tensorflow as tf\n'), ((3501, 3547), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (3515, 3547), 
True, 'import tensorflow as tf\n'), ((4595, 4640), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 2)'], {}), '(tf.float32, (10, 224, 224, 2))\n', (4609, 4640), True, 'import tensorflow as tf\n'), ((5495, 5541), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (5509, 5541), True, 'import tensorflow as tf\n'), ((5801, 5847), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (5815, 5847), True, 'import tensorflow as tf\n'), ((6171, 6217), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (6185, 6217), True, 'import tensorflow as tf\n'), ((2475, 2534), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, input_size, input_size, 3)'], {}), '(tf.float32, (10, input_size, input_size, 3))\n', (2489, 2534), True, 'import tensorflow as tf\n'), ((4131, 4176), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 2)'], {}), '(tf.float32, (10, 224, 224, 2))\n', (4145, 4176), True, 'import tensorflow as tf\n'), ((5204, 5250), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(10, 224, 224, 16)'], {}), '(tf.float32, (10, 224, 224, 16))\n', (5218, 5250), True, 'import tensorflow as tf\n')] |
from firebase import firebase
import os
import datetime
import json
import logging
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from github3 import login
firebase_url = os.environ['FIREBASE_DB']
firebase_secret = os.environ['FIREBASE_SECRET']
firebase_path = os.environ['FIREBASE_PATH']
firebase_username = os.environ['FIREBASE_USERNAME'] # not checked ATM
gh_token = os.environ['GH_TOKEN']
gh_gist = os.environ['GH_GIST']
gh_fname = os.environ['GH_FNAME']
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def connect_firebase():
f = firebase.FirebaseApplication(firebase_url, None)
f.authentication = firebase.FirebaseAuthentication(firebase_secret, firebase_username, admin=True)
return f
logger.info('==================================')
logger.info('Fetching firebase data')
f = connect_firebase()
data = f.get(firebase_path, None)
new_content = json.dumps(data, ensure_ascii=False, indent=2, sort_keys=True)
logger.info('Reading existing gist')
gh = login(token=gh_token)
gist = gh.gist(gh_gist)
old_content = ""
for f in gist.iter_files():
if f.filename == gh_fname:
old_content = f.content
break
if old_content == new_content:
logger.info('No changes detected')
else:
logger.info('Updating gist with new content')
gist.edit(files={
gh_fname: {
"content": new_content
}
})
logger.info('Done.') | [
"logging.basicConfig",
"logging.getLogger",
"firebase.firebase.FirebaseApplication",
"json.dumps",
"firebase.firebase.FirebaseAuthentication",
"github3.login"
] | [((487, 526), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (506, 526), False, 'import logging\n'), ((536, 563), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (553, 563), False, 'import logging\n'), ((916, 978), 'json.dumps', 'json.dumps', (['data'], {'ensure_ascii': '(False)', 'indent': '(2)', 'sort_keys': '(True)'}), '(data, ensure_ascii=False, indent=2, sort_keys=True)\n', (926, 978), False, 'import json\n'), ((1022, 1043), 'github3.login', 'login', ([], {'token': 'gh_token'}), '(token=gh_token)\n', (1027, 1043), False, 'from github3 import login\n'), ((595, 643), 'firebase.firebase.FirebaseApplication', 'firebase.FirebaseApplication', (['firebase_url', 'None'], {}), '(firebase_url, None)\n', (623, 643), False, 'from firebase import firebase\n'), ((665, 744), 'firebase.firebase.FirebaseAuthentication', 'firebase.FirebaseAuthentication', (['firebase_secret', 'firebase_username'], {'admin': '(True)'}), '(firebase_secret, firebase_username, admin=True)\n', (696, 744), False, 'from firebase import firebase\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ged4py.date` module."""
import unittest
from ged4py.calendar import (
CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate,
CalendarDateVisitor
)
from ged4py.date import (
DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated,
DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod,
DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes,
DateValueVisitor
)
class TestDateVisitor(CalendarDateVisitor, DateValueVisitor):
def visitGregorian(self, date):
if not isinstance(date, GregorianDate):
raise TypeError(str(type(date)))
return ("gregorian", date)
def visitJulian(self, date):
if not isinstance(date, JulianDate):
raise TypeError(str(type(date)))
return ("julian", date)
def visitHebrew(self, date):
if not isinstance(date, HebrewDate):
raise TypeError(str(type(date)))
return ("hebrew", date)
def visitFrench(self, date):
if not isinstance(date, FrenchDate):
raise TypeError(str(type(date)))
return ("french", date)
def visitSimple(self, date):
if not isinstance(date, DateValueSimple):
raise TypeError(str(type(date)))
return ("simple", date.date)
def visitPeriod(self, date):
if not isinstance(date, DateValuePeriod):
raise TypeError(str(type(date)))
return ("period", date.date1, date.date2)
def visitFrom(self, date):
if not isinstance(date, DateValueFrom):
raise TypeError(str(type(date)))
return ("from", date.date)
def visitTo(self, date):
if not isinstance(date, DateValueTo):
raise TypeError(str(type(date)))
return ("to", date.date)
def visitRange(self, date):
if not isinstance(date, DateValueRange):
raise TypeError(str(type(date)))
return ("range", date.date1, date.date2)
def visitBefore(self, date):
if not isinstance(date, DateValueBefore):
raise TypeError(str(type(date)))
return ("before", date.date)
def visitAfter(self, date):
if not isinstance(date, DateValueAfter):
raise TypeError(str(type(date)))
return ("after", date.date)
def visitAbout(self, date):
if not isinstance(date, DateValueAbout):
raise TypeError(str(type(date)))
return ("about", date.date)
def visitCalculated(self, date):
if not isinstance(date, DateValueCalculated):
raise TypeError(str(type(date)))
return ("calculated", date.date)
def visitEstimated(self, date):
if not isinstance(date, DateValueEstimated):
raise TypeError(str(type(date)))
return ("estimated", date.date)
def visitInterpreted(self, date):
if not isinstance(date, DateValueInterpreted):
raise TypeError(str(type(date)))
return ("interpreted", date.date, date.phrase)
def visitPhrase(self, date):
if not isinstance(date, DateValuePhrase):
raise TypeError(str(type(date)))
return ("phrase", date.phrase)
class TestDetailDate(unittest.TestCase):
"""Tests for `ged4py.date` module."""
def test_001_cal_date(self):
"""Test date.CalendarDate class."""
date = GregorianDate(2017, "OCT", 9)
self.assertEqual(date.year, 2017)
self.assertIsNone(date.dual_year)
self.assertFalse(date.bc)
self.assertEqual(date.year_str, "2017")
self.assertEqual(date.month, "OCT")
self.assertEqual(date.month_num, 10)
self.assertEqual(date.day, 9)
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = GregorianDate(2017, "OCT", bc=True)
self.assertEqual(date.year, 2017)
self.assertIsNone(date.dual_year)
self.assertTrue(date.bc)
self.assertEqual(date.year_str, "2017 B.C.")
self.assertEqual(date.month, "OCT")
self.assertEqual(date.month_num, 10)
self.assertIsNone(date.day)
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = GregorianDate(1699, "FEB", dual_year=1700)
self.assertEqual(date.year, 1699)
self.assertEqual(date.dual_year, 1700)
self.assertFalse(date.bc)
self.assertEqual(date.year_str, "1699/00")
self.assertEqual(date.month, "FEB")
self.assertEqual(date.month_num, 2)
self.assertIsNone(date.day)
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = HebrewDate(5000)
self.assertEqual(date.year, 5000)
self.assertFalse(date.bc)
self.assertEqual(date.year_str, "5000")
self.assertIsNone(date.month)
self.assertIsNone(date.month_num)
self.assertIsNone(date.day)
self.assertEqual(date.calendar, CalendarType.HEBREW)
date = FrenchDate(1, "FRUC", 1)
self.assertEqual(date.year, 1)
self.assertFalse(date.bc)
self.assertEqual(date.year_str, "1")
self.assertEqual(date.month, "FRUC")
self.assertEqual(date.month_num, 12)
self.assertEqual(date.day, 1)
self.assertEqual(date.calendar, CalendarType.FRENCH_R)
date = JulianDate(5, "JAN", bc=True)
self.assertEqual(date.year, 5)
self.assertTrue(date.bc)
self.assertEqual(date.year_str, "5 B.C.")
self.assertEqual(date.month, "JAN")
self.assertEqual(date.month_num, 1)
self.assertIsNone(date.day)
self.assertEqual(date.calendar, CalendarType.JULIAN)
def test_002_cal_date_key(self):
"""Test date.CalendarDate class."""
date = GregorianDate(2017, "OCT", 9)
self.assertEqual(date.key(), (2458035.5, 0))
date = GregorianDate(1699, "FEB", 1, dual_year=1700)
self.assertEqual(date.key(), (2342003.5, 0))
date = FrenchDate(2017, "VENT", bc=True)
self.assertEqual(date.key(), (1638959.5, 1))
date = HebrewDate(2017, "TSH", 22)
self.assertEqual(date.key(), (1084542.5, 0))
date = JulianDate(1000)
self.assertEqual(date.key(), (2086672.5, 1))
def test_003_cal_date_cmp(self):
"""Test date.CalendarDate class."""
self.assertTrue(GregorianDate(2016, "JAN", 1) < GregorianDate(2017, "JAN", 1))
self.assertTrue(GregorianDate(2017, "JAN", 1) < GregorianDate(2017, "FEB", 1))
self.assertTrue(GregorianDate(2017, "JAN", 1) < GregorianDate(2017, "JAN", 2))
self.assertTrue(GregorianDate(2017, "JAN", 1) <= GregorianDate(2017, "JAN", 2))
self.assertTrue(GregorianDate(2017, "JAN", 2) > GregorianDate(2017, "JAN", 1))
self.assertTrue(GregorianDate(2017, "JAN", 2) >= GregorianDate(2017, "JAN", 1))
self.assertTrue(GregorianDate(2017, "JAN", 1) == GregorianDate(2017, "JAN", 1))
self.assertTrue(GregorianDate(2017, "JAN", 1) != GregorianDate(2017, "JAN", 2))
# missing day compares as "past" the last day of month, but before next month
self.assertTrue(GregorianDate(2017, "JAN") > GregorianDate(2017, "JAN", 31))
self.assertTrue(GregorianDate(2017, "JAN") < GregorianDate(2017, "FEB", 1))
# missing month compares as "past" the last day of year, but before next year
self.assertTrue(GregorianDate(2017) > GregorianDate(2017, "DEC", 31))
self.assertTrue(GregorianDate(2017) < GregorianDate(2018, "JAN", 1))
# dual date
self.assertTrue(GregorianDate(1700, "JAN", 1) == GregorianDate(1699, "JAN", 1, dual_year=1700))
# compare Gregorian and Julian dates
self.assertTrue(GregorianDate(1582, "OCT", 15) == JulianDate(1582, "OCT", 5))
self.assertTrue(GregorianDate(1582, "OCT", 16) > JulianDate(1582, "OCT", 5))
self.assertTrue(JulianDate(1582, "OCT", 6) > GregorianDate(1582, "OCT", 15))
self.assertTrue(GregorianDate(2000, "JAN", 14) == JulianDate(2000, "JAN", 1))
# compare Gregorian and French dates
self.assertTrue(GregorianDate(1792, "SEP", 22) == FrenchDate(1, "VEND", 1))
self.assertTrue(GregorianDate(1792, "SEP", 23) > FrenchDate(1, "VEND", 1))
self.assertTrue(FrenchDate(1, "VEND", 2) > GregorianDate(1792, "SEP", 22))
self.assertTrue(GregorianDate(2020, "SEP", 21) == FrenchDate(228, "COMP", 5))
# compare Gregorian and Hebrew dates
self.assertTrue(GregorianDate(2020, "JAN", 1) == HebrewDate(5780, "SVN", 4))
def test_004_cal_date_str(self):
"""Test date.CalendarDate class."""
date = GregorianDate(2017, "OCT", 9)
self.assertEqual(str(date), "9 OCT 2017")
date = GregorianDate(2017, "OCT", bc=True)
self.assertEqual(str(date), "OCT 2017 B.C.")
date = GregorianDate(1699, "JAN", 1, dual_year=1700)
self.assertEqual(str(date), "1 JAN 1699/00")
date = HebrewDate(5000)
self.assertEqual(str(date), "@#DHEBREW@ 5000")
date = FrenchDate(1, "VEND", 1)
self.assertEqual(str(date), "@#DFRENCH R@ 1 VEND 1")
date = JulianDate(1582, "OCT", 5)
self.assertEqual(str(date), "@#DJULIAN@ 5 OCT 1582")
def test_005_cal_date_parse(self):
"""Test date.CalendarDate.parse method."""
date = CalendarDate.parse("31 MAY 2020")
self.assertIsInstance(date, GregorianDate)
self.assertEqual(date.year, 2020)
self.assertIsNone(date.dual_year)
self.assertFalse(date.bc)
self.assertEqual(date.month, "MAY")
self.assertEqual(date.month_num, 5)
self.assertEqual(date.day, 31)
self.assertEqual(date.original, "31 MAY 2020")
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = CalendarDate.parse("@#DGREGORIAN@ 10 MAR 1698/99")
self.assertIsInstance(date, GregorianDate)
self.assertEqual(date.year, 1698)
self.assertEqual(date.dual_year, 1699)
self.assertFalse(date.bc)
self.assertEqual(date.month, "MAR")
self.assertEqual(date.month_num, 3)
self.assertEqual(date.day, 10)
self.assertEqual(date.original, "@#DGREGORIAN@ 10 MAR 1698/99")
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = CalendarDate.parse("10 MAR 1699/00")
self.assertIsInstance(date, GregorianDate)
self.assertEqual(date.year, 1699)
self.assertEqual(date.dual_year, 1700)
self.assertEqual(date.original, "10 MAR 1699/00")
self.assertEqual(date.calendar, CalendarType.GREGORIAN)
date = CalendarDate.parse("@#DJULIAN@ 100 B.C.")
self.assertIsInstance(date, JulianDate)
self.assertEqual(date.year, 100)
self.assertTrue(date.bc)
self.assertIsNone(date.month)
self.assertIsNone(date.month_num)
self.assertIsNone(date.day)
self.assertEqual(date.original, "@#DJULIAN@ 100 B.C.")
self.assertEqual(date.calendar, CalendarType.JULIAN)
date = CalendarDate.parse("@#DFRENCH R@ 15 GERM 0001")
self.assertIsInstance(date, FrenchDate)
self.assertEqual(date.year, 1)
self.assertFalse(date.bc)
self.assertEqual(date.month, "GERM")
self.assertEqual(date.month_num, 7)
self.assertEqual(date.day, 15)
self.assertEqual(date.original, "@#DFRENCH R@ 15 GERM 0001")
self.assertEqual(date.calendar, CalendarType.FRENCH_R)
date = CalendarDate.parse("@#DHEBREW@ 7 NSN 5000")
self.assertIsInstance(date, HebrewDate)
self.assertEqual(date.year, 5000)
self.assertFalse(date.bc)
self.assertEqual(date.month, "NSN")
self.assertEqual(date.month_num, 8)
self.assertEqual(date.day, 7)
self.assertEqual(date.original, "@#DHEBREW@ 7 NSN 5000")
self.assertEqual(date.calendar, CalendarType.HEBREW)
# cannot handle ROMAN
with self.assertRaises(ValueError):
date = CalendarDate.parse("@#DROMAN@ 2020")
# cannot handle UNKNOWN
with self.assertRaises(ValueError):
date = CalendarDate.parse("@#DUNKNOWN@ 2020")
# dual year only works for GREGORIAN
with self.assertRaises(ValueError):
date = CalendarDate.parse("@#DJULIAN@ 2020/21")
# cannot parse nonsense
with self.assertRaises(ValueError):
date = CalendarDate.parse("start of time")
def test_006_cal_date_visitor(self):
"""Test date.CalendarDate.accept method."""
visitor = TestDateVisitor()
date = GregorianDate(2017, "OCT", 9)
value = date.accept(visitor)
self.assertEqual(value, ("gregorian", date))
date = HebrewDate(5000)
value = date.accept(visitor)
self.assertEqual(value, ("hebrew", date))
date = FrenchDate(1, "VEND", 1)
value = date.accept(visitor)
self.assertEqual(value, ("french", date))
date = JulianDate(1582, "OCT", 5)
value = date.accept(visitor)
self.assertEqual(value, ("julian", date))
def test_007_cal_date_hash(self):
"""Test date.CalendarDate hash."""
self.assertEqual(hash(GregorianDate(2017, "OCT", 9)),
hash(GregorianDate(2017, "OCT", 9)))
self.assertEqual(hash(GregorianDate(2017, "OCT", 9, bc=True)),
hash(GregorianDate(2017, "OCT", 9, bc=True)))
self.assertEqual(hash(FrenchDate(1, "VEND", 1)),
hash(FrenchDate(1, "VEND", 1)))
self.assertEqual(hash(FrenchDate(1)),
hash(FrenchDate(1)))
def test_010_date_no_date(self):
"""Test date.DateValue class."""
date = DateValue.parse("not a date")
self.assertIsInstance(date, DateValuePhrase)
self.assertEqual(date.kind, DateValueTypes.PHRASE)
self.assertEqual(date.phrase, "not a date")
self.assertEqual(str(date), "(not a date)")
def test_012_date_parse_period(self):
"""Test date.DateValue class."""
date = DateValue.parse("FROM 1967")
self.assertIsInstance(date, DateValueFrom)
self.assertEqual(date.kind, DateValueTypes.FROM)
self.assertEqual(date.date, GregorianDate(1967))
self.assertEqual(str(date), "FROM 1967")
date = DateValue.parse("TO 1 JAN 2017")
self.assertIsInstance(date, DateValueTo)
self.assertEqual(date.kind, DateValueTypes.TO)
self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
self.assertEqual(str(date), "TO 1 JAN 2017")
date = DateValue.parse("FROM 1920 TO 2000")
self.assertIsInstance(date, DateValuePeriod)
self.assertEqual(date.kind, DateValueTypes.PERIOD)
self.assertEqual(date.date1, GregorianDate(1920))
self.assertEqual(date.date2, GregorianDate(2000))
self.assertEqual(str(date), "FROM 1920 TO 2000")
date = DateValue.parse("from mar 1920 to 1 apr 2000")
self.assertIsInstance(date, DateValuePeriod)
self.assertEqual(date.kind, DateValueTypes.PERIOD)
self.assertEqual(date.date1, GregorianDate(1920, "MAR"))
self.assertEqual(date.date2, GregorianDate(2000, "APR", 1))
self.assertEqual(str(date), "FROM MAR 1920 TO 1 APR 2000")
def test_013_date_parse_range(self):
"""Test date.DateValue class."""
date = DateValue.parse("BEF 1967B.C.")
self.assertIsInstance(date, DateValueBefore)
self.assertEqual(date.kind, DateValueTypes.BEFORE)
self.assertEqual(date.date, GregorianDate(1967, bc=True))
self.assertEqual(str(date), "BEFORE 1967 B.C.")
date = DateValue.parse("AFT 1 JAN 2017")
self.assertIsInstance(date, DateValueAfter)
self.assertEqual(date.kind, DateValueTypes.AFTER)
self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
self.assertEqual(str(date), "AFTER 1 JAN 2017")
date = DateValue.parse("BET @#DJULIAN@ 1600 AND 2000")
self.assertIsInstance(date, DateValueRange)
self.assertEqual(date.kind, DateValueTypes.RANGE)
self.assertEqual(date.date1, JulianDate(1600))
self.assertEqual(date.date2, GregorianDate(2000))
self.assertEqual(str(date), "BETWEEN @#DJULIAN@ 1600 AND 2000")
date = DateValue.parse("bet mar 1920 and apr 2000")
self.assertIsInstance(date, DateValueRange)
self.assertEqual(date.kind, DateValueTypes.RANGE)
self.assertEqual(date.date1, GregorianDate(1920, "MAR"))
self.assertEqual(date.date2, GregorianDate(2000, "APR"))
self.assertEqual(str(date), "BETWEEN MAR 1920 AND APR 2000")
def test_014_date_parse_approx(self):
"""Test date.DateValue class."""
dates = {"500 B.C.": GregorianDate(500, bc=True),
"JAN 2017": GregorianDate(2017, "JAN"),
"31 JAN 2017": GregorianDate(2017, "JAN", 31)}
approx = [
("ABT", "ABOUT", DateValueAbout, DateValueTypes.ABOUT),
("CAL", "CALCULATED", DateValueCalculated, DateValueTypes.CALCULATED),
("EST", "ESTIMATED", DateValueEstimated, DateValueTypes.ESTIMATED)
]
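        # Each (abbreviation, canonical keyword, class, type enum) row is
        # checked against every sample date defined above.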
for appr, fmt, klass, typeEnum in approx:
for datestr, value in dates.items():
date = DateValue.parse(appr + " " + datestr)
self.assertIsInstance(date, klass)
self.assertEqual(date.kind, typeEnum)
self.assertEqual(str(date), fmt + " " + datestr)
self.assertEqual(date.date, value)
def test_015_date_parse_phrase(self):
"""Test date.DateValue class."""
date = DateValue.parse("(some phrase)")
self.assertIsInstance(date, DateValuePhrase)
self.assertEqual(date.kind, DateValueTypes.PHRASE)
self.assertEqual(date.phrase, "some phrase")
date = DateValue.parse("INT 1967 B.C. (some phrase)")
self.assertIsInstance(date, DateValueInterpreted)
self.assertEqual(date.kind, DateValueTypes.INTERPRETED)
self.assertEqual(date.date, GregorianDate(1967, bc=True))
self.assertEqual(date.phrase, "some phrase")
self.assertEqual(str(date), "INTERPRETED 1967 B.C. (some phrase)")
date = DateValue.parse("INT @#DGREGORIAN@ 1 JAN 2017 (some phrase)")
self.assertIsInstance(date, DateValueInterpreted)
self.assertEqual(date.kind, DateValueTypes.INTERPRETED)
self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
self.assertEqual(date.phrase, "some phrase")
self.assertEqual(str(date), "INTERPRETED 1 JAN 2017 (some phrase)")
def test_016_date_parse_simple(self):
"""Test date.DateValue class."""
date = DateValue.parse("1967 B.C.")
self.assertIsInstance(date, DateValueSimple)
self.assertEqual(date.kind, DateValueTypes.SIMPLE)
self.assertEqual(date.date, GregorianDate(1967, bc=True))
self.assertEqual(str(date), "1967 B.C.")
date = DateValue.parse("@#DGREGORIAN@ 1 JAN 2017")
self.assertIsInstance(date, DateValueSimple)
self.assertEqual(date.kind, DateValueTypes.SIMPLE)
self.assertEqual(date.date, GregorianDate(2017, "JAN", 1))
self.assertEqual(str(date), "1 JAN 2017")
def test_017_date_cmp(self):
"""Test date.Date class."""
dv = DateValue.parse("2016")
self.assertIsInstance(dv.key(), tuple)
self.assertEqual(dv.key(), (GregorianDate(2016), GregorianDate(2016)))
dv = DateValue.parse("31 DEC 2000")
self.assertIsInstance(dv.key(), tuple)
self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2000, "DEC", 31)))
dv = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
self.assertIsInstance(dv.key(), tuple)
self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2001, "JAN", 1)))
        # dates given in the wrong order are kept as parsed, not swapped
dv = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2000")
self.assertIsInstance(dv.key(), tuple)
self.assertEqual(dv.key(), (GregorianDate(2000, "DEC", 31), GregorianDate(2000, "JAN", 1)))
self.assertTrue(DateValue.parse("2016") < DateValue.parse("2017"))
self.assertTrue(DateValue.parse("2 JAN 2016") > DateValue.parse("1 JAN 2016"))
self.assertTrue(DateValue.parse("BET 1900 AND 2000") < DateValue.parse("FROM 1920 TO 1999"))
# comparing simple date with range
self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("BET 1 JAN 1999 AND 1 JAN 2000"))
self.assertNotEqual(DateValue.parse("1 JAN 2000"), DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("BEF 1 JAN 2000"))
self.assertTrue(DateValue.parse("1 JAN 2000") > DateValue.parse("TO 1 JAN 2000"))
self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("AFT 1 JAN 2000"))
self.assertTrue(DateValue.parse("1 JAN 2000") < DateValue.parse("FROM 1 JAN 2000"))
# comparing ranges
self.assertEqual(DateValue.parse("FROM 1 JAN 2000 TO 1 JAN 2001"),
DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
self.assertTrue(DateValue.parse("FROM 1 JAN 1999 TO 1 JAN 2001") <
DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
self.assertTrue(DateValue.parse("FROM 1 JAN 2000 TO 1 JAN 2002") >
DateValue.parse("BET 1 JAN 2000 AND 1 JAN 2001"))
# Less specific date compares later than more specific
self.assertTrue(DateValue.parse("2000") > DateValue.parse("31 DEC 2000"))
self.assertTrue(DateValue.parse("DEC 2000") > DateValue.parse("31 DEC 2000"))
# phrase is always later than any regular date
self.assertTrue(DateValue.parse("(Could be 1996 or 1998)") > DateValue.parse("2000"))
# "empty" date is always later than any regular date
self.assertTrue(DateValue.parse("") > DateValue.parse("2000"))
def test_018_date_parse_empty(self):
"""Test date.DateValue class."""
for value in (None, ""):
date = DateValue.parse(value)
self.assertIsInstance(date, DateValuePhrase)
self.assertEqual(date.kind, DateValueTypes.PHRASE)
self.assertIsNone(date.phrase)
self.assertEqual(str(date), "")
def test_019_date_value_visitor(self):
"""Test date.DateValue class."""
visitor = TestDateVisitor()
date1 = GregorianDate(2017, "JAN", 1)
date2 = GregorianDate(2017, "DEC", 31)
value = DateValueSimple(date1).accept(visitor)
self.assertEqual(value, ("simple", date1))
value = DateValueFrom(date1).accept(visitor)
self.assertEqual(value, ("from", date1))
value = DateValueTo(date1).accept(visitor)
self.assertEqual(value, ("to", date1))
value = DateValuePeriod(date1, date2).accept(visitor)
self.assertEqual(value, ("period", date1, date2))
value = DateValueBefore(date1).accept(visitor)
self.assertEqual(value, ("before", date1))
value = DateValueAfter(date1).accept(visitor)
self.assertEqual(value, ("after", date1))
value = DateValueRange(date1, date2).accept(visitor)
self.assertEqual(value, ("range", date1, date2))
value = DateValueAbout(date1).accept(visitor)
self.assertEqual(value, ("about", date1))
value = DateValueCalculated(date1).accept(visitor)
self.assertEqual(value, ("calculated", date1))
value = DateValueEstimated(date1).accept(visitor)
self.assertEqual(value, ("estimated", date1))
value = DateValueInterpreted(date1, "phrase").accept(visitor)
self.assertEqual(value, ("interpreted", date1, "phrase"))
value = DateValuePhrase("phrase").accept(visitor)
self.assertEqual(value, ("phrase", "phrase"))
def test_020_date_hash(self):
"""Test date.Date hash"""
dv1 = DateValue.parse("2016")
dv2 = DateValue.parse("2016")
self.assertEqual(hash(dv1), hash(dv2))
dv1 = DateValue.parse("31 DEC 2000")
dv2 = DateValue.parse("31 DEC 2000")
self.assertEqual(hash(dv1), hash(dv2))
dv1 = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
dv2 = DateValue.parse("BET 31 DEC 2000 AND 1 JAN 2001")
self.assertEqual(hash(dv1), hash(dv2))
| [
"ged4py.date.DateValueSimple",
"ged4py.date.DateValueAfter",
"ged4py.date.DateValuePhrase",
"ged4py.calendar.FrenchDate",
"ged4py.date.DateValueRange",
"ged4py.date.DateValue.parse",
"ged4py.date.DateValueAbout",
"ged4py.date.DateValuePeriod",
"ged4py.calendar.HebrewDate",
"ged4py.date.DateValueCalculated",
"ged4py.date.DateValueEstimated",
"ged4py.date.DateValueInterpreted",
"ged4py.calendar.GregorianDate",
"ged4py.calendar.JulianDate",
"ged4py.date.DateValueTo",
"ged4py.date.DateValueFrom",
"ged4py.calendar.CalendarDate.parse",
"ged4py.date.DateValueBefore"
] | [((3455, 3484), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""OCT"""', '(9)'], {}), "(2017, 'OCT', 9)\n", (3468, 3484), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((3858, 3893), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""OCT"""'], {'bc': '(True)'}), "(2017, 'OCT', bc=True)\n", (3871, 3893), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((4269, 4311), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1699)', '"""FEB"""'], {'dual_year': '(1700)'}), "(1699, 'FEB', dual_year=1700)\n", (4282, 4311), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((4690, 4706), 'ged4py.calendar.HebrewDate', 'HebrewDate', (['(5000)'], {}), '(5000)\n', (4700, 4706), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((5024, 5048), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(1)', '"""FRUC"""', '(1)'], {}), "(1, 'FRUC', 1)\n", (5034, 5048), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((5374, 5403), 'ged4py.calendar.JulianDate', 'JulianDate', (['(5)', '"""JAN"""'], {'bc': '(True)'}), "(5, 'JAN', bc=True)\n", (5384, 5403), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((5809, 5838), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""OCT"""', '(9)'], {}), "(2017, 'OCT', 9)\n", (5822, 5838), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((5908, 5953), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1699)', '"""FEB"""', '(1)'], {'dual_year': '(1700)'}), "(1699, 'FEB', 1, dual_year=1700)\n", (5921, 5953), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6023, 6056), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(2017)', '"""VENT"""'], {'bc': '(True)'}), "(2017, 'VENT', bc=True)\n", (6033, 6056), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6126, 6153), 'ged4py.calendar.HebrewDate', 'HebrewDate', (['(2017)', '"""TSH"""', '(22)'], {}), "(2017, 'TSH', 22)\n", (6136, 6153), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6223, 6239), 'ged4py.calendar.JulianDate', 'JulianDate', (['(1000)'], {}), '(1000)\n', (6233, 6239), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8697, 8726), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""OCT"""', '(9)'], {}), "(2017, 'OCT', 9)\n", (8710, 8726), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8793, 8828), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""OCT"""'], {'bc': '(True)'}), "(2017, 'OCT', bc=True)\n", (8806, 8828), False, 'from ged4py.calendar import 
CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8898, 8943), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1699)', '"""JAN"""', '(1)'], {'dual_year': '(1700)'}), "(1699, 'JAN', 1, dual_year=1700)\n", (8911, 8943), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((9013, 9029), 'ged4py.calendar.HebrewDate', 'HebrewDate', (['(5000)'], {}), '(5000)\n', (9023, 9029), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((9101, 9125), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(1)', '"""VEND"""', '(1)'], {}), "(1, 'VEND', 1)\n", (9111, 9125), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((9203, 9229), 'ged4py.calendar.JulianDate', 'JulianDate', (['(1582)', '"""OCT"""', '(5)'], {}), "(1582, 'OCT', 5)\n", (9213, 9229), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((9398, 9431), 'ged4py.calendar.CalendarDate.parse', 'CalendarDate.parse', (['"""31 MAY 2020"""'], {}), "('31 MAY 2020')\n", (9416, 9431), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((9863, 9913), 'ged4py.calendar.CalendarDate.parse', 'CalendarDate.parse', (['"""@#DGREGORIAN@ 10 MAR 1698/99"""'], {}), "('@#DGREGORIAN@ 10 MAR 1698/99')\n", (9881, 9913), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((10367, 10403), 'ged4py.calendar.CalendarDate.parse', 'CalendarDate.parse', (['"""10 MAR 1699/00"""'], {}), "('10 MAR 1699/00')\n", (10385, 10403), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((10682, 10723), 'ged4py.calendar.CalendarDate.parse', 'CalendarDate.parse', (['"""@#DJULIAN@ 100 B.C."""'], {}), "('@#DJULIAN@ 100 B.C.')\n", (10700, 10723), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((11102, 11149), 'ged4py.calendar.CalendarDate.parse', 'CalendarDate.parse', (['"""@#DFRENCH R@ 15 GERM 0001"""'], {}), "('@#DFRENCH R@ 15 GERM 0001')\n", (11120, 11149), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((11547, 11590), 'ged4py.calendar.CalendarDate.parse', 'CalendarDate.parse', (['"""@#DHEBREW@ 7 NSN 5000"""'], {}), "('@#DHEBREW@ 7 NSN 5000')\n", (11565, 11590), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((12662, 12691), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""OCT"""', '(9)'], {}), "(2017, 'OCT', 9)\n", (12675, 12691), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((12798, 12814), 'ged4py.calendar.HebrewDate', 'HebrewDate', (['(5000)'], {}), '(5000)\n', (12808, 12814), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((12918, 12942), 
'ged4py.calendar.FrenchDate', 'FrenchDate', (['(1)', '"""VEND"""', '(1)'], {}), "(1, 'VEND', 1)\n", (12928, 12942), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((13046, 13072), 'ged4py.calendar.JulianDate', 'JulianDate', (['(1582)', '"""OCT"""', '(5)'], {}), "(1582, 'OCT', 5)\n", (13056, 13072), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((13810, 13839), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""not a date"""'], {}), "('not a date')\n", (13825, 13839), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((14156, 14184), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""FROM 1967"""'], {}), "('FROM 1967')\n", (14171, 14184), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((14415, 14447), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""TO 1 JAN 2017"""'], {}), "('TO 1 JAN 2017')\n", (14430, 14447), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((14688, 14724), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""FROM 1920 TO 2000"""'], {}), "('FROM 1920 TO 2000')\n", (14703, 14724), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((15026, 15072), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""from mar 1920 to 1 apr 2000"""'], {}), "('from mar 1920 to 1 apr 2000')\n", (15041, 15072), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((15484, 15515), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BEF 1967B.C."""'], {}), "('BEF 1967B.C.')\n", (15499, 15515), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((15766, 15799), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""AFT 1 JAN 2017"""'], {}), "('AFT 1 JAN 2017')\n", (15781, 15799), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((16049, 16096), 
'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET @#DJULIAN@ 1600 AND 2000"""'], {}), "('BET @#DJULIAN@ 1600 AND 2000')\n", (16064, 16096), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((16408, 16452), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""bet mar 1920 and apr 2000"""'], {}), "('bet mar 1920 and apr 2000')\n", (16423, 16452), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((17768, 17800), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""(some phrase)"""'], {}), "('(some phrase)')\n", (17783, 17800), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((17982, 18028), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""INT 1967 B.C. (some phrase)"""'], {}), "('INT 1967 B.C. (some phrase)')\n", (17997, 18028), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((18361, 18422), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""INT @#DGREGORIAN@ 1 JAN 2017 (some phrase)"""'], {}), "('INT @#DGREGORIAN@ 1 JAN 2017 (some phrase)')\n", (18376, 18422), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((18841, 18869), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1967 B.C."""'], {}), "('1967 B.C.')\n", (18856, 18869), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((19113, 19156), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""@#DGREGORIAN@ 1 JAN 2017"""'], {}), "('@#DGREGORIAN@ 1 JAN 2017')\n", (19128, 19156), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((19470, 19493), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2016"""'], {}), "('2016')\n", (19485, 19493), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((19634, 19664), 
'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""31 DEC 2000"""'], {}), "('31 DEC 2000')\n", (19649, 19664), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((19827, 19876), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 31 DEC 2000 AND 1 JAN 2001"""'], {}), "('BET 31 DEC 2000 AND 1 JAN 2001')\n", (19842, 19876), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20076, 20125), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 31 DEC 2000 AND 1 JAN 2000"""'], {}), "('BET 31 DEC 2000 AND 1 JAN 2000')\n", (20091, 20125), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22762, 22791), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (22775, 22791), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((22808, 22838), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""DEC"""', '(31)'], {}), "(2017, 'DEC', 31)\n", (22821, 22838), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((24267, 24290), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2016"""'], {}), "('2016')\n", (24282, 24290), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((24305, 24328), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2016"""'], {}), "('2016')\n", (24320, 24328), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((24391, 24421), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""31 DEC 2000"""'], {}), "('31 DEC 2000')\n", (24406, 24421), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((24436, 24466), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""31 DEC 2000"""'], {}), "('31 DEC 2000')\n", (24451, 24466), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((24529, 
24578), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 31 DEC 2000 AND 1 JAN 2001"""'], {}), "('BET 31 DEC 2000 AND 1 JAN 2001')\n", (24544, 24578), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((24593, 24642), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 31 DEC 2000 AND 1 JAN 2001"""'], {}), "('BET 31 DEC 2000 AND 1 JAN 2001')\n", (24608, 24642), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((12061, 12097), 'ged4py.calendar.CalendarDate.parse', 'CalendarDate.parse', (['"""@#DROMAN@ 2020"""'], {}), "('@#DROMAN@ 2020')\n", (12079, 12097), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((12194, 12232), 'ged4py.calendar.CalendarDate.parse', 'CalendarDate.parse', (['"""@#DUNKNOWN@ 2020"""'], {}), "('@#DUNKNOWN@ 2020')\n", (12212, 12232), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((12342, 12382), 'ged4py.calendar.CalendarDate.parse', 'CalendarDate.parse', (['"""@#DJULIAN@ 2020/21"""'], {}), "('@#DJULIAN@ 2020/21')\n", (12360, 12382), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((12479, 12514), 'ged4py.calendar.CalendarDate.parse', 'CalendarDate.parse', (['"""start of time"""'], {}), "('start of time')\n", (12497, 12514), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((14329, 14348), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1967)'], {}), '(1967)\n', (14342, 14348), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((14588, 14617), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (14601, 14617), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((14874, 14893), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1920)'], {}), '(1920)\n', (14887, 14893), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((14932, 14951), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2000)'], {}), '(2000)\n', (14945, 14951), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((15222, 15248), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1920)', '"""MAR"""'], {}), "(1920, 'MAR')\n", (15235, 15248), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((15287, 15316), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2000)', '"""APR"""', '(1)'], {}), "(2000, 'APR', 1)\n", (15300, 15316), False, 'from 
ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((15664, 15692), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1967)'], {'bc': '(True)'}), '(1967, bc=True)\n', (15677, 15692), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((15946, 15975), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (15959, 15975), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((16244, 16260), 'ged4py.calendar.JulianDate', 'JulianDate', (['(1600)'], {}), '(1600)\n', (16254, 16260), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((16299, 16318), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2000)'], {}), '(2000)\n', (16312, 16318), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((16600, 16626), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1920)', '"""MAR"""'], {}), "(1920, 'MAR')\n", (16613, 16626), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((16665, 16691), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2000)', '"""APR"""'], {}), "(2000, 'APR')\n", (16678, 16691), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((16876, 16903), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(500)'], {'bc': '(True)'}), '(500, bc=True)\n', (16889, 16903), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((16934, 16960), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""'], {}), "(2017, 'JAN')\n", (16947, 16960), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((16994, 17024), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(31)'], {}), "(2017, 'JAN', 31)\n", (17007, 17024), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((18187, 18215), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1967)'], {'bc': '(True)'}), '(1967, bc=True)\n', (18200, 18215), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((18581, 18610), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (18594, 18610), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((19018, 19046), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1967)'], {'bc': '(True)'}), '(1967, bc=True)\n', (19031, 19046), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((19305, 19334), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (19318, 19334), False, 'from 
ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((20715, 20744), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1 JAN 2000"""'], {}), "('1 JAN 2000')\n", (20730, 20744), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20746, 20794), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 1 JAN 2000 AND 1 JAN 2001"""'], {}), "('BET 1 JAN 2000 AND 1 JAN 2001')\n", (20761, 20794), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21319, 21367), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""FROM 1 JAN 2000 TO 1 JAN 2001"""'], {}), "('FROM 1 JAN 2000 TO 1 JAN 2001')\n", (21334, 21367), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21394, 21442), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 1 JAN 2000 AND 1 JAN 2001"""'], {}), "('BET 1 JAN 2000 AND 1 JAN 2001')\n", (21409, 21442), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22393, 22415), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['value'], {}), '(value)\n', (22408, 22415), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((6400, 6429), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2016)', '"""JAN"""', '(1)'], {}), "(2016, 'JAN', 1)\n", (6413, 6429), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6432, 6461), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (6445, 6461), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6487, 6516), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (6500, 6516), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6519, 6548), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""FEB"""', '(1)'], {}), "(2017, 'FEB', 1)\n", (6532, 6548), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6574, 6603), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 
1)\n", (6587, 6603), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6606, 6635), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(2)'], {}), "(2017, 'JAN', 2)\n", (6619, 6635), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6662, 6691), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (6675, 6691), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6695, 6724), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(2)'], {}), "(2017, 'JAN', 2)\n", (6708, 6724), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6750, 6779), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(2)'], {}), "(2017, 'JAN', 2)\n", (6763, 6779), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6782, 6811), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (6795, 6811), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6837, 6866), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(2)'], {}), "(2017, 'JAN', 2)\n", (6850, 6866), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6870, 6899), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (6883, 6899), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6925, 6954), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (6938, 6954), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((6958, 6987), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (6971, 6987), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7013, 7042), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(1)'], {}), "(2017, 'JAN', 1)\n", (7026, 7042), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7046, 7075), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""', '(2)'], {}), "(2017, 'JAN', 2)\n", (7059, 7075), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7188, 7214), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""'], {}), "(2017, 'JAN')\n", (7201, 7214), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7217, 7247), 'ged4py.calendar.GregorianDate', 
'GregorianDate', (['(2017)', '"""JAN"""', '(31)'], {}), "(2017, 'JAN', 31)\n", (7230, 7247), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7273, 7299), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""JAN"""'], {}), "(2017, 'JAN')\n", (7286, 7299), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7302, 7331), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""FEB"""', '(1)'], {}), "(2017, 'FEB', 1)\n", (7315, 7331), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7443, 7462), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)'], {}), '(2017)\n', (7456, 7462), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7465, 7495), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""DEC"""', '(31)'], {}), "(2017, 'DEC', 31)\n", (7478, 7495), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7521, 7540), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)'], {}), '(2017)\n', (7534, 7540), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7543, 7572), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2018)', '"""JAN"""', '(1)'], {}), "(2018, 'JAN', 1)\n", (7556, 7572), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7619, 7648), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1700)', '"""JAN"""', '(1)'], {}), "(1700, 'JAN', 1)\n", (7632, 7648), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7652, 7697), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1699)', '"""JAN"""', '(1)'], {'dual_year': '(1700)'}), "(1699, 'JAN', 1, dual_year=1700)\n", (7665, 7697), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7769, 7799), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1582)', '"""OCT"""', '(15)'], {}), "(1582, 'OCT', 15)\n", (7782, 7799), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7803, 7829), 'ged4py.calendar.JulianDate', 'JulianDate', (['(1582)', '"""OCT"""', '(5)'], {}), "(1582, 'OCT', 5)\n", (7813, 7829), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7855, 7885), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1582)', '"""OCT"""', '(16)'], {}), "(1582, 'OCT', 16)\n", (7868, 7885), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7888, 7914), 'ged4py.calendar.JulianDate', 'JulianDate', (['(1582)', '"""OCT"""', '(5)'], {}), "(1582, 'OCT', 5)\n", (7898, 7914), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7940, 
7966), 'ged4py.calendar.JulianDate', 'JulianDate', (['(1582)', '"""OCT"""', '(6)'], {}), "(1582, 'OCT', 6)\n", (7950, 7966), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((7969, 7999), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1582)', '"""OCT"""', '(15)'], {}), "(1582, 'OCT', 15)\n", (7982, 7999), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8025, 8055), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2000)', '"""JAN"""', '(14)'], {}), "(2000, 'JAN', 14)\n", (8038, 8055), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8059, 8085), 'ged4py.calendar.JulianDate', 'JulianDate', (['(2000)', '"""JAN"""', '(1)'], {}), "(2000, 'JAN', 1)\n", (8069, 8085), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8157, 8187), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1792)', '"""SEP"""', '(22)'], {}), "(1792, 'SEP', 22)\n", (8170, 8187), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8191, 8215), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(1)', '"""VEND"""', '(1)'], {}), "(1, 'VEND', 1)\n", (8201, 8215), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8241, 8271), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1792)', '"""SEP"""', '(23)'], {}), "(1792, 'SEP', 23)\n", (8254, 8271), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8274, 8298), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(1)', '"""VEND"""', '(1)'], {}), "(1, 'VEND', 1)\n", (8284, 8298), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8324, 8348), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(1)', '"""VEND"""', '(2)'], {}), "(1, 'VEND', 2)\n", (8334, 8348), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8351, 8381), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(1792)', '"""SEP"""', '(22)'], {}), "(1792, 'SEP', 22)\n", (8364, 8381), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8407, 8437), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2020)', '"""SEP"""', '(21)'], {}), "(2020, 'SEP', 21)\n", (8420, 8437), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8441, 8467), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(228)', '"""COMP"""', '(5)'], {}), "(228, 'COMP', 5)\n", (8451, 8467), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((8539, 8568), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2020)', '"""JAN"""', '(1)'], {}), "(2020, 'JAN', 1)\n", (8552, 8568), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, 
JulianDate, CalendarDateVisitor\n'), ((8572, 8598), 'ged4py.calendar.HebrewDate', 'HebrewDate', (['(5780)', '"""SVN"""', '(4)'], {}), "(5780, 'SVN', 4)\n", (8582, 8598), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((13273, 13302), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""OCT"""', '(9)'], {}), "(2017, 'OCT', 9)\n", (13286, 13302), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((13335, 13364), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""OCT"""', '(9)'], {}), "(2017, 'OCT', 9)\n", (13348, 13364), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((13397, 13435), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""OCT"""', '(9)'], {'bc': '(True)'}), "(2017, 'OCT', 9, bc=True)\n", (13410, 13435), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((13468, 13506), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2017)', '"""OCT"""', '(9)'], {'bc': '(True)'}), "(2017, 'OCT', 9, bc=True)\n", (13481, 13506), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((13539, 13563), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(1)', '"""VEND"""', '(1)'], {}), "(1, 'VEND', 1)\n", (13549, 13563), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((13596, 13620), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(1)', '"""VEND"""', '(1)'], {}), "(1, 'VEND', 1)\n", (13606, 13620), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((13653, 13666), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(1)'], {}), '(1)\n', (13663, 13666), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((13699, 13712), 'ged4py.calendar.FrenchDate', 'FrenchDate', (['(1)'], {}), '(1)\n', (13709, 13712), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((17409, 17446), 'ged4py.date.DateValue.parse', 'DateValue.parse', (["(appr + ' ' + datestr)"], {}), "(appr + ' ' + datestr)\n", (17424, 17446), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((19577, 19596), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2016)'], {}), '(2016)\n', (19590, 19596), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((19598, 19617), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2016)'], {}), '(2016)\n', (19611, 19617), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((19748, 19778), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2000)', '"""DEC"""', 
'(31)'], {}), "(2000, 'DEC', 31)\n", (19761, 19778), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((19780, 19810), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2000)', '"""DEC"""', '(31)'], {}), "(2000, 'DEC', 31)\n", (19793, 19810), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((19960, 19990), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2000)', '"""DEC"""', '(31)'], {}), "(2000, 'DEC', 31)\n", (19973, 19990), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((19992, 20021), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2001)', '"""JAN"""', '(1)'], {}), "(2001, 'JAN', 1)\n", (20005, 20021), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((20209, 20239), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2000)', '"""DEC"""', '(31)'], {}), "(2000, 'DEC', 31)\n", (20222, 20239), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((20241, 20270), 'ged4py.calendar.GregorianDate', 'GregorianDate', (['(2000)', '"""JAN"""', '(1)'], {}), "(2000, 'JAN', 1)\n", (20254, 20270), False, 'from ged4py.calendar import CalendarType, CalendarDate, FrenchDate, GregorianDate, HebrewDate, JulianDate, CalendarDateVisitor\n'), ((20298, 20321), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2016"""'], {}), "('2016')\n", (20313, 20321), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20324, 20347), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2017"""'], {}), "('2017')\n", (20339, 20347), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20373, 20402), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2 JAN 2016"""'], {}), "('2 JAN 2016')\n", (20388, 20402), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20405, 20434), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1 JAN 2016"""'], {}), "('1 JAN 2016')\n", (20420, 20434), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20460, 20496), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 1900 AND 2000"""'], {}), "('BET 1900 AND 2000')\n", (20475, 20496), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, 
DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20499, 20535), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""FROM 1920 TO 1999"""'], {}), "('FROM 1920 TO 1999')\n", (20514, 20535), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20605, 20634), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1 JAN 2000"""'], {}), "('1 JAN 2000')\n", (20620, 20634), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20637, 20685), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 1 JAN 1999 AND 1 JAN 2000"""'], {}), "('BET 1 JAN 1999 AND 1 JAN 2000')\n", (20652, 20685), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20820, 20849), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1 JAN 2000"""'], {}), "('1 JAN 2000')\n", (20835, 20849), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20852, 20900), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 1 JAN 2000 AND 1 JAN 2001"""'], {}), "('BET 1 JAN 2000 AND 1 JAN 2001')\n", (20867, 20900), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20926, 20955), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1 JAN 2000"""'], {}), "('1 JAN 2000')\n", (20941, 20955), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((20958, 20991), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BEF 1 JAN 2000"""'], {}), "('BEF 1 JAN 2000')\n", (20973, 20991), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21017, 21046), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1 JAN 2000"""'], {}), "('1 JAN 2000')\n", (21032, 21046), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, 
DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21049, 21081), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""TO 1 JAN 2000"""'], {}), "('TO 1 JAN 2000')\n", (21064, 21081), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21107, 21136), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1 JAN 2000"""'], {}), "('1 JAN 2000')\n", (21122, 21136), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21139, 21172), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""AFT 1 JAN 2000"""'], {}), "('AFT 1 JAN 2000')\n", (21154, 21172), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21198, 21227), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""1 JAN 2000"""'], {}), "('1 JAN 2000')\n", (21213, 21227), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21230, 21264), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""FROM 1 JAN 2000"""'], {}), "('FROM 1 JAN 2000')\n", (21245, 21264), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21468, 21516), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""FROM 1 JAN 1999 TO 1 JAN 2001"""'], {}), "('FROM 1 JAN 1999 TO 1 JAN 2001')\n", (21483, 21516), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21543, 21591), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 1 JAN 2000 AND 1 JAN 2001"""'], {}), "('BET 1 JAN 2000 AND 1 JAN 2001')\n", (21558, 21591), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21617, 21665), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""FROM 1 JAN 2000 TO 1 JAN 2002"""'], {}), "('FROM 1 JAN 2000 TO 1 JAN 2002')\n", (21632, 21665), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21692, 
21740), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""BET 1 JAN 2000 AND 1 JAN 2001"""'], {}), "('BET 1 JAN 2000 AND 1 JAN 2001')\n", (21707, 21740), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21830, 21853), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2000"""'], {}), "('2000')\n", (21845, 21853), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21856, 21886), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""31 DEC 2000"""'], {}), "('31 DEC 2000')\n", (21871, 21886), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21912, 21939), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""DEC 2000"""'], {}), "('DEC 2000')\n", (21927, 21939), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((21942, 21972), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""31 DEC 2000"""'], {}), "('31 DEC 2000')\n", (21957, 21972), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22054, 22096), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""(Could be 1996 or 1998)"""'], {}), "('(Could be 1996 or 1998)')\n", (22069, 22096), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22099, 22122), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2000"""'], {}), "('2000')\n", (22114, 22122), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22210, 22229), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['""""""'], {}), "('')\n", (22225, 22229), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22232, 22255), 'ged4py.date.DateValue.parse', 'DateValue.parse', (['"""2000"""'], {}), "('2000')\n", (22247, 22255), False, 'from ged4py.date import DateValue, DateValueAbout, 
DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22856, 22878), 'ged4py.date.DateValueSimple', 'DateValueSimple', (['date1'], {}), '(date1)\n', (22871, 22878), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((22963, 22983), 'ged4py.date.DateValueFrom', 'DateValueFrom', (['date1'], {}), '(date1)\n', (22976, 22983), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23066, 23084), 'ged4py.date.DateValueTo', 'DateValueTo', (['date1'], {}), '(date1)\n', (23077, 23084), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23165, 23194), 'ged4py.date.DateValuePeriod', 'DateValuePeriod', (['date1', 'date2'], {}), '(date1, date2)\n', (23180, 23194), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23286, 23308), 'ged4py.date.DateValueBefore', 'DateValueBefore', (['date1'], {}), '(date1)\n', (23301, 23308), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23393, 23414), 'ged4py.date.DateValueAfter', 'DateValueAfter', (['date1'], {}), '(date1)\n', (23407, 23414), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23498, 23526), 'ged4py.date.DateValueRange', 'DateValueRange', (['date1', 'date2'], {}), '(date1, date2)\n', (23512, 23526), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23617, 23638), 'ged4py.date.DateValueAbout', 'DateValueAbout', (['date1'], {}), '(date1)\n', (23631, 23638), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23722, 23748), 'ged4py.date.DateValueCalculated', 'DateValueCalculated', (['date1'], {}), 
'(date1)\n', (23741, 23748), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23837, 23862), 'ged4py.date.DateValueEstimated', 'DateValueEstimated', (['date1'], {}), '(date1)\n', (23855, 23862), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((23950, 23987), 'ged4py.date.DateValueInterpreted', 'DateValueInterpreted', (['date1', '"""phrase"""'], {}), "(date1, 'phrase')\n", (23970, 23987), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n'), ((24087, 24112), 'ged4py.date.DateValuePhrase', 'DateValuePhrase', (['"""phrase"""'], {}), "('phrase')\n", (24102, 24112), False, 'from ged4py.date import DateValue, DateValueAbout, DateValueAfter, DateValueBefore, DateValueCalculated, DateValueEstimated, DateValueFrom, DateValueInterpreted, DateValuePeriod, DateValuePhrase, DateValueRange, DateValueSimple, DateValueTo, DateValueTypes, DateValueVisitor\n')] |
import time
import h5py
import hdbscan
import numpy as np
import torch
from sklearn.cluster import MeanShift
from pytorch3dunet.datasets.hdf5 import SliceBuilder
from pytorch3dunet.unet3d.utils import get_logger
from pytorch3dunet.unet3d.utils import unpad
logger = get_logger('UNet3DPredictor')
class _AbstractPredictor:
def __init__(self, model, loader, output_file, config, **kwargs):
self.model = model
self.loader = loader
self.output_file = output_file
self.config = config
self.predictor_config = kwargs
@staticmethod
def _volume_shape(dataset):
# TODO: support multiple internal datasets
raw = dataset.raws[0]
if raw.ndim == 3:
return raw.shape
else:
return raw.shape[1:]
@staticmethod
def _get_output_dataset_names(number_of_datasets, prefix='predictions'):
if number_of_datasets == 1:
return [prefix]
else:
return [f'{prefix}{i}' for i in range(number_of_datasets)]
def predict(self):
raise NotImplementedError
class StandardPredictor(_AbstractPredictor):
"""
Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
    Predictions from the network are kept in memory. If the results from the network don't fit into RAM,
    use `LazyPredictor` instead.
    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is
    not present in the config, 'predictions{n}' is used as the default dataset name, where `n` denotes the index
of the output head from the network.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
def __init__(self, model, loader, output_file, config, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
def predict(self):
out_channels = self.config['model'].get('out_channels')
if out_channels is None:
out_channels = self.config['model']['dt_out_channels']
prediction_channel = self.config.get('prediction_channel', None)
if prediction_channel is not None:
logger.info(f"Using only channel '{prediction_channel}' from the network output")
device = self.config['device']
output_heads = self.config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(self.loader)} batches...')
        # dimensionality of the output predictions
volume_shape = self._volume_shape(self.loader.dataset)
if prediction_channel is None:
prediction_maps_shape = (out_channels,) + volume_shape
else:
# single channel prediction map
prediction_maps_shape = (1,) + volume_shape
logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}')
avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True)
logger.info(f'Avoid block artifacts: {avoid_block_artifacts}')
# create destination H5 file
h5_output_file = h5py.File(self.output_file, 'w')
# allocate prediction and normalization arrays
logger.info('Allocating prediction and normalization arrays...')
prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape,
output_heads, h5_output_file)
# Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present)
self.model.eval()
        # Set the `testing=True` flag, otherwise the final Softmax/Sigmoid won't be applied!
self.model.testing = True
# Run predictions on the entire input dataset
with torch.no_grad():
for batch, indices in self.loader:
# send batch to device
batch = batch.to(device)
# forward pass
predictions = self.model(batch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
predictions = [predictions]
# for each output head
for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps,
normalization_masks):
# convert to numpy array
prediction = prediction.cpu().numpy()
# for each batch sample
for pred, index in zip(prediction, indices):
# save patch index: (C,D,H,W)
if prediction_channel is None:
channel_slice = slice(0, out_channels)
else:
channel_slice = slice(0, 1)
index = (channel_slice,) + index
if prediction_channel is not None:
# use only the 'prediction_channel'
logger.info(f"Using channel '{prediction_channel}'...")
pred = np.expand_dims(pred[prediction_channel], axis=0)
logger.info(f'Saving predictions for slice:{index}...')
if avoid_block_artifacts:
# unpad in order to avoid block artifacts in the output probability maps
u_prediction, u_index = unpad(pred, index, volume_shape)
# accumulate probabilities into the output prediction array
prediction_map[u_index] += u_prediction
# count voxel visits for normalization
normalization_mask[u_index] += 1
else:
# accumulate probabilities into the output prediction array
prediction_map[index] += pred
# count voxel visits for normalization
normalization_mask[index] += 1
        # save the accumulated results to the output file
self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset)
# close the output H5 file
h5_output_file.close()
def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
# initialize the output prediction arrays
prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)]
# initialize normalization mask in order to average out probabilities of overlapping patches
normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)]
return prediction_maps, normalization_masks
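    # How the two arrays above are used: predict() accumulates, for every voxel, the sum
    # of all patch predictions covering it, while the normalization mask counts the number
    # of visits. _save_results() then divides the two, so a voxel covered by two patches
    # with probabilities 0.6 and 0.8 ends up with the average (0.6 + 0.8) / 2 = 0.7.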
def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
# save probability maps
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks,
prediction_datasets):
prediction_map = prediction_map / normalization_mask
if dataset.mirror_padding:
pad_width = dataset.pad_width
logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...')
prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width]
logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip")
class LazyPredictor(StandardPredictor):
"""
Applies the model on the given dataset and saves the result in the `output_file` in the H5 format.
Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this predictor
    is slower than the `StandardPredictor`, it should only be used when the predicted volume does not fit into RAM.
    The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is
    not present in the config, 'predictions{n}' is used as the default dataset name, where `n` denotes the index
of the output head from the network.
Args:
model (Unet3D): trained 3D UNet model used for prediction
data_loader (torch.utils.data.DataLoader): input data loader
output_file (str): path to the output H5 file
config (dict): global config dict
"""
def __init__(self, model, loader, output_file, config, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
def _allocate_prediction_maps(self, output_shape, output_heads, output_file):
# allocate datasets for probability maps
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
prediction_maps = [
output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True,
compression='gzip')
for dataset_name in prediction_datasets]
# allocate datasets for normalization masks
normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
normalization_masks = [
output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True,
compression='gzip')
for dataset_name in normalization_datasets]
return prediction_maps, normalization_masks
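    # Design note: allocating chunked, gzip-compressed H5 datasets up front lets h5py
    # stream patch-sized writes straight to disk, so only the current batch has to fit
    # in memory; the price is slower prediction than StandardPredictor, which keeps the
    # full prediction arrays in RAM.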
def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset):
if dataset.mirror_padding:
            logger.warning(
f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}')
prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions')
normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization')
# normalize the prediction_maps inside the H5
for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps,
normalization_masks,
prediction_datasets,
normalization_datasets):
# split the volume into 4 parts and load each into the memory separately
logger.info(f'Normalizing {prediction_dataset}...')
z, y, x = prediction_map.shape[1:]
# take slices which are 1/27 of the original volume
patch_shape = (z // 3, y // 3, x // 3)
for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape):
logger.info(f'Normalizing slice: {index}')
prediction_map[index] /= normalization_mask[index]
# make sure to reset the slice that has been visited already in order to avoid 'double' normalization
# when the patches overlap with each other
normalization_mask[index] = 1
logger.info(f'Deleting {normalization_dataset}...')
del output_file[normalization_dataset]
class EmbeddingsPredictor(_AbstractPredictor):
"""
Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format.
The resulting volume is the segmentation itself (not the embedding vectors) obtained by clustering embeddings
    with the HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together.
"""
def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs):
super().__init__(model, loader, output_file, config, **kwargs)
self.iou_threshold = iou_threshold
self.noise_label = noise_label
self.clustering = clustering
assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported'
logger.info(f'IoU threshold: {iou_threshold}')
self.clustering_name = clustering
self.clustering = self._get_clustering(clustering, kwargs)
def predict(self):
device = self.config['device']
output_heads = self.config['model'].get('output_heads', 1)
logger.info(f'Running prediction on {len(self.loader)} patches...')
        # dimensionality of the output segmentation
volume_shape = self._volume_shape(self.loader.dataset)
logger.info(f'The shape of the output segmentation (DHW): {volume_shape}')
logger.info('Allocating segmentation array...')
# initialize the output prediction arrays
output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)]
# initialize visited_voxels arrays
visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)]
# Sets the module in evaluation mode explicitly
self.model.eval()
self.model.testing = True
# Run predictions on the entire input dataset
with torch.no_grad():
for batch, indices in self.loader:
# logger.info(f'Predicting embeddings for slice:{index}')
# send batch to device
batch = batch.to(device)
# forward pass
embeddings = self.model(batch)
# wrap predictions into a list if there is only one output head from the network
if output_heads == 1:
embeddings = [embeddings]
for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations,
visited_voxels_arrays):
# convert to numpy array
prediction = prediction.cpu().numpy()
# iterate sequentially because of the current simple stitching that we're using
for pred, index in zip(prediction, indices):
# convert embeddings to segmentation with hdbscan clustering
segmentation = self._embeddings_to_segmentation(pred)
# stitch patches
self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array)
# save results
with h5py.File(self.output_file, 'w') as output_file:
prediction_datasets = self._get_output_dataset_names(output_heads,
prefix=f'segmentation/{self.clustering_name}')
for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets):
logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...')
output_file.create_dataset(prediction_dataset, data=output_segmentation, compression="gzip")
def _embeddings_to_segmentation(self, embeddings):
"""
Cluster embeddings vectors with HDBSCAN and return the segmented volume.
Args:
embeddings (ndarray): 4D (CDHW) embeddings tensor
Returns:
3D (DHW) segmentation
"""
# shape of the output segmentation
output_shape = embeddings.shape[1:]
# reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C)
flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose()
logger.info('Clustering embeddings...')
# perform clustering and reshape in order to get the segmentation volume
start = time.time()
clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape)
logger.info(
f'Number of clusters found by {self.clustering}: {np.max(clusters)}. Duration: {time.time() - start} sec.')
return clusters
def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array):
"""
Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array visited voxels
merge the segmented patch (`segmentation`) into the `output_segmentation`
Args:
segmentation (ndarray): segmented patch
index (tuple): position of the patch inside `output_segmentation` volume
output_segmentation (ndarray): current state of the output segmentation
visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`); visited
voxels will be marked by a number greater than 0
"""
index = tuple(index)
# get new unassigned label
max_label = np.max(output_segmentation) + 1
# make sure there are no clashes between current segmentation patch and the output_segmentation
# but keep the noise label
noise_mask = segmentation == self.noise_label
segmentation += int(max_label)
segmentation[noise_mask] = self.noise_label
# get the overlap mask in the current patch
overlap_mask = visited_voxels_array[index] > 0
# get the new labels inside the overlap_mask
new_labels = np.unique(segmentation[overlap_mask])
merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation)
# relabel new segmentation with the merged labels
for current_label, new_label in merged_labels:
segmentation[segmentation == new_label] = current_label
# update the output_segmentation
output_segmentation[index] = segmentation
# visit the patch
visited_voxels_array[index] += 1
def _merge_labels(self, current_segmentation, new_labels, new_segmentation):
def _most_frequent_label(labels):
unique, counts = np.unique(labels, return_counts=True)
ind = np.argmax(counts)
return unique[ind]
result = []
# iterate over new_labels and merge regions if the IoU exceeds a given threshold
for new_label in new_labels:
# skip 'noise' label assigned by hdbscan
if new_label == self.noise_label:
continue
new_label_mask = new_segmentation == new_label
# get only the most frequent overlapping label
most_frequent_label = _most_frequent_label(current_segmentation[new_label_mask])
# skip 'noise' label
if most_frequent_label == self.noise_label:
continue
current_label_mask = current_segmentation == most_frequent_label
# compute Jaccard index
iou = np.bitwise_and(new_label_mask, current_label_mask).sum() / np.bitwise_or(new_label_mask,
current_label_mask).sum()
if iou > self.iou_threshold:
# merge labels
result.append((most_frequent_label, new_label))
return result
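    # Worked example of the merge criterion (illustrative numbers): if a new patch label
    # overlaps an existing label on 80 voxels and their union covers 100 voxels,
    # IoU = 80 / 100 = 0.8 > 0.7 (the default iou_threshold), so the new label is
    # relabelled to the existing one when the patch is stitched in.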
def _get_clustering(self, clustering_alg, kwargs):
logger.info(f'Using {clustering_alg} for clustering')
if clustering_alg == 'hdbscan':
min_cluster_size = kwargs.get('min_cluster_size', 50)
            min_samples = kwargs.get('min_samples', None)
metric = kwargs.get('metric', 'euclidean')
cluster_selection_method = kwargs.get('cluster_selection_method', 'eom')
logger.info(f'HDBSCAN params: min_cluster_size: {min_cluster_size}, min_samples: {min_samples}')
return hdbscan.HDBSCAN(min_cluster_size=min_cluster_size, min_samples=min_samples, metric=metric,
cluster_selection_method=cluster_selection_method)
else:
bandwidth = kwargs['bandwidth']
logger.info(f'MeanShift params: bandwidth: {bandwidth}, bin_seeding: True')
# use fast MeanShift with bin seeding
return MeanShift(bandwidth=bandwidth, bin_seeding=True)
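    # Clustering-specific kwargs are forwarded from the EmbeddingsPredictor constructor,
    # e.g. (illustrative values) min_cluster_size=100 for 'hdbscan' or bandwidth=0.5 for
    # 'meanshift'; note that 'bandwidth' is required when clustering='meanshift'.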
| [
"numpy.bitwise_or",
"numpy.unique",
"sklearn.cluster.MeanShift",
"pytorch3dunet.unet3d.utils.unpad",
"pytorch3dunet.datasets.hdf5.SliceBuilder._build_slices",
"numpy.argmax",
"h5py.File",
"pytorch3dunet.unet3d.utils.get_logger",
"numpy.max",
"numpy.zeros",
"numpy.bitwise_and",
"numpy.expand_dims",
"torch.no_grad",
"time.time",
"hdbscan.HDBSCAN"
] | [((269, 298), 'pytorch3dunet.unet3d.utils.get_logger', 'get_logger', (['"""UNet3DPredictor"""'], {}), "('UNet3DPredictor')\n", (279, 298), False, 'from pytorch3dunet.unet3d.utils import get_logger\n'), ((3293, 3325), 'h5py.File', 'h5py.File', (['self.output_file', '"""w"""'], {}), "(self.output_file, 'w')\n", (3302, 3325), False, 'import h5py\n'), ((16480, 16491), 'time.time', 'time.time', ([], {}), '()\n', (16489, 16491), False, 'import time\n'), ((18073, 18110), 'numpy.unique', 'np.unique', (['segmentation[overlap_mask]'], {}), '(segmentation[overlap_mask])\n', (18082, 18110), True, 'import numpy as np\n'), ((3991, 4006), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4004, 4006), False, 'import torch\n'), ((6737, 6776), 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': '"""float32"""'}), "(output_shape, dtype='float32')\n", (6745, 6776), True, 'import numpy as np\n'), ((6939, 6976), 'numpy.zeros', 'np.zeros', (['output_shape'], {'dtype': '"""uint8"""'}), "(output_shape, dtype='uint8')\n", (6947, 6976), True, 'import numpy as np\n'), ((11449, 11546), 'pytorch3dunet.datasets.hdf5.SliceBuilder._build_slices', 'SliceBuilder._build_slices', (['prediction_map'], {'patch_shape': 'patch_shape', 'stride_shape': 'patch_shape'}), '(prediction_map, patch_shape=patch_shape,\n stride_shape=patch_shape)\n', (11475, 11546), False, 'from pytorch3dunet.datasets.hdf5 import SliceBuilder\n'), ((13521, 13558), 'numpy.zeros', 'np.zeros', (['volume_shape'], {'dtype': '"""int32"""'}), "(volume_shape, dtype='int32')\n", (13529, 13558), True, 'import numpy as np\n'), ((13665, 13702), 'numpy.zeros', 'np.zeros', (['volume_shape'], {'dtype': '"""uint8"""'}), "(volume_shape, dtype='uint8')\n", (13673, 13702), True, 'import numpy as np\n'), ((13917, 13932), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13930, 13932), False, 'import torch\n'), ((15241, 15273), 'h5py.File', 'h5py.File', (['self.output_file', '"""w"""'], {}), "(self.output_file, 'w')\n", (15250, 15273), False, 'import h5py\n'), ((17576, 17603), 'numpy.max', 'np.max', (['output_segmentation'], {}), '(output_segmentation)\n', (17582, 17603), True, 'import numpy as np\n'), ((18700, 18737), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (18709, 18737), True, 'import numpy as np\n'), ((18756, 18773), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (18765, 18773), True, 'import numpy as np\n'), ((20450, 20595), 'hdbscan.HDBSCAN', 'hdbscan.HDBSCAN', ([], {'min_cluster_size': 'min_cluster_size', 'min_samples': 'min_samples', 'metric': 'metric', 'cluster_selection_method': 'cluster_selection_method'}), '(min_cluster_size=min_cluster_size, min_samples=min_samples,\n metric=metric, cluster_selection_method=cluster_selection_method)\n', (20465, 20595), False, 'import hdbscan\n'), ((20842, 20890), 'sklearn.cluster.MeanShift', 'MeanShift', ([], {'bandwidth': 'bandwidth', 'bin_seeding': '(True)'}), '(bandwidth=bandwidth, bin_seeding=True)\n', (20851, 20890), False, 'from sklearn.cluster import MeanShift\n'), ((16666, 16682), 'numpy.max', 'np.max', (['clusters'], {}), '(clusters)\n', (16672, 16682), True, 'import numpy as np\n'), ((16696, 16707), 'time.time', 'time.time', ([], {}), '()\n', (16705, 16707), False, 'import time\n'), ((19532, 19582), 'numpy.bitwise_and', 'np.bitwise_and', (['new_label_mask', 'current_label_mask'], {}), '(new_label_mask, current_label_mask)\n', (19546, 19582), True, 'import numpy as np\n'), ((19591, 19640), 'numpy.bitwise_or', 'np.bitwise_or', 
(['new_label_mask', 'current_label_mask'], {}), '(new_label_mask, current_label_mask)\n', (19604, 19640), True, 'import numpy as np\n'), ((5415, 5463), 'numpy.expand_dims', 'np.expand_dims', (['pred[prediction_channel]'], {'axis': '(0)'}), '(pred[prediction_channel], axis=0)\n', (5429, 5463), True, 'import numpy as np\n'), ((5749, 5781), 'pytorch3dunet.unet3d.utils.unpad', 'unpad', (['pred', 'index', 'volume_shape'], {}), '(pred, index, volume_shape)\n', (5754, 5781), False, 'from pytorch3dunet.unet3d.utils import unpad\n')] |